| /linux/lib/crypto/x86/ |
| blake2s-core.S |
     70  movdqu 16(CTX),%xmm1		// Load h[4..7]
     82  movdqa %xmm1,%xmm11		// Save h[4..7] and let v[4..7] = h[4..7]
    103  paddd %xmm1,%xmm0
    107  pxor %xmm2,%xmm1
    108  movdqa %xmm1,%xmm8
    109  psrld $12,%xmm1
    111  por %xmm8,%xmm1
    124  paddd %xmm1,%xmm0
    128  pxor %xmm2,%xmm1
    129  movdqa %xmm1,%xmm8
    [all …]
|
| chacha-ssse3-x86_64.S |
     43  paddd %xmm1,%xmm0
     49  pxor %xmm2,%xmm1
     50  movdqa %xmm1,%xmm6
     52  psrld $20,%xmm1
     53  por %xmm6,%xmm1
     56  paddd %xmm1,%xmm0
     62  pxor %xmm2,%xmm1
     63  movdqa %xmm1,%xmm7
     65  psrld $25,%xmm1
     66  por %xmm7,%xmm1
    [all …]
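
The psrld/pslld/por triples in blake2s-core.S and chacha-ssse3-x86_64.S are the standard SSE2 substitute for a packed 32-bit rotate, which the ISA lacks below AVX-512: shift a copy one way, shift the original the other, and OR the halves back together. In the ChaCha hits, psrld $20 pairs with a pslld $12 (rotate left by 12) and psrld $25 with a pslld $7 (rotate left by 7); in the BLAKE2s hit, psrld $12 presumably pairs with a pslld $20 on the saved copy (rotate right by 12). A minimal C sketch of the idiom with SSE2 intrinsics; the function name is illustrative, not the kernel's:

    #include <emmintrin.h>	/* SSE2 */

    /* Rotate each 32-bit lane of v left by r bits, r in 1..31.
     * Mirrors movdqa + pslld $r + psrld $(32-r) + por above;
     * a right-rotate is the same pattern with the shift counts swapped. */
    static inline __m128i rotl32x4(__m128i v, int r)
    {
            return _mm_or_si128(_mm_slli_epi32(v, r),
                                _mm_srli_epi32(v, 32 - r));
    }

    /* ChaCha quarter-round usage (each op on four parallel words):
     * a += b; d = rotl32x4(d ^ a, 16); c += d; b = rotl32x4(b ^ c, 12);
     * a += b; d = rotl32x4(d ^ a,  8); c += d; b = rotl32x4(b ^ c,  7); */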
|
| aes-aesni.S |
    109  movdqu 16(IN_KEY), %xmm1
    110  movdqu %xmm1, 16(RNDKEYS)
    137  _gen_round_key %xmm0, %xmm1
    146  _prefix_sum %xmm1, tmp=%xmm3
    147  pxor %xmm2, %xmm1
    148  movdqu %xmm1, 16(RNDKEYS)
    219  movdqu (RNDKEYS), %xmm1
    220  pxor %xmm1, %xmm0
    226  movdqu (RNDKEYS), %xmm1
    228  aesenc %xmm1, %xmm0
    [all …]
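
The hits at lines 219-228 show the shape of an AES-NI encryption pass: XOR the first round key into the block, run aesenc once per middle round, and finish with aesenclast. A hedged C sketch of that structure with AES-NI intrinsics (function name and round-key layout are illustrative, not the kernel's):

    #include <wmmintrin.h>	/* AES-NI */

    /* Encrypt one 16-byte block with nr rounds (10/12/14 for AES-128/192/256)
     * given the expanded round keys rk[0..nr]. */
    static __m128i aes_encrypt_block(__m128i block, const __m128i *rk, int nr)
    {
            int i;

            block = _mm_xor_si128(block, rk[0]);		/* cf. pxor at line 220 */
            for (i = 1; i < nr; i++)
                    block = _mm_aesenc_si128(block, rk[i]);	/* cf. aesenc at line 228 */
            return _mm_aesenclast_si128(block, rk[nr]);
    }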
|
| polyval-pclmul-avx.S |
     93  vpclmulqdq $0x00, (16*\i)(KEY_POWERS), %xmm0, %xmm1
     97  vpxor %xmm1, LO, LO
    108  vpclmulqdq $0x01, %xmm0, %xmm1, MI
    109  vpclmulqdq $0x10, %xmm0, %xmm1, %xmm2
    110  vpclmulqdq $0x00, %xmm0, %xmm1, LO
    111  vpclmulqdq $0x11, %xmm0, %xmm1, HI
    237  movups (KEY_POWERS), %xmm1
    273  movups (%rsi), %xmm1
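
The four vpclmulqdq immediates at lines 108-111 ($0x00, $0x01, $0x10, $0x11) select which 64-bit half of each source participates, so together they form the full 128x128 -> 256-bit carry-less schoolbook product that POLYVAL then reduces. A C sketch of that step with the PCLMULQDQ intrinsic; the names are illustrative and the modular reduction is omitted:

    #include <wmmintrin.h>	/* PCLMULQDQ */

    /* Carry-less (GF(2)[x]) schoolbook multiply of two 128-bit operands.
     * imm8 bit 0 picks the 64-bit half of a, bit 4 the half of b. */
    static void clmul_schoolbook(__m128i a, __m128i b,
                                 __m128i *lo, __m128i *mi, __m128i *hi)
    {
            *lo = _mm_clmulepi64_si128(a, b, 0x00);			/* a.lo * b.lo */
            *hi = _mm_clmulepi64_si128(a, b, 0x11);			/* a.hi * b.hi */
            *mi = _mm_xor_si128(_mm_clmulepi64_si128(a, b, 0x01),	/* a.hi * b.lo */
                                _mm_clmulepi64_si128(a, b, 0x10));	/* a.lo * b.hi */
            /* The middle term is later split across LO and HI before the
             * reduction (not shown). */
    }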
|
| sha512-ssse3-asm.S |
    215  movdqa %xmm2, %xmm1			# XMM1 = W[t-2]
    221  psllq $(64-19)-(64-61) , %xmm1		# XMM1 = W[t-2] << 42
    228  pxor %xmm2, %xmm1			# XMM1 = (W[t-2] << 42)^W[t-2]
    234  psllq $(64-61), %xmm1			# XMM1 = ((W[t-2] << 42)^W[t-2])<<3
    240  pxor %xmm1, %xmm0			# XMM0 = s1(W[t-2])
    244  movdqu W_t(idx), %xmm1			# XMM1 = W[t-7]
    253  paddq %xmm1, %xmm0			# XMM0 = s1(W[t-2]) + W[t-7] + s0(W[t-15]) + W[t-16]
    309  movdqa XMM_QWORD_BSWAP(%rip), %xmm1
    311  pshufb %xmm1, %xmm0			# BSWAP
    318  pshufb %xmm1, %xmm0			# BSWAP
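
The psllq/psrlq/pxor chain at lines 215-240 builds the SHA-512 message-schedule function s1 out of shifts, since SSE2 has no 64-bit rotate: the rotates by 19 and 61 are composed from paired shifts and XORed with a plain right shift by 6. A scalar C sketch of the function the vector code evaluates two lanes at a time:

    #include <stdint.h>

    static inline uint64_t ror64(uint64_t x, unsigned int r)
    {
            return (x >> r) | (x << (64 - r));
    }

    /* s1(x) = ROTR64(x, 19) ^ ROTR64(x, 61) ^ (x >> 6) */
    static inline uint64_t sha512_s1(uint64_t x)
    {
            return ror64(x, 19) ^ ror64(x, 61) ^ (x >> 6);
    }

    /* Schedule update, cf. the comment at line 253:
     * W[t] = s1(W[t-2]) + W[t-7] + s0(W[t-15]) + W[t-16]  (mod 2^64) */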
|
| chacha-avx512vl-x86_64.S |
    126  vextracti128 $1,%ymm7,%xmm1
    149  vmovdqa %xmm1,%xmm7
    184  vmovdqu8 (%rdx,%r9),%xmm1{%k1}{z}
    185  vpxord %xmm7,%xmm1,%xmm1
    186  vmovdqu8 %xmm1,(%rsi,%r9){%k1}
    338  vextracti128 $1,%ymm10,%xmm1
    361  vmovdqa %xmm1,%xmm10
    450  vmovdqu8 (%rdx,%r9),%xmm1{%k1}{z}
    451  vpxord %xmm10,%xmm1,%xmm1
    452  vmovdqu8 %xmm1,(%rsi,%r9){%k1}
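
The vmovdqu8 ... {%k1}{z} hits show how the AVX-512VL code handles a partial final block: a byte mask derived from the remaining length gates both the load (zeroing unselected bytes) and the store, so nothing outside the buffer is touched. A small C sketch of the same idea with AVX-512VL/BW mask intrinsics; the function name and calling convention are illustrative:

    #include <immintrin.h>

    /* XOR up to 16 keystream bytes into a partial tail block (len <= 16). */
    static void chacha_xor_tail(void *dst, const void *src,
                                __m128i keystream, unsigned int len)
    {
            __mmask16 k = (__mmask16)((1u << len) - 1);	/* cf. %k1 */
            __m128i d = _mm_maskz_loadu_epi8(k, src);	/* cf. vmovdqu8 ...{%k1}{z} */

            d = _mm_xor_si128(d, keystream);		/* cf. vpxord */
            _mm_mask_storeu_epi8(dst, k, d);		/* cf. vmovdqu8 ...{%k1} */
    }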
|
| sha512-avx-asm.S |
    181  vpsrlq $19, %xmm4, %xmm1			# XMM1 = W[t-2]>>19
    184  vpxor %xmm1, %xmm0, %xmm0		# XMM0 = W[t-2]>>61 ^ W[t-2]>>19
    229  vmovdqu W_t(idx), %xmm1			# XMM1 = W[t-7]
    241  vpaddq %xmm1, %xmm0, %xmm0		# XMM0 = W[t] = s1(W[t-2]) + W[t-7] +
    310  vmovdqa XMM_QWORD_BSWAP(%rip), %xmm1
    312  vpshufb %xmm1, %xmm0, %xmm0		# BSWAP
    319  vpshufb %xmm1, %xmm0, %xmm0		# BSWAP
|
| sha1-ni-asm.S |
     63  #define E0 %xmm1	/* Need two E's b/c they ping pong */
|
| nh-sse2.S |
     13  #define PASS1_SUMS %xmm1
|
| sha256-ni-asm.S |
     65  #define STATE0 %xmm1
    203  #define STATE0_A %xmm1
|
| chacha-avx2-x86_64.S |
    153  vextracti128 $1,%ymm7,%xmm1
    176  vmovdqa %xmm1,%xmm7
    404  vextracti128 $1,%ymm10,%xmm1
    427  vmovdqa %xmm1,%xmm10
|
| /linux/lib/crc/x86/ |
| crc-pclmul-template.S |
    310  _prepare_v0 16, %xmm0, %xmm1, BSWAP_MASK_XMM
    390  vextracti128 $1, %ymm0, %xmm1
    391  _fold_vec_final 16, %xmm0, %xmm1, CONSTS_XMM, BSWAP_MASK_XMM, %xmm4, %xmm5
    419  _cond_vex pshufb, %xmm3, %xmm0, %xmm1
    435  pblendvb %xmm2, %xmm1		// uses %xmm0 as implicit operand
    438  vpblendvb %xmm3, -16(BUF,LEN), %xmm1, %xmm1
    440  vpblendvb %xmm3, %xmm2, %xmm1, %xmm1
    444  _fold_vec %xmm0, %xmm1, CONSTS_XMM, %xmm4
    469  _pclmulqdq CONSTS_XMM, LO64_TERMS, %xmm0, HI64_TERMS, %xmm1
    475  _cond_vex pxor, %xmm1, %xmm0, %xmm0
    [all …]
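
The _fold_vec and _pclmulqdq hits implement the usual PCLMULQDQ CRC folding step: the running 128-bit remainder is multiplied, one 64-bit half at a time, by precomputed constants of the form x^k mod G (k chosen for the stride), and both products are XORed into the next block of input. A hedged C sketch of one 16-byte fold; the constants, bit order, and final reduction depend on the CRC variant and are not shown:

    #include <wmmintrin.h>	/* PCLMULQDQ */

    /* Fold accumulator acc over one 16-byte block of input data.
     * consts holds the two folding constants in its 64-bit halves. */
    static __m128i crc_fold16(__m128i acc, __m128i data, __m128i consts)
    {
            __m128i lo = _mm_clmulepi64_si128(acc, consts, 0x00);	/* acc.lo * consts.lo */
            __m128i hi = _mm_clmulepi64_si128(acc, consts, 0x11);	/* acc.hi * consts.hi */

            return _mm_xor_si128(_mm_xor_si128(lo, hi), data);
    }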
|
| /linux/arch/x86/crypto/ |
| aesni-intel_asm.S |
     28  #define IN1 %xmm1
     69  pshufd $0b11111111, %xmm1, %xmm1
     74  pxor %xmm1, %xmm0
     82  pshufd $0b01010101, %xmm1, %xmm1
     87  pxor %xmm1, %xmm0
     96  movaps %xmm0, %xmm1
     99  shufps $0b01001110, %xmm2, %xmm1
    100  movaps %xmm1, 0x10(TKEYP)
    106  pshufd $0b01010101, %xmm1, %xmm1
    111  pxor %xmm1, %xmm0
    [all …]
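
The pshufd $0b11111111 / pxor pattern at lines 69-111 is the classic AES-NI key-expansion helper: broadcast the word produced by aeskeygenassist across the register, then fold it into a running XOR of the previous round key's words. A hedged C sketch of one AES-128 expansion step with intrinsics (the kernel does this in assembly, and the other pshufd/shufps variants above handle AES-192/256):

    #include <emmintrin.h>	/* SSE2 */
    #include <wmmintrin.h>	/* AES-NI */

    static __m128i aes128_expand_step(__m128i key, __m128i assist)
    {
            assist = _mm_shuffle_epi32(assist, 0xff);		/* pshufd $0b11111111 */
            key = _mm_xor_si128(key, _mm_slli_si128(key, 4));	/* xor-chain of the */
            key = _mm_xor_si128(key, _mm_slli_si128(key, 4));	/* previous key words */
            key = _mm_xor_si128(key, _mm_slli_si128(key, 4));
            return _mm_xor_si128(key, assist);			/* pxor %xmm1, %xmm0 */
    }

    /* e.g. round key 1 from round key 0:
     * rk1 = aes128_expand_step(rk0, _mm_aeskeygenassist_si128(rk0, 0x01)); */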
|
| aria-aesni-avx-asm_64.S |
    889  inpack16_post(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
    893  %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
    895  aria_fe(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7,
    899  %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
    901  aria_fe(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7,
    905  %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
    907  aria_fe(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7,
    911  %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
    913  aria_fe(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7,
    917  %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
    [all …]
|
| camellia-aesni-avx-asm_64.S |
    193  roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
    201  roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
    729  inpack16_post(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
    733  enc_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
    737  fls16(%rax, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
    745  enc_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
    749  fls16(%rax, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
    757  enc_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
    776  outunpack16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
    787  fls16(%rax, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
    [all …]
|
| aes-xts-avx-x86_64.S |
    319  _next_tweak TWEAK0_XMM, %xmm0, %xmm1
    320  vinserti128 $1, %xmm1, TWEAK0, TWEAK0
    759  _aes_crypt \enc, _XMM, TWEAK0_XMM, %xmm0, tmp=%xmm1
    787  _aes_crypt \enc, _XMM, TWEAK1_XMM, %xmm0, tmp=%xmm1
    800  vmovdqa %xmm0, %xmm1
    802  vmovdqu8 %xmm1, 16(DST){%k1}
    809  vmovdqu (SRC, LEN64, 1), %xmm1
    823  vpshufb %xmm3, %xmm1, %xmm1
    827  vpblendvb %xmm3, %xmm0, %xmm1, %xmm0
    830  _aes_crypt \enc, _XMM, TWEAK0_XMM, %xmm0, tmp=%xmm1
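
_next_tweak at line 319 advances the XTS tweak to the next block: multiply the 128-bit tweak by x in GF(2^128) modulo x^128 + x^7 + x^2 + x + 1, which the assembly does with SIMD shifts and a masked XOR of the 0x87 feedback byte. A scalar C sketch of the same update, assuming a little-endian layout with t[0] holding the low 64 bits; illustrative, not the kernel macro:

    #include <stdint.h>

    static void xts_next_tweak(uint64_t t[2])
    {
            uint64_t carry = t[1] >> 63;		/* bit that falls off the top */

            t[1] = (t[1] << 1) | (t[0] >> 63);
            t[0] = (t[0] << 1) ^ (carry ? 0x87 : 0);
    }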
|
| aes-gcm-vaes-avx512.S |
     356  _ghash_square H_CUR_XMM, H_INC_XMM, GFPOLY_XMM, %xmm0, %xmm1
     512  .set GHASHDATA1_XMM, %xmm1
    1007  _horizontal_xor HI, HI_XMM, GHASH_ACC_XMM, %xmm0, %xmm1, %xmm2
    1114  %xmm1, %xmm2, %xmm3
    1118  %xmm1, %xmm2, %xmm3
    1138  vpxor (%rax), GHASH_ACC, %xmm1
    1139  vaesenclast %xmm1, %xmm0, GHASH_ACC
    1142  vmovdqu (TAG), %xmm1
    1143  vpternlogd $0x96, (%rax), GHASH_ACC, %xmm1
    1144  vaesenclast %xmm1, %xmm0, %xmm0
|
| aes-gcm-vaes-avx2.S |
     231  .set TMP1_XMM, %xmm1
     510  .set TMP1_XMM, %xmm1
     717  .set GHASH_ACC_XMM, %xmm1
    1094  %xmm1, %xmm2, %xmm3
    1098  %xmm1, %xmm2, %xmm3
    1117  vpxor (%rax), GHASH_ACC, %xmm1
    1118  vaesenclast %xmm1, %xmm0, GHASH_ACC
    1125  vmovdqu (%rax, TAGLEN64), %xmm1
    1126  vpshufb BSWAP_MASK, %xmm1, %xmm1		// select low bytes, not high
    1128  vptest %xmm1, %xmm0
|
| aes-gcm-aesni-x86_64.S |
     551  _ghash_mul H_POW1, H_POW1_X64, H_CUR, GFPOLY, %xmm0, %xmm1
     602  _ghash_mul H_POW1, H_POW1_X64, GHASH_ACC, GFPOLY, %xmm0, %xmm1
     616  _ghash_mul H_POW1, H_POW1_X64, GHASH_ACC, GFPOLY, %xmm0, %xmm1
     708  .set TMP1, %xmm1
    1034  _xor_mem_to_reg (GHASH_ACC_PTR), GHASH_ACC, %xmm1
    1060  _ghash_mul_step \i, H_POW1, H_POW1_X64, GHASH_ACC, GFPOLY, %xmm1, %xmm2
    1063  _ghash_mul_step 9, H_POW1, H_POW1_X64, GHASH_ACC, GFPOLY, %xmm1, %xmm2
|
| ghash-clmulni-intel_asm.S |
     25  #define SHASH %xmm1
|
| sm4-aesni-avx2-asm_64.S |
     46  #define RX1x %xmm1
     50  #define RBSWAPx %xmm1
|
| sm4-aesni-avx-asm_64.S |
     23  #define RX1 %xmm1
     42  #define RBSWAP %xmm1
|
| cast6-avx-x86_64-asm_64.S |
     38  #define RB1 %xmm1
|
| twofish-avx-x86_64-asm_64.S |
     38  #define RB1 %xmm1
|
| /linux/arch/x86/entry/vdso/vdso64/ |
| vgetrandom-chacha.S |
     33  .set state0, %xmm1
|