/linux/arch/alpha/lib/
ev67-strrchr.S
    37 insbl a1, 2, t5 # U : 0000000000ch0000
    42 sll t5, 8, t3 # U : 00000000ch000000
    46 or t5, t3, t3 # E : 00000000chch0000
    53 lda t5, -1 # E : build garbage mask
    56 mskqh t5, a0, t4 # E : Complete garbage mask
    86 subq t4, 1, t5 # E : build a mask of the bytes up to...
    87 or t4, t5, t4 # E : ... and including the null
    102 lda t5, 0x3f($31) # E :
    103 subq t5, t2, t5 # [all...]
strrchr.S
    24 sll a1, 8, t5 # e0 : replicate our test character
    26 or t5, a1, a1 # e0 :
    28 sll a1, 16, t5 # e0 :
    30 or t5, a1, a1 # e0 :
    32 sll a1, 32, t5 # e0 :
    35 or t5, a1, a1 # .. e1 : character replication complete
    58 subq t4, 1, t5 # e0 : build a mask of the bytes up to...
    59 or t4, t5, t4 # e1 : ... and including the null
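The "mask of the bytes up to ... and including the null" comments at lines 58-59 describe the x | (x - 1) trick: given a bitmask with only the NUL byte's bit set, OR-ing with x - 1 also sets every lower bit. A minimal C sketch of the idea (illustrative, not the kernel's code):

    #include <stdint.h>

    /* If null_bits has a single bit set marking the NUL byte, then
     * (null_bits - 1) sets all bits below it, so the OR yields a mask
     * covering every byte position up to and including the NUL. */
    static uint64_t mask_through_null(uint64_t null_bits)
    {
        return null_bits | (null_bits - 1);
    }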
ev67-strchr.S
    34 insbl a1, 1, t5 # U : 000000000000ch00
    38 or t5, t3, a1 # E : 000000000000chch
    44 inswl a1, 2, t5 # E : 00000000chch0000
    48 or a3, t5, t5 # E : 0000chchchch0000
    53 or t5, a1, a1 # E : chchchchchchchch
strchr.S
    24 sll a1, 8, t5 # e0 : replicate the search character
    26 or t5, a1, a1 # e0 :
    28 sll a1, 16, t5 # e0 :
    31 or t5, a1, a1 # .. e1 :
    32 sll a1, 32, t5 # e0 :
    34 or t5, a1, a1 # e0 :
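Both strchr.S and strrchr.S open with the same shift-and-OR replication seen above: spread the search character into all eight byte lanes of a quadword so a whole word can be tested per iteration. A C sketch of the equivalent computation (assuming a 64-bit word; not the kernel's code):

    #include <stdint.h>

    /* Broadcast one byte across a 64-bit word: cc -> cccccccccccccccc. */
    static uint64_t replicate_byte(unsigned char c)
    {
        uint64_t v = c;

        v |= v << 8;    /* ......cc -> ....cccc (the sll/or at 24/26) */
        v |= v << 16;   /* ....cccc -> cccccccc in the low half       */
        v |= v << 32;   /* fill the high half as well                 */
        return v;
    }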
stxcpy.S
    239 and a1, 7, t5 # e0 : find src misalignment
    256 cmplt t4, t5, t12 # e0 :
    260 mskqh t2, t5, t2 # e0 :
    275 and a1, 7, t5 # .. e1 :
    278 srl t12, t5, t12 # e0 : adjust final null return value
ev6-stxcpy.S
    269 and a1, 7, t5 # E : find src misalignment
    287 cmplt t4, t5, t12 # E :
    291 mskqh t2, t5, t2 # U :
    304 and a1, 7, t5 # E :
    308 srl t12, t5, t12 # U : adjust final null return value
/linux/arch/riscv/lib/
memmove.S
    37 * Both Copy Modes: t5 - Inclusive first multibyte/aligned of dest
    50 * Byte copy does not need t5 or t6.
    65 * Now solve for t5 and t6.
    67 andi t5, t3, -SZREG
    78 beq t5, t3, 1f
    79 addi t5, t5, SZREG
    164 addi a2, t5, -SZREG /* The other breakpoint for the unrolled loop */
    212 bne t4, t5, 1b
    214 mv t4, t5 /* Fi [all...]
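The andi t5, t3, -SZREG / beq / addi t5, t5, SZREG sequence above computes the first register-aligned byte of the destination: AND with -SZREG rounds the address down, and one SZREG is added back when it was not already aligned. A C sketch of the same arithmetic (SZREG assumed to be 8, as on rv64):

    #include <stdint.h>

    #define SZREG 8  /* assumption: 8-byte registers (rv64) */

    /* andi t5, t3, -SZREG: clear the low bits, rounding down. */
    static uintptr_t align_down(uintptr_t addr)
    {
        return addr & (uintptr_t)-SZREG;
    }

    /* beq/addi pair: round up unless already aligned. */
    static uintptr_t align_up(uintptr_t addr)
    {
        uintptr_t down = align_down(addr);

        return down == addr ? addr : down + SZREG;
    }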
strncmp.S
    66 * t0, t1, t2, t3, t4, t5, t6
    70 li t5, -1
    85 bne t3, t5, 2f
    87 bne t3, t5, 2f
memcpy.S
    52 REG_L t5, 9*SZREG(a1)
    62 REG_S t5, 9*SZREG(t6)
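The REG_L/REG_S hits at fixed 9*SZREG offsets are one slot of an unrolled word-copy loop: a batch of register-width loads into distinct temporaries, then the matching stores, so loads can overlap stores. A compact C sketch of the pattern (unrolled by four rather than the kernel's wider factor; not the kernel's code):

    #include <stddef.h>
    #include <stdint.h>

    static void copy_words_x4(uintptr_t *dst, const uintptr_t *src,
                              size_t nwords)
    {
        size_t i;

        /* main unrolled body: load a group, then store it */
        for (i = 0; i + 4 <= nwords; i += 4) {
            uintptr_t t0 = src[i + 0], t1 = src[i + 1];
            uintptr_t t2 = src[i + 2], t3 = src[i + 3];

            dst[i + 0] = t0; dst[i + 1] = t1;
            dst[i + 2] = t2; dst[i + 3] = t3;
        }
        /* tail: whatever the unrolled body did not cover */
        for (; i < nwords; i++)
            dst[i] = src[i];
    }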
/linux/lib/crc/arm64/ |
crc-t10dif-core.S
    83 t5 .req v19
    150 tbl t5.16b, {\a16\().16b}, perm.16b
    154 eor \c64\().16b, t8.16b, t5.16b
    158 ext t6.16b, t5.16b, t5.16b, #8
    160 pmull t3.8h, t7.8b, t5.8b
    162 pmull2 t5.8h, t7.16b, t5.16b
    167 ext t7.16b, t5.16b, t5 [all...]
/linux/lib/zlib_dfltcc/ |
dfltcc_util.h
    43 size_t t5 = len2 ? *len2 : 0; in dfltcc() local
    49 register size_t r5 __asm__("r5") = t5; in dfltcc()
    64 t2 = r2; t3 = r3; t4 = r4; t5 = r5; in dfltcc()
    97 *len2 = t5; in dfltcc()
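The register size_t r5 __asm__("r5") = t5; line uses the GCC/Clang extension that pins a local variable to a named machine register, so inline asm with fixed-register requirements (here the s390 DFLTCC instruction) can read and update it. A minimal sketch of the idiom, assuming an s390 target as in the source file; the empty asm body is a placeholder, not the real DFLTCC sequence:

    #include <stddef.h>

    static size_t pass_in_fixed_reg(size_t len)
    {
        /* pin 'len' to r5 for the duration of the asm statement */
        register size_t r5 __asm__("r5") = len;

        /* "+r" marks r5 as read and possibly updated, the way
         * dfltcc() treats its length registers */
        __asm__ volatile("" : "+r"(r5) : : "memory");

        return r5;   /* copy the (possibly updated) value back out */
    }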
/linux/arch/powerpc/crypto/ |
ghashp10-ppc.pl
    60 my ($t4,$t5,$t6) = ($Hl,$H,$Hh);
    196 vsldoi $t5,$zero,$Xm1,8
    200 vxor $Xh1,$Xh1,$t5
    208 vsldoi $t5,$Xl1,$Xl1,8 # 2nd reduction phase
    212 vxor $t5,$t5,$Xh1
    214 vxor $Xl1,$Xl1,$t5
/linux/crypto/ |
ecc.c
    1134 u64 t5[ECC_MAX_DIGITS]; in ecc_point_double_jacobian() local
    1143 /* t5 = x1*y1^2 = A */ in ecc_point_double_jacobian()
    1144 vli_mod_mult_fast(t5, x1, t4, curve); in ecc_point_double_jacobian()
    1178 vli_mod_sub(z1, z1, t5, curve_prime, ndigits); in ecc_point_double_jacobian()
    1180 vli_mod_sub(z1, z1, t5, curve_prime, ndigits); in ecc_point_double_jacobian()
    1181 /* t5 = A - x3 */ in ecc_point_double_jacobian()
    1182 vli_mod_sub(t5, t5, z1, curve_prime, ndigits); in ecc_point_double_jacobian()
    1184 vli_mod_mult_fast(x1, x1, t5, curve); in ecc_point_double_jacobian()
    1235 u64 t5[ECC_MAX_DIGITS]; in xycz_add() local
    1278 u64 t5[ECC_MAX_DIGITS]; in xycz_add_c() local [all...]
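In ecc_point_double_jacobian(), t5 is a multi-digit scratch value: it first caches A = X1*Y1^2 and is later rewritten as A - x3 for the y-coordinate update. A toy C sketch of that flow, with single-word modular helpers standing in for the kernel's multi-digit vli_mod_mult_fast()/vli_mod_sub() (hypothetical, for illustration only):

    #include <stdint.h>

    static uint64_t mod_mul(uint64_t a, uint64_t b, uint64_t p)
    {
        return (uint64_t)(((unsigned __int128)a * b) % p);  /* GCC/Clang */
    }

    static uint64_t mod_sub(uint64_t a, uint64_t b, uint64_t p)
    {
        return a >= b ? a - b : p - (b - a);   /* inputs assumed < p */
    }

    /* Mirror of the t5 lifetime visible above: t5 = A = X1*Y1^2, then,
     * once x3 exists, t5 = A - x3 feeding the y-coordinate multiply. */
    static uint64_t t5_flow(uint64_t x1, uint64_t y1, uint64_t x3,
                            uint64_t p)
    {
        uint64_t t4 = mod_mul(y1, y1, p);   /* t4 = Y1^2        */
        uint64_t t5 = mod_mul(x1, t4, p);   /* t5 = A = X1*Y1^2 */

        t5 = mod_sub(t5, x3, p);            /* t5 = A - x3      */
        return t5;
    }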
/linux/arch/loongarch/mm/ |
page.S
    49 ld.d t5, a1, 40
    62 st.d t5, a0, 40
    64 ld.d t5, a1, 104
    77 st.d t5, a0, -24
/linux/arch/riscv/include/asm/ |
compat.h
    69 compat_ulong_t t5; member
    106 cregs->t5 = (compat_ulong_t) regs->t5; in regs_to_cregs()
    143 regs->t5 = (unsigned long) cregs->t5; in cregs_to_regs()
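regs_to_cregs()/cregs_to_regs() apply the same per-member cast to every GPR, t5 included: the compat struct stores 32-bit values, the native one 64-bit. A trimmed-down C sketch with one member instead of the full register file (illustrative, not the kernel's structs):

    #include <stdint.h>

    typedef uint32_t compat_ulong_t;

    struct pt_regs_native { unsigned long t5; };
    struct pt_regs_compat { compat_ulong_t t5; };

    static void regs_to_cregs(struct pt_regs_compat *cregs,
                              const struct pt_regs_native *regs)
    {
        cregs->t5 = (compat_ulong_t)regs->t5;    /* truncate to 32 bits */
    }

    static void cregs_to_regs(struct pt_regs_native *regs,
                              const struct pt_regs_compat *cregs)
    {
        regs->t5 = (unsigned long)cregs->t5;     /* zero-extend to 64 bits */
    }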
/linux/scripts/ |
makelst
    28 t5=`field 1 $t1`
    29 t6=`printf "%lu" $((0x$t4 - 0x$t5))`
/linux/arch/mips/kernel/ |
scall32-o32.S
    61 load_a4: user_lw(t5, 16(t0)) # argument #5 from usp
    67 sw t5, PT_ARG4(sp) # argument #5 to ksp
    157 li t5, 0
    196 lw t5, 24(sp)
    199 sw t5, 20(sp)
/linux/arch/riscv/kernel/ |
mcount.S
    96 REG_L t5, 0(t3)
    97 bne t5, t4, .Ldo_trace
    126 jalr t5
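The three mcount.S lines implement the usual ftrace gate: load the global tracer callback, skip the call while it still points at the default stub, otherwise call through it. A C rendering of the same check (names follow the generic ftrace convention; treat the exact symbols as an assumption here):

    typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);

    static void ftrace_stub(unsigned long ip, unsigned long parent_ip) { }

    /* rewritten by the tracing core when a tracer attaches */
    static ftrace_func_t ftrace_trace_function = ftrace_stub;

    static void mcount_check(unsigned long ip, unsigned long parent_ip)
    {
        ftrace_func_t fn = ftrace_trace_function;  /* REG_L t5, 0(t3)        */

        if (fn != ftrace_stub)                     /* bne t5, t4, .Ldo_trace */
            fn(ip, parent_ip);                     /* jalr t5                */
    }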
/linux/arch/x86/crypto/ |
camellia-aesni-avx2-asm_64.S
    63 #define roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \ argument
    70 vbroadcasti128 .Lpre_tf_lo_s1(%rip), t5; \
    87 filter_8bit(x0, t5, t6, t7, t4); \
    88 filter_8bit(x7, t5, t6, t7, t4); \
    95 filter_8bit(x2, t5, t6, t7, t4); \
    96 filter_8bit(x5, t5, t6, t7, t4); \
    97 filter_8bit(x1, t5, t6, t7, t4); \
    98 filter_8bit(x4, t5, t6, t7, t4); \
    104 vextracti128 $1, x5, t5##_x; \
    125 vaesenclast t4##_x, t5##_ [all...]
camellia-aesni-avx-asm_64.S
    51 #define roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \ argument
    108 vmovdqa .Lpost_tf_hi_s2(%rip), t5; \
    116 filter_8bit(x1, t4, t5, t7, t2); \
    117 filter_8bit(x4, t4, t5, t7, t2); \
    119 vpsrldq $5, t0, t5; \
    129 vpsrldq $2, t5, t7; \
    165 vpsrldq $1, t5, t3; \
    166 vpshufb t6, t5, t5; \
    181 vpxor t5, x [all...]
/linux/arch/x86/include/asm/ |
syscall_wrapper.h
    63 #define SYSCALL_PT_ARG6(m, t1, t2, t3, t4, t5, t6) \ argument
    64 SYSCALL_PT_ARG5(m, t1, t2, t3, t4, t5), m(t6, (regs->bp))
    65 #define SYSCALL_PT_ARG5(m, t1, t2, t3, t4, t5) \ argument
    66 SYSCALL_PT_ARG4(m, t1, t2, t3, t4), m(t5, (regs->di))
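Each SYSCALL_PT_ARG* level peels the last type off the list and binds it to a fixed pt_regs slot, so for a six-argument syscall t5 lands on regs->di and t6 on regs->bp. A self-contained toy that unrolls the same way (the bx/cx/dx/si slots for the first four arguments are an assumption based on the i386 syscall ABI, not taken from the lines above):

    #include <stdio.h>

    struct regs_toy { long bx, cx, dx, si, di, bp; };

    #define M(t, expr)  ((t)(expr))   /* the 'm' hook: cast slot to type */

    #define PT_ARG1(m, t1)                 m(t1, (regs->bx))
    #define PT_ARG2(m, t1, t2)             PT_ARG1(m, t1), m(t2, (regs->cx))
    #define PT_ARG3(m, t1, t2, t3)         PT_ARG2(m, t1, t2), m(t3, (regs->dx))
    #define PT_ARG4(m, t1, t2, t3, t4)     PT_ARG3(m, t1, t2, t3), m(t4, (regs->si))
    #define PT_ARG5(m, t1, t2, t3, t4, t5) PT_ARG4(m, t1, t2, t3, t4), m(t5, (regs->di))
    #define PT_ARG6(m, t1, t2, t3, t4, t5, t6) \
            PT_ARG5(m, t1, t2, t3, t4, t5), m(t6, (regs->bp))

    static long sys_demo(long a, long b, long c, long d, long e, long f)
    {
        return a + b + c + d + e + f;
    }

    int main(void)
    {
        struct regs_toy r = { 1, 2, 3, 4, 5, 6 }, *regs = &r;

        /* expands to sys_demo((long)(regs->bx), ..., (long)(regs->bp)) */
        printf("%ld\n", sys_demo(PT_ARG6(M, long, long, long, long, long, long)));
        return 0;
    }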
/linux/arch/arm64/crypto/ |
ghash-ce-core.S
    27 t5 .req v12
    73 ext t5.8b, \ad\().8b, \ad\().8b, #2 // A2
    81 tbl t5.16b, {\ad\().16b}, perm2.16b // A2
    102 pmull\t t5.8h, t5.\nb, \bd // H = A2*B
    110 eor t5.16b, t5.16b, t6.16b // M = G + H
    113 uzp1 t4.2d, t3.2d, t5.2d
    114 uzp2 t3.2d, t3.2d, t5.2d
    119 // t5 [all...]
/linux/arch/loongarch/kernel/ |
rethook_trampoline.S
    23 cfi_st t5, PT_R17
    58 cfi_ld t5, PT_R17
/linux/arch/arm/crypto/ |
aes-neonbs-core.S
    297 t0, t1, t2, t3, t4, t5, t6, t7, inv
    307 vext.8 \t5, \x5, \x5, #12
    310 veor \x5, \x5, \t5
    320 veor \t5, \t5, \x4
    335 veor \x7, \t1, \t5
    353 t0, t1, t2, t3, t4, t5, t6, t7
    358 vld1.8 {\t4-\t5}, [bskey, :256]!
    364 veor \x5, \x5, \t5
    379 vext.8 \t5, \x [all...]
/linux/arch/alpha/include/uapi/asm/ |
regdef.h
    12 #define t5 $6 macro