/linux-6.12.1/arch/alpha/lib/ |
D | ev67-strrchr.S |
      37  insbl a1, 2, t5 # U : 0000000000ch0000
      42  sll t5, 8, t3 # U : 00000000ch000000
      46  or t5, t3, t3 # E : 00000000chch0000
      53  lda t5, -1 # E : build garbage mask
      56  mskqh t5, a0, t4 # E : Complete garbage mask
      86  subq t4, 1, t5 # E : build a mask of the bytes up to...
      87  or t4, t5, t4 # E : ... and including the null
     102  lda t5, 0x3f($31) # E :
     103  subq t5, t2, t5 # E : Normalize leading zero count
     105  addq t6, t5, v0 # E : and add to quadword address
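Lines 86-87 show a classic trick: once a byte-wise compare has flagged the null's position with a set bit, OR-ing the flags with themselves minus one extends the mask down to cover every byte up to and including the null. A minimal C sketch of that step (the flag value is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t null_bits = 0x10;                    /* say the null is byte 4 */
        uint64_t upto = null_bits | (null_bits - 1);  /* subq t4, 1, t5 ; or t4, t5, t4 */
        printf("%#llx\n", (unsigned long long)upto);  /* 0x1f: bytes 0..4 */
        return 0;
    }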
|
D | strrchr.S |
      24  sll a1, 8, t5 # e0 : replicate our test character
      26  or t5, a1, a1 # e0 :
      28  sll a1, 16, t5 # e0 :
      30  or t5, a1, a1 # e0 :
      32  sll a1, 32, t5 # e0 :
      35  or t5, a1, a1 # .. e1 : character replication complete
      58  subq t4, 1, t5 # e0 : build a mask of the bytes up to...
      59  or t4, t5, t4 # e1 : ... and including the null
|
D | strchr.S |
      24  sll a1, 8, t5 # e0 : replicate the search character
      26  or t5, a1, a1 # e0 :
      28  sll a1, 16, t5 # e0 :
      31  or t5, a1, a1 # .. e1 :
      32  sll a1, 32, t5 # e0 :
      34  or t5, a1, a1 # e0 :
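Both strchr.S and strrchr.S open with the same doubling sequence: three shift/OR pairs smear the search character into all eight byte lanes of a quadword. A C model of lines 24-34:

    #include <stdint.h>

    /* ch -> chch -> chchchch -> chchchchchchchch; equivalent to
     * multiplying the byte by 0x0101010101010101. */
    static uint64_t replicate_byte(uint64_t ch)
    {
        ch |= ch << 8;    /* sll a1, 8, t5 ; or t5, a1, a1  */
        ch |= ch << 16;   /* sll a1, 16, t5 ; or t5, a1, a1 */
        ch |= ch << 32;   /* sll a1, 32, t5 ; or t5, a1, a1 */
        return ch;
    }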
|
D | ev67-strchr.S |
      34  insbl a1, 1, t5 # U : 000000000000ch00
      38  or t5, t3, a1 # E : 000000000000chch
      44  inswl a1, 2, t5 # E : 00000000chch0000
      48  or a3, t5, t5 # E : 0000chchchch0000
      53  or t5, a1, a1 # E : chchchchchchchch
|
D | stxcpy.S |
     239  and a1, 7, t5 # e0 : find src misalignment
     256  cmplt t4, t5, t12 # e0 :
     260  mskqh t2, t5, t2 # e0 :
     275  and a1, 7, t5 # .. e1 :
     278  srl t12, t5, t12 # e0 : adjust final null return value
|
D | ev6-stxcpy.S |
     269  and a1, 7, t5 # E : find src misalignment
     287  cmplt t4, t5, t12 # E :
     291  mskqh t2, t5, t2 # U :
     304  and a1, 7, t5 # E :
     308  srl t12, t5, t12 # U : adjust final null return value
|
/linux-6.12.1/arch/riscv/lib/ |
D | memmove.S |
      67  andi t5, t3, -SZREG
      78  beq t5, t3, 1f
      79  addi t5, t5, SZREG
     164  addi a2, t5, -SZREG /* The other breakpoint for the unrolled loop */
     212  bne t4, t5, 1b
     214  mv t4, t5 /* Fix the dest pointer in case the loop was broken */
     247  bne t4, t5, 1b
     261  beq t3, t5, 2f
     267  bne t3, t5, 1b
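Lines 67-79 compute an alignment breakpoint: ANDing with -SZREG (all ones except the low bits) rounds an address down to a register-word boundary, and one extra SZREG rounds it back up when it was not already aligned. A sketch, assuming a 64-bit build where SZREG is 8:

    #include <stdint.h>

    #define SZREG 8  /* assumption: rv64 */

    static uintptr_t round_up_to_szreg(uintptr_t addr)
    {
        uintptr_t down = addr & (uintptr_t)-SZREG;  /* andi t5, t3, -SZREG */
        if (down == addr)                           /* beq t5, t3, 1f      */
            return down;                            /* already aligned     */
        return down + SZREG;                        /* addi t5, t5, SZREG  */
    }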
|
D | strncmp.S |
      69  li t5, -1
      84  bne t3, t5, 2f
      86  bne t3, t5, 2f
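Only three lines are shown, but the shape is recognizable: t5 holds an all-ones sentinel, and a word that fails to compare equal to it signals that a zero byte was seen. This presumably pairs with Zbb's orc.b, which maps every nonzero byte to 0xff; plain C can answer the same "does this word contain a NUL?" question with the classic bit trick:

    #include <stdbool.h>
    #include <stdint.h>

    static bool has_zero_byte(uint64_t w)
    {
        /* The subtraction borrows into bit 7 of a byte only if that byte is 0. */
        return ((w - 0x0101010101010101ull) & ~w & 0x8080808080808080ull) != 0;
    }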
|
D | memcpy.S |
      52  REG_L t5, 9*SZREG(a1)
      62  REG_S t5, 9*SZREG(t6)
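The 9*SZREG offsets suggest an unrolled loop that loads a run of ten register-sized words before storing any of them, so loads and stores each issue back to back. A hedged C sketch of that structure (the factor of ten is inferred from the offsets shown, not confirmed by these two lines):

    #include <stddef.h>
    #include <stdint.h>

    static void copy_words(uint64_t *dst, const uint64_t *src, size_t nwords)
    {
        size_t i = 0;
        for (; i + 10 <= nwords; i += 10) {
            uint64_t t[10];
            for (int j = 0; j < 10; j++)
                t[j] = src[i + j];        /* REG_L tN, j*SZREG(a1) */
            for (int j = 0; j < 10; j++)
                dst[i + j] = t[j];        /* REG_S tN, j*SZREG(t6) */
        }
        for (; i < nwords; i++)           /* tail, one word at a time */
            dst[i] = src[i];
    }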
|
/linux-6.12.1/arch/arm64/crypto/ |
D | crct10dif-ce-core.S |
      85  t5 .req v19
     137  ext t5.8b, ad.8b, ad.8b, #2 // A2
     142  pmull t5.8h, t5.8b, fold_consts.8b // H = A2*B
     151  tbl t5.16b, {ad.16b}, perm2.16b // A2
     156  pmull2 t5.8h, t5.16b, fold_consts.16b // H = A2*B
     163  eor t5.16b, t5.16b, t7.16b // M = G + H
     166  uzp1 t8.2d, t4.2d, t5.2d
     167  uzp2 t4.2d, t4.2d, t5.2d
     184  zip2 t5.2d, t8.2d, t4.2d
     190  ext t5.16b, t5.16b, t5.16b, #14
     [all …]
|
D | ghash-ce-core.S |
      27  t5 .req v12
      73  ext t5.8b, \ad\().8b, \ad\().8b, #2 // A2
      81  tbl t5.16b, {\ad\().16b}, perm2.16b // A2
     102  pmull\t t5.8h, t5.\nb, \bd // H = A2*B
     110  eor t5.16b, t5.16b, t6.16b // M = G + H
     113  uzp1 t4.2d, t3.2d, t5.2d
     114  uzp2 t3.2d, t3.2d, t5.2d
     131  zip2 t5.2d, t4.2d, t3.2d
     137  ext t5.16b, t5.16b, t5.16b, #14
     141  eor t3.16b, t3.16b, t5.16b
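In both the CRC-T10DIF and GHASH code, t5 carries one partial product (H = A2*B) of a multiplication split across byte slices, and "M = G + H" is an XOR because these are carry-less (polynomial) products over GF(2). A toy model of the pmull primitive on single bytes:

    #include <stdint.h>

    /* Carry-less multiply: schoolbook multiplication, except the partial
     * products combine with XOR instead of addition, so nothing carries. */
    static uint16_t clmul8(uint8_t a, uint8_t b)
    {
        uint16_t r = 0;
        for (int i = 0; i < 8; i++)
            if (b & (1u << i))
                r ^= (uint16_t)a << i;
        return r;
    }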
|
/linux-6.12.1/lib/zlib_dfltcc/ |
D | dfltcc_util.h |
      43  size_t t5 = len2 ? *len2 : 0; in dfltcc() local
      49  register size_t r5 __asm__("r5") = t5; in dfltcc()
      64  t2 = r2; t3 = r3; t4 = r4; t5 = r5; in dfltcc()
      97  *len2 = t5; in dfltcc()
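Here t5 is plain C staging for a hard-pinned register: the value is copied into a variable bound to s390's r5 via GCC's register ... __asm__("reg") extension, the instruction runs, and the possibly-updated register is copied back out (t5 = r5). A minimal sketch of the same copy-in/copy-out pattern, using x86-64 registers purely for illustration:

    #include <stddef.h>

    static inline size_t pinned_add(size_t a, size_t b)
    {
        register size_t r0 __asm__("rax") = a;  /* copy in, like r5 = t5 */
        register size_t r1 __asm__("rcx") = b;
        __asm__("add %1, %0" : "+r"(r0) : "r"(r1) : "cc");
        return r0;                              /* copy out, like t5 = r5 */
    }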
|
/linux-6.12.1/arch/powerpc/crypto/ |
D | ghashp10-ppc.pl |
      60  my ($t4,$t5,$t6) = ($Hl,$H,$Hh);
     196  vsldoi $t5,$zero,$Xm1,8
     200  vxor $Xh1,$Xh1,$t5
     208  vsldoi $t5,$Xl1,$Xl1,8 # 2nd reduction phase
     212  vxor $t5,$t5,$Xh1
     214  vxor $Xl1,$Xl1,$t5
|
/linux-6.12.1/crypto/ |
D | ecc.c |
    1134  u64 t5[ECC_MAX_DIGITS]; in ecc_point_double_jacobian() local
    1144  vli_mod_mult_fast(t5, x1, t4, curve); in ecc_point_double_jacobian()
    1178  vli_mod_sub(z1, z1, t5, curve_prime, ndigits); in ecc_point_double_jacobian()
    1180  vli_mod_sub(z1, z1, t5, curve_prime, ndigits); in ecc_point_double_jacobian()
    1182  vli_mod_sub(t5, t5, z1, curve_prime, ndigits); in ecc_point_double_jacobian()
    1184  vli_mod_mult_fast(x1, x1, t5, curve); in ecc_point_double_jacobian()
    1235  u64 t5[ECC_MAX_DIGITS]; in xycz_add() local
    1240  vli_mod_sub(t5, x2, x1, curve_prime, ndigits); in xycz_add()
    1242  vli_mod_square_fast(t5, t5, curve); in xycz_add()
    1244  vli_mod_mult_fast(x1, x1, t5, curve); in xycz_add()
    [all …]
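In xycz_add(), the three calls at lines 1240-1244 compute (x2 - x1)^2 and then x1 * (x2 - x1)^2, intermediate terms of a co-Z point addition. A toy walkthrough with small integers standing in for the vli_* big-number helpers (the prime and coordinates are made up):

    #include <stdint.h>
    #include <stdio.h>

    #define P 97u  /* hypothetical small prime in place of curve_prime */

    int main(void)
    {
        uint32_t x1 = 11, x2 = 53;
        uint32_t t5 = (x2 + P - x1) % P;  /* vli_mod_sub(t5, x2, x1, ...)       */
        t5 = (t5 * t5) % P;               /* vli_mod_square_fast(t5, t5, ...)   */
        x1 = (x1 * t5) % P;               /* vli_mod_mult_fast(x1, x1, t5, ...) */
        printf("t5 = %u, x1 = %u\n", t5, x1);
        return 0;
    }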
|
/linux-6.12.1/arch/loongarch/mm/ |
D | page.S |
      49  ld.d t5, a1, 40
      62  st.d t5, a0, 40
      64  ld.d t5, a1, 104
      77  st.d t5, a0, -24
|
/linux-6.12.1/arch/riscv/include/asm/ |
D | compat.h |
      70  compat_ulong_t t5; member
     107  cregs->t5 = (compat_ulong_t) regs->t5; in regs_to_cregs()
     144  regs->t5 = (unsigned long) cregs->t5; in cregs_to_regs()
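These conversion helpers are deliberate truncating copies: a 64-bit kernel narrows each saved register to 32 bits for a compat (32-bit) task and zero-extends it back on the way in. Reduced to the single t5 field:

    #include <stdint.h>

    typedef uint32_t compat_ulong_t;

    struct xregs  { uint64_t       t5; };  /* native pt_regs field */
    struct cxregs { compat_ulong_t t5; };  /* compat counterpart   */

    static void regs_to_cregs(struct cxregs *c, const struct xregs *r)
    {
        c->t5 = (compat_ulong_t)r->t5;     /* keep only the low 32 bits   */
    }

    static void cregs_to_regs(struct xregs *r, const struct cxregs *c)
    {
        r->t5 = (uint64_t)c->t5;           /* zero-extend back to 64 bits */
    }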
|
/linux-6.12.1/scripts/ |
D | makelst |
      28  t5=`field 1 $t1`
      29  t6=`printf "%lu" $((0x$t4 - 0x$t5))`
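Line 29 subtracts two hexadecimal addresses and prints the difference in decimal, giving an offset into the disassembly. The same computation modeled in C, with made-up address strings:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        const char *t4 = "c01001a4", *t5 = "c0100180";  /* hypothetical values */
        unsigned long diff = strtoul(t4, NULL, 16) - strtoul(t5, NULL, 16);
        printf("%lu\n", diff);  /* prints 36, i.e. 0x24 */
        return 0;
    }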
|
/linux-6.12.1/arch/mips/kernel/ |
D | scall32-o32.S |
      61  load_a4: user_lw(t5, 16(t0)) # argument #5 from usp
      67  sw t5, 16(sp) # argument #5 to ksp
     157  li t5, 0
     196  lw t5, 24(sp)
     199  sw t5, 20(sp)
|
/linux-6.12.1/arch/riscv/kernel/ |
D | mcount.S |
      96  REG_L t5, 0(t3)
      97  bne t5, t4, .Ldo_trace
     126  jalr t5
|
/linux-6.12.1/arch/x86/crypto/ |
D | camellia-aesni-avx2-asm_64.S |
      62  #define roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \ argument
      69  vbroadcasti128 .Lpre_tf_lo_s1(%rip), t5; \
      86  filter_8bit(x0, t5, t6, t7, t4); \
      87  filter_8bit(x7, t5, t6, t7, t4); \
      94  filter_8bit(x2, t5, t6, t7, t4); \
      95  filter_8bit(x5, t5, t6, t7, t4); \
      96  filter_8bit(x1, t5, t6, t7, t4); \
      97  filter_8bit(x4, t5, t6, t7, t4); \
     103  vextracti128 $1, x5, t5##_x; \
     124  vaesenclast t4##_x, t5##_x, t5##_x; \
     [all …]
|
D | camellia-aesni-avx-asm_64.S |
      50  #define roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \ argument
     107  vmovdqa .Lpost_tf_hi_s2(%rip), t5; \
     115  filter_8bit(x1, t4, t5, t7, t2); \
     116  filter_8bit(x4, t4, t5, t7, t2); \
     118  vpsrldq $5, t0, t5; \
     128  vpsrldq $2, t5, t7; \
     164  vpsrldq $1, t5, t3; \
     165  vpshufb t6, t5, t5; \
     180  vpxor t5, x2, x2; \
|
/linux-6.12.1/arch/x86/include/asm/ |
D | syscall_wrapper.h |
      63  #define SYSCALL_PT_ARG6(m, t1, t2, t3, t4, t5, t6) \ argument
      64  SYSCALL_PT_ARG5(m, t1, t2, t3, t4, t5), m(t6, (regs->bp))
      65  #define SYSCALL_PT_ARG5(m, t1, t2, t3, t4, t5) \ argument
      66  SYSCALL_PT_ARG4(m, t1, t2, t3, t4), m(t5, (regs->di))
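Each SYSCALL_PT_ARGn macro peels one argument type off and delegates to SYSCALL_PT_ARGn-1, so the full expansion is n comma-separated applications of the mapping macro m, one per saved-register slot. A self-contained toy with three levels (the register slots below are hypothetical, not the real x86 mapping):

    #include <stdio.h>

    struct pt_regs { unsigned long di, si, dx; };

    #define MAP(t, r)              ((t)(r))
    #define PT_ARG1(m, t1)         m(t1, regs->di)
    #define PT_ARG2(m, t1, t2)     PT_ARG1(m, t1), m(t2, regs->si)
    #define PT_ARG3(m, t1, t2, t3) PT_ARG2(m, t1, t2), m(t3, regs->dx)

    static long sys_demo(int a, long b, unsigned c) { return a + b + (long)c; }

    int main(void)
    {
        struct pt_regs r = { 1, 2, 3 }, *regs = &r;
        /* Expands to sys_demo((int)(regs->di), (long)(regs->si), (unsigned)(regs->dx)) */
        printf("%ld\n", sys_demo(PT_ARG3(MAP, int, long, unsigned)));  /* 6 */
        return 0;
    }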
|
/linux-6.12.1/arch/riscv/kvm/ |
D | vcpu_switch.S |
      49  REG_L t5, (KVM_ARCH_GUEST_SEPC)(a0)
      67  csrw CSR_SEPC, t5
     105  REG_L t5, (KVM_ARCH_GUEST_T5)(a0)
     149  REG_S t5, (KVM_ARCH_GUEST_T5)(a0)
     157  REG_L t5, (KVM_ARCH_HOST_SSTATUS)(a0)
     175  csrrw t5, CSR_SSTATUS, t5
     182  REG_S t5, (KVM_ARCH_GUEST_SSTATUS)(a0)
|
/linux-6.12.1/arch/loongarch/kernel/ |
D | rethook_trampoline.S |
      23  cfi_st t5, PT_R17
      58  cfi_ld t5, PT_R17
|
/linux-6.12.1/arch/arm/crypto/ |
D | aes-neonbs-core.S |
     297  t0, t1, t2, t3, t4, t5, t6, t7, inv
     307  vext.8 \t5, \x5, \x5, #12
     310  veor \x5, \x5, \t5
     320  veor \t5, \t5, \x4
     335  veor \x7, \t1, \t5
     353  t0, t1, t2, t3, t4, t5, t6, t7
     358  vld1.8 {\t4-\t5}, [bskey, :256]!
     364  veor \x5, \x5, \t5
     379  vext.8 \t5, \x5, \x5, #8
     382  veor \t5, \t5, \x5
     [all …]
|