/linux-6.12.1/lib/crypto/ |
D | sha256.c |
    54   static inline void LOAD_OP(int I, u32 *W, const u8 *input)
    56           W[I] = get_unaligned_be32((__u32 *)input + I);
    59   static inline void BLEND_OP(int I, u32 *W)
    61           W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16];
    66           t1 = h + e1(e) + Ch(e, f, g) + SHA256_K[i] + W[i]; \
    72   static void sha256_transform(u32 *state, const u8 *input, u32 *W)
    79           LOAD_OP(i + 0, W, input);
    80           LOAD_OP(i + 1, W, input);
    81           LOAD_OP(i + 2, W, input);
    82           LOAD_OP(i + 3, W, input);
    [all …]
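For context, BLEND_OP above is the FIPS 180-4 SHA-256 message-schedule recurrence. A self-contained C sketch of what it computes (ror32 and the small sigmas are written out here instead of coming from kernel headers; an illustration, not the kernel's code):

    #include <stdint.h>

    static inline uint32_t ror32(uint32_t v, int n)
    {
            return (v >> n) | (v << (32 - n));
    }

    /* FIPS 180-4 small sigmas, matching what the kernel's s0()/s1() compute */
    static inline uint32_t s0(uint32_t x) { return ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3); }
    static inline uint32_t s1(uint32_t x) { return ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10); }

    /* Expand one 16-word block into the full 64-word schedule, as BLEND_OP does */
    static void sha256_schedule(uint32_t W[64])
    {
            for (int i = 16; i < 64; i++)
                    W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
    }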
|
D | sha1.c |
    40   #define setW(x, val) (*(volatile __u32 *)&W(x) = (val))
    42   #define setW(x, val) do { W(x) = (val); __asm__("":::"memory"); } while (0)
    44   #define setW(x, val) (W(x) = (val))
    48   #define W(x) (array[(x)&15])
    55   #define SHA_MIX(t) rol32(W(t+13) ^ W(t+8) ^ W(t+2) ^ W(t), 1)
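W(x) keeps the SHA-1 schedule in a 16-word circular buffer: W[t] depends only on the previous 16 words, so the slot holding W[t-16] can be overwritten in place, and the three setW() variants differ only in how hard they stop the compiler from spilling that array. A minimal standalone sketch of the window (illustrative, not the kernel's code):

    #include <stdint.h>

    static inline uint32_t rol32(uint32_t v, int n)
    {
            return (v << n) | (v >> (32 - n));
    }

    #define W(x) (array[(x) & 15])
    /* t-3, t-8, t-14, t-16 taken mod 16 become t+13, t+8, t+2, t */
    #define SHA_MIX(t) rol32(W(t + 13) ^ W(t + 8) ^ W(t + 2) ^ W(t), 1)

    static void sha1_expand(uint32_t array[16])
    {
            for (int t = 16; t < 80; t++)
                    W(t) = SHA_MIX(t);   /* overwrites the slot that held W[t-16] */
    }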
|
/linux-6.12.1/arch/powerpc/crypto/ |
D | sha1-powerpc-asm.S |
    34   #define W(t) (((t)%16)+16)
    37   LWZ(W(t),(t)*4,r4)
    46   add r14,r0,W(t); \
    47   LWZ(W((t)+4),((t)+4)*4,r4); \
    58   xor r5,W((t)+4-3),W((t)+4-8); \
    60   xor W((t)+4),W((t)+4-16),W((t)+4-14); \
    61   add r0,r0,W(t); \
    62   xor W((t)+4),W((t)+4),r5; \
    64   rotlwi W((t)+4),W((t)+4),1
    73   add r0,r0,W(t); \
    [all …]
|
/linux-6.12.1/Documentation/translations/zh_TW/arch/loongarch/ |
D | introduction.rst |
    205  ADD.W SUB.W ADDI.W ADD.D SUB.D ADDI.D
    208  MUL.W MULH.W MULH.WU DIV.W DIV.WU MOD.W MOD.WU
    211  LU12I.W LU32I.D LU52I.D ADDU16I.D
    215  SLL.W SRL.W SRA.W ROTR.W SLLI.W SRLI.W SRAI.W ROTRI.W
    220  EXT.W.B EXT.W.H CLO.W CLO.D CLZ.W CLZ.D CTO.W CTO.D CTZ.W CTZ.D
    221  BYTEPICK.W BYTEPICK.D BSTRINS.W BSTRINS.D BSTRPICK.W BSTRPICK.D
    222  REVB.2H REVB.4H REVB.2W REVB.D REVH.2W REVH.D BITREV.4B BITREV.8B BITREV.W BITREV.D
    231  LD.B LD.BU LD.H LD.HU LD.W LD.WU LD.D ST.B ST.H ST.W ST.D
    232  LDX.B LDX.BU LDX.H LDX.HU LDX.W LDX.WU LDX.D STX.B STX.H STX.W STX.D
    233  LDPTR.W LDPTR.D STPTR.W STPTR.D
    [all …]
|
/linux-6.12.1/Documentation/translations/zh_CN/arch/loongarch/ |
D | introduction.rst |
    205  ADD.W SUB.W ADDI.W ADD.D SUB.D ADDI.D
    208  MUL.W MULH.W MULH.WU DIV.W DIV.WU MOD.W MOD.WU
    211  LU12I.W LU32I.D LU52I.D ADDU16I.D
    215  SLL.W SRL.W SRA.W ROTR.W SLLI.W SRLI.W SRAI.W ROTRI.W
    220  EXT.W.B EXT.W.H CLO.W CLO.D CLZ.W CLZ.D CTO.W CTO.D CTZ.W CTZ.D
    221  BYTEPICK.W BYTEPICK.D BSTRINS.W BSTRINS.D BSTRPICK.W BSTRPICK.D
    222  REVB.2H REVB.4H REVB.2W REVB.D REVH.2W REVH.D BITREV.4B BITREV.8B BITREV.W BITREV.D
    231  LD.B LD.BU LD.H LD.HU LD.W LD.WU LD.D ST.B ST.H ST.W ST.D
    232  LDX.B LDX.BU LDX.H LDX.HU LDX.W LDX.WU LDX.D STX.B STX.H STX.W STX.D
    233  LDPTR.W LDPTR.D STPTR.W STPTR.D
    [all …]
|
/linux-6.12.1/arch/x86/crypto/ |
D | sha1_ssse3_asm.S |
    312  .set W, W0
    320  .set W_minus_32, W
    331  .set W_minus_04, W
    332  .set W, W_minus_32
    353  movdqa W_TMP1, W
    375  movdqa W_minus_12, W
    376  palignr $8, W_minus_16, W # w[i-14]
    379  pxor W_minus_08, W
    382  pxor W_TMP1, W
    383  movdqa W, W_TMP2
    [all …]
|
D | sha512-ssse3-asm.S |
    99   # W[t]+K[t] (stack frame)
    126  add WK_2(idx), T1 # W[t] + K[t] from message scheduler
    130  add h_64, T1 # T1 = CH(e,f,g) + W[t] + K[t] + h
    132  add tmp0, T1 # T1 = CH(e,f,g) + W[t] + K[t] + S1(e)
    155  # Two rounds are computed based on the values for K[t-2]+W[t-2] and
    156  # K[t-1]+W[t-1] which were previously stored at WK_2 by the message
    163  # Eg. XMM2=W[t-2] really means XMM2={W[t-2]|W[t-1]}
    172  movdqa W_t(idx), %xmm2 # XMM2 = W[t-2]
    175  movdqa %xmm2, %xmm0 # XMM0 = W[t-2]
    180  movdqu W_t(idx), %xmm5 # XMM5 = W[t-15]
    [all …]
|
D | sha512-avx-asm.S |
    78   # W[t] + K[t] | W[t+1] + K[t+1]
    101  # W[t]+K[t] (stack frame)
    132  add WK_2(idx), T1 # W[t] + K[t] from message scheduler
    136  add h_64, T1 # T1 = CH(e,f,g) + W[t] + K[t] + h
    138  add tmp0, T1 # T1 = CH(e,f,g) + W[t] + K[t] + S1(e)
    160  # Two rounds are computed based on the values for K[t-2]+W[t-2] and
    161  # K[t-1]+W[t-1] which were previously stored at WK_2 by the message
    168  # Eg. XMM4=W[t-2] really means XMM4={W[t-2]|W[t-1]}
    173  vmovdqa W_t(idx), %xmm4 # XMM4 = W[t-2]
    175  vmovdqu W_t(idx), %xmm5 # XMM5 = W[t-15]
    [all …]
|
D | sha512-avx2-asm.S |
    167  MY_VPALIGNR YTMP0, Y_3, Y_2, 8 # YTMP0 = W[-7]
    169  vpaddq Y_0, YTMP0, YTMP0 # YTMP0 = W[-7] + W[-16]
    171  MY_VPALIGNR YTMP1, Y_1, Y_0, 8 # YTMP1 = W[-15]
    178  vpor YTMP2, YTMP3, YTMP3 # YTMP3 = W[-15] ror 1
    180  vpsrlq $7, YTMP1, YTMP4 # YTMP4 = W[-15] >> 7
    224  vpor YTMP2, YTMP1, YTMP1 # YTMP1 = W[-15] ror 8
    226  vpxor YTMP4, YTMP3, YTMP3 # YTMP3 = W[-15] ror 1 ^ W[-15] >> 7
    231  vpaddq YTMP1, YTMP0, YTMP0 # YTMP0 = W[-16] + W[-7] + s0
    233  vperm2f128 $0x0, YTMP0, YTMP0, Y_0 # Y_0 = W[-16] + W[-7] + s0 {BABA}
    235  vpand MASK_YMM_LO(%rip), YTMP0, YTMP0 # YTMP0 = W[-16] + W[-7] + s0 {DC00}
    [all …]
|
D | sha256-ssse3-asm.S |
    150  ## compute W[-16] + W[-7] 4 at a time
    155  palignr $4, X2, XTMP0 # XTMP0 = W[-7]
    163  paddd X0, XTMP0 # XTMP0 = W[-7] + W[-16]
    168  palignr $4, X0, XTMP1 # XTMP1 = W[-15]
    172  movdqa XTMP1, XTMP2 # XTMP2 = W[-15]
    176  movdqa XTMP1, XTMP3 # XTMP3 = W[-15]
    187  por XTMP2, XTMP1 # XTMP1 = W[-15] ror 7
    192  movdqa XTMP3, XTMP2 # XTMP2 = W[-15]
    195  movdqa XTMP3, XTMP4 # XTMP4 = W[-15]
    212  psrld $3, XTMP4 # XTMP4 = W[-15] >> 3
    [all …]
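The psrld/pslld/por/pxor choreography implements a four-lane rotate-and-xor. A hedged intrinsics sketch of the same sigma0 step (SSE2 only; it mirrors the idea, not the exact register allocation of the .S file):

    #include <emmintrin.h>  /* SSE2 */

    /* s0(x) = (x ror 7) ^ (x ror 18) ^ (x >> 3), on four schedule words at once */
    static __m128i sha256_s0_x4(__m128i x)
    {
            __m128i r7  = _mm_or_si128(_mm_srli_epi32(x, 7),  _mm_slli_epi32(x, 25));
            __m128i r18 = _mm_or_si128(_mm_srli_epi32(x, 18), _mm_slli_epi32(x, 14));
            return _mm_xor_si128(_mm_xor_si128(r7, r18), _mm_srli_epi32(x, 3));
    }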
|
/linux-6.12.1/arch/x86/kernel/ |
D | uprobes.c |
    47   #define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
    90   W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 00 */
    91   W(0x10, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 10 */
    92   W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
    93   W(0x30, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
    94   W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
    95   W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
    96   W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
    97   W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
    98   W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
    [all …]
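Each W(row, ...) row packs 16 one-bit flags, one per low opcode nibble, and the alternating `|` / `,` merges two rows into each 32-bit entry of the opcode table. The macro body is truncated in this result, so the sketch below reconstructs the packing from the usage (the lookup helper and its name are illustrative, not kernel API):

    #include <stdint.h>

    #define W(row, b0, b1, b2, b3, b4, b5, b6, b7, \
                   b8, b9, ba, bb, bc, bd, be, bf) \
            ((((uint32_t)(b0) << 0x0) | ((uint32_t)(b1) << 0x1) | \
              ((uint32_t)(b2) << 0x2) | ((uint32_t)(b3) << 0x3) | \
              ((uint32_t)(b4) << 0x4) | ((uint32_t)(b5) << 0x5) | \
              ((uint32_t)(b6) << 0x6) | ((uint32_t)(b7) << 0x7) | \
              ((uint32_t)(b8) << 0x8) | ((uint32_t)(b9) << 0x9) | \
              ((uint32_t)(ba) << 0xa) | ((uint32_t)(bb) << 0xb) | \
              ((uint32_t)(bc) << 0xc) | ((uint32_t)(bd) << 0xd) | \
              ((uint32_t)(be) << 0xe) | ((uint32_t)(bf) << 0xf)) \
             << ((row) % 32))

    /* Illustrative lookup: one bit per opcode, 32 opcodes per table entry */
    static inline int opcode_flagged(const uint32_t *table, uint8_t opcode)
    {
            return (table[opcode / 32] >> (opcode % 32)) & 1;
    }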
|
/linux-6.12.1/tools/bpf/bpftool/bash-completion/ |
D | bpftool |
    22   COMPREPLY+=( $( compgen -W "$w" -- "$cur" ) )
    44   COMPREPLY+=( $( compgen -W "$*" -- "$cur" ) )
    49   COMPREPLY+=( $( compgen -W "$( bpftool -jp map 2>&1 | \
    57   COMPREPLY+=( $( compgen -W "$( bpftool -jp map 2>&1 | \
    64   COMPREPLY+=( $( compgen -W "$( bpftool -jp map 2>&1 | \
    72   COMPREPLY+=( $( compgen -W "$( bpftool -jp map 2>&1 | \
    79   COMPREPLY+=( $( compgen -W "$( bpftool -jp prog 2>&1 | \
    85   COMPREPLY+=( $( compgen -W "$( bpftool -jp prog 2>&1 | \
    91   COMPREPLY+=( $( compgen -W "$( bpftool -jp prog 2>&1 | \
    97   COMPREPLY+=( $( compgen -W "$( bpftool -jp btf 2>&1 | \
    [all …]
|
/linux-6.12.1/tools/memory-model/Documentation/ |
D | herd-representation.txt |
    4    # W, a Store event
    13   # W*, a Store event included in RMW
    32   | WRITE_ONCE | W[once] |
    36   | smp_store_release | W[release] |
    38   | smp_store_mb | W[once] ->po F[mb] |
    53   | rcu_assign_pointer | W[release] |
    56   | srcu_read_unlock | W[srcu-unlock] |
    63   | atomic_add | R*[noreturn] ->rmw W*[once] |
    70   | | ->rmw W*[once] ->po F[mb] |
    76   | atomic_add_return_relaxed | R*[once] ->rmw W*[once] |
    [all …]
|
D | cheatsheet.txt |
    3    C Self R W RMW Self R W DR DW RMW SV
    11   Successful *_release() C Y Y Y W Y
    13   smp_wmb() Y W Y Y W
    28   W: Write, for example, WRITE_ONCE(), or write portion of RMW
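Reading the W column concretely: a release store orders all prior accesses before it, and an acquire load orders all later accesses after it. A kernel-style C sketch of the message-passing pattern those rows describe (kernel context assumed, so no includes are shown; illustrative only):

    int payload;
    int flag;

    void producer(void)
    {
            payload = 42;                   /* plain write */
            smp_store_release(&flag, 1);    /* orders the payload write before flag */
    }

    int consumer(void)
    {
            if (smp_load_acquire(&flag))    /* pairs with the release store */
                    return payload;         /* guaranteed to observe 42 */
            return -1;
    }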
|
/linux-6.12.1/arch/arm/crypto/ |
D | sha1-armv7-neon.S |
    88   W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
    90   pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
    94   pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
    98   pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
    102  W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
    104  pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
    108  pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
    111  pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
    115  W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
    117  pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
    [all …]
|
/linux-6.12.1/crypto/ |
D | sha512_generic.c |
    88   static inline void LOAD_OP(int I, u64 *W, const u8 *input)
    90           W[I] = get_unaligned_be64((__u64 *)input + I);
    93   static inline void BLEND_OP(int I, u64 *W)
    95           W[I & 15] += s1(W[(I-2) & 15]) + W[(I-7) & 15] + s0(W[(I-15) & 15]);
    104          u64 W[16];
    118          LOAD_OP(i + j, W, input);
    121          BLEND_OP(i + j, W);
    126          t1 = h + e1(e) + Ch(e,f,g) + sha512_K[i ] + W[(i & 15)];
    128          t1 = g + e1(d) + Ch(d,e,f) + sha512_K[i+1] + W[(i & 15) + 1];
    130          t1 = f + e1(c) + Ch(c,d,e) + sha512_K[i+2] + W[(i & 15) + 2];
    [all …]
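Unlike the sha256.c version above, this BLEND_OP keeps only a 16-word rolling window, so the 80-word SHA-512 schedule needs just 128 bytes of stack. A standalone C sketch (sigmas written out per FIPS 180-4; illustrative, not the kernel's code):

    #include <stdint.h>

    static inline uint64_t ror64(uint64_t v, int n)
    {
            return (v >> n) | (v << (64 - n));
    }

    /* FIPS 180-4 SHA-512 small sigmas */
    static inline uint64_t s0(uint64_t x) { return ror64(x, 1) ^ ror64(x, 8) ^ (x >> 7); }
    static inline uint64_t s1(uint64_t x) { return ror64(x, 19) ^ ror64(x, 61) ^ (x >> 6); }

    /* Slot I & 15 still holds W[I-16] on entry, so += produces
     * W[I] = W[I-16] + s0(W[I-15]) + W[I-7] + s1(W[I-2]) in place */
    static inline void blend_op(int I, uint64_t W[16])
    {
            W[I & 15] += s1(W[(I - 2) & 15]) + W[(I - 7) & 15] + s0(W[(I - 15) & 15]);
    }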
|
D | sm3.c |
    63   #define I(i) (W[i] = get_unaligned_be32(data + i * 4))
    64   #define W1(i) (W[i & 0x0f])
    65   #define W2(i) (W[i & 0x0f] = \
    66           P1(W[i & 0x0f] \
    67              ^ W[(i-9) & 0x0f] \
    68              ^ rol32(W[(i-3) & 0x0f], 15)) \
    69           ^ rol32(W[(i-13) & 0x0f], 7) \
    70           ^ W[(i-6) & 0x0f])
    72   static void sm3_transform(struct sm3_state *sctx, u8 const *data, u32 W[16])
    170          u8 const *data, int blocks, u32 W[16])
    [all …]
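W2(i) is the SM3 message expansion in the same 16-word circular-window style: W[j] = P1(W[j-16] ^ W[j-9] ^ (W[j-3] <<< 15)) ^ (W[j-13] <<< 7) ^ W[j-6]. A standalone C sketch (P1 per the GB/T 32905 specification; illustrative, not the kernel's code):

    #include <stdint.h>

    static inline uint32_t rol32(uint32_t v, int n)
    {
            return (v << n) | (v >> (32 - n));
    }

    /* SM3 permutation P1(X) = X ^ (X <<< 15) ^ (X <<< 23) */
    static inline uint32_t P1(uint32_t x)
    {
            return x ^ rol32(x, 15) ^ rol32(x, 23);
    }

    /* One expansion step; W[i & 15] holds W[i-16] on entry, W[i] on exit */
    static inline uint32_t sm3_expand(uint32_t W[16], int i)
    {
            return W[i & 15] = P1(W[i & 15] ^ W[(i - 9) & 15]
                                  ^ rol32(W[(i - 3) & 15], 15))
                               ^ rol32(W[(i - 13) & 15], 7)
                               ^ W[(i - 6) & 15];
    }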
|
/linux-6.12.1/Documentation/arch/loongarch/ |
D | introduction.rst |
    238  ADD.W SUB.W ADDI.W ADD.D SUB.D ADDI.D
    241  MUL.W MULH.W MULH.WU DIV.W DIV.WU MOD.W MOD.WU
    244  LU12I.W LU32I.D LU52I.D ADDU16I.D
    248  SLL.W SRL.W SRA.W ROTR.W SLLI.W SRLI.W SRAI.W ROTRI.W
    253  EXT.W.B EXT.W.H CLO.W CLO.D CLZ.W CLZ.D CTO.W CTO.D CTZ.W CTZ.D
    254  BYTEPICK.W BYTEPICK.D BSTRINS.W BSTRINS.D BSTRPICK.W BSTRPICK.D
    255  REVB.2H REVB.4H REVB.2W REVB.D REVH.2W REVH.D BITREV.4B BITREV.8B BITREV.W BITREV.D
    264  LD.B LD.BU LD.H LD.HU LD.W LD.WU LD.D ST.B ST.H ST.W ST.D
    265  LDX.B LDX.BU LDX.H LDX.HU LDX.W LDX.WU LDX.D STX.B STX.H STX.W STX.D
    266  LDPTR.W LDPTR.D STPTR.W STPTR.D
    [all …]
|
/linux-6.12.1/arch/arm/lib/ |
D | memmove.S |
    82   6: W(nop)
    83      W(ldr) r3, [r1, #-4]!
    84      W(ldr) r4, [r1, #-4]!
    85      W(ldr) r5, [r1, #-4]!
    86      W(ldr) r6, [r1, #-4]!
    87      W(ldr) r8, [r1, #-4]!
    88      W(ldr) r9, [r1, #-4]!
    89      W(ldr) lr, [r1, #-4]!
    93      W(nop)
    94      W(str) r3, [r0, #-4]!
    [all …]
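The descending ldr/str runs with [r1, #-4]! writeback are an unrolled backward copy: when the destination overlaps the tail of the source, copying from the end downward guarantees no source word is clobbered before it is read. The same idea in plain C:

    #include <stddef.h>

    /* Overlap-safe copy for dst > src: walk both pointers from the end down */
    static void *memmove_backward(void *dst, const void *src, size_t n)
    {
            unsigned char *d = (unsigned char *)dst + n;
            const unsigned char *s = (const unsigned char *)src + n;

            while (n--)
                    *--d = *--s;
            return dst;
    }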
|
/linux-6.12.1/tools/memory-model/ |
D | linux-kernel.def |
    35   cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W)
    36   cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W)
    37   cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W)
    38   cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)
    111  atomic_cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W)
    112  atomic_cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W)
    113  atomic_cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W)
    114  atomic_cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)
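The {mb}/{once}/{acquire}/{release} tags say which ordering the memory model attaches to each variant. For orientation, a typical retry loop built on the fully ordered variant from the first row (kernel context assumed; the function and its name are illustrative; atomic_cmpxchg(v, old, new) stores new only if *v == old and returns the prior value either way):

    /* Bounded increment via a classic atomic_cmpxchg() retry loop */
    static int add_bounded(atomic_t *v, int max)
    {
            int cur = atomic_read(v);

            for (;;) {
                    int old;

                    if (cur >= max)
                            return 0;       /* would exceed the bound */
                    old = atomic_cmpxchg(v, cur, cur + 1);
                    if (old == cur)
                            return 1;       /* our update won */
                    cur = old;              /* lost a race: retry with fresh value */
            }
    }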
|
/linux-6.12.1/arch/m68k/fpsp040/ |
D | slogn.S |
    436  |--LET V=U*U, W=V*V, CALCULATE
    438  |--U + U*V*( [B1 + W*(B3 + W*B5)] + [V*(B2 + W*B4)] )
    443  fmulx %fp1,%fp1 | ...FP1 IS W
    448  fmulx %fp1,%fp3 | ...W*B5
    449  fmulx %fp1,%fp2 | ...W*B4
    451  faddd LOGB3,%fp3 | ...B3+W*B5
    452  faddd LOGB2,%fp2 | ...B2+W*B4
    454  fmulx %fp3,%fp1 | ...W*(B3+W*B5), FP3 RELEASED
    456  fmulx %fp0,%fp2 | ...V*(B2+W*B4)
    458  faddd LOGB1,%fp1 | ...B1+W*(B3+W*B5)
    [all …]
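The FPSP comments describe a split-polynomial evaluation: with V = U*U and W = V*V, the odd-index and even-index coefficient chains can proceed through the FPU independently before being recombined. The same scheme in C (B[1]..B[5] stand in for the LOGB1..LOGB5 table constants, which are not reproduced here):

    /* U + U*V*( [B1 + W*(B3 + W*B5)] + [V*(B2 + W*B4)] ), with V = U*U, W = V*V */
    static double log_poly_tail(double u, const double B[6])
    {
            double v = u * u;
            double w = v * v;
            double odd  = B[1] + w * (B[3] + w * B[5]);     /* B1, B3, B5 chain */
            double even = v * (B[2] + w * B[4]);            /* B2, B4 chain */

            return u + u * v * (odd + even);
    }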
|
/linux-6.12.1/arch/arm/boot/compressed/ |
D | head.S |
    213   W(b) 1f
    1014  W(b) __armv4_mmu_cache_on
    1015  W(b) __armv4_mmu_cache_off
    1021  W(b) __armv3_mpu_cache_on
    1022  W(b) __armv3_mpu_cache_off
    1023  W(b) __armv3_mpu_cache_flush
    1027  W(b) __armv4_mpu_cache_on
    1028  W(b) __armv4_mpu_cache_off
    1029  W(b) __armv4_mpu_cache_flush
    1033  W(b) __arm926ejs_mmu_cache_on
    [all …]
|
/linux-6.12.1/arch/arm/kernel/ |
D | entry-armv.S |
    906   3: W(b) . + 4
    1075  W(b) vector_rst
    1076  W(b) vector_und
    1079  W(ldr) pc, .
    1080  W(b) vector_pabt
    1081  W(b) vector_dabt
    1082  W(b) vector_addrexcptn
    1083  W(b) vector_irq
    1084  W(b) vector_fiq
    1089  W(b) vector_rst
    [all …]
|
/linux-6.12.1/arch/mips/n64/ |
D | init.c |
    51   #define W 320
    82   .width = W,
    84   .stride = W * 2,
    122  orig = kzalloc(W * H * 2 + 63, GFP_DMA | GFP_KERNEL);
    141  res[0].end = phys + W * H * 2 - 1;
    149  #undef W
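The size arithmetic in the kzalloc() call: a 320x240 framebuffer at 2 bytes per pixel is W * H * 2 = 153600 bytes, over-allocated by 63 so the start can be rounded up to a 64-byte boundary by hand. A sketch of that rounding:

    #include <stdint.h>

    #define W 320
    #define H 240

    enum { FB_BYTES = W * H * 2 };  /* 153600 bytes of 16-bit pixels */

    /* Round an address up to the next 64-byte boundary (what the +63 allows) */
    static uintptr_t align_up_64(uintptr_t p)
    {
            return (p + 63) & ~(uintptr_t)63;
    }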
|
/linux-6.12.1/drivers/atm/ |
D | Kconfig |
    86   when going from 8W to 16W bursts.
    89   bool "Enable 16W TX bursts (discouraged)"
    96   bool "Enable 8W TX bursts (recommended)"
    103  bool "Enable 4W TX bursts (optional)"
    107  this if you have disabled 8W bursts. Enabling 4W if 8W is also set
    111  bool "Enable 2W TX bursts (optional)"
    115  this if you have disabled 4W and 8W bursts. Enabling 2W if 4W or 8W
    119  bool "Enable 16W RX bursts (discouraged)"
    126  bool "Enable 8W RX bursts (discouraged)"
    134  bool "Enable 4W RX bursts (recommended)"
    [all …]
|