/linux-6.12.1/arch/arm64/crypto/ |
D | sm4-ce-asm.h | 28 #define SM4_CRYPT_BLK2_BE(b0, b1) \ argument 30 sm4e b1.4s, v24.4s; \ 32 sm4e b1.4s, v25.4s; \ 34 sm4e b1.4s, v26.4s; \ 36 sm4e b1.4s, v27.4s; \ 38 sm4e b1.4s, v28.4s; \ 40 sm4e b1.4s, v29.4s; \ 42 sm4e b1.4s, v30.4s; \ 44 sm4e b1.4s, v31.4s; \ 46 rev64 b1.4s, b1.4s; \ [all …]
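Each sm4e instruction applies four SM4 rounds using the four round keys in its second operand, so the eight sm4e issued per block above consume the 32 round keys held in v24-v31. The _BE variants expect the block's words already converted to big-endian order; the plain variants do a rev32 first, because SM4 consumes each 16-byte block as four big-endian 32-bit words. A portable sketch of that load (helper name is illustrative, not kernel code):

    #include <stdint.h>

    /* Portable equivalent of the rev32 byte swap on a little-endian core. */
    static void sm4_load_block_be(uint32_t w[4], const uint8_t in[16])
    {
        for (int i = 0; i < 4; i++)
            w[i] = ((uint32_t)in[4 * i + 0] << 24) |
                   ((uint32_t)in[4 * i + 1] << 16) |
                   ((uint32_t)in[4 * i + 2] <<  8) |
                    (uint32_t)in[4 * i + 3];
    }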
|
D | sm4-neon-core.S | 131 #define SM4_CRYPT_BLK4_BE(b0, b1, b2, b3) \ argument 137 ROUND4(0, b0, b1, b2, b3); \ 138 ROUND4(1, b1, b2, b3, b0); \ 139 ROUND4(2, b2, b3, b0, b1); \ 140 ROUND4(3, b3, b0, b1, b2); \ 145 rev32 b1.16b, b1.16b; \ 149 rotate_clockwise_4x4(b0, b1, b2, b3); \ 154 #define SM4_CRYPT_BLK4(b0, b1, b2, b3) \ argument 156 rev32 b1.16b, b1.16b; \ 159 SM4_CRYPT_BLK4_BE(b0, b1, b2, b3); [all …]
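The rotated argument lists in the ROUND4 calls (b0,b1,b2,b3 then b1,b2,b3,b0 and so on) implement SM4's sliding four-word state without moving data between registers: each round overwrites the oldest word. A scalar sketch of that pattern, with SM4's real T transform (S-box plus linear diffusion) replaced by a stand-in so the example stays self-contained:

    #include <stdint.h>

    /* Stand-in for SM4's T transform; NOT the real S-box/L function. */
    static uint32_t T_stub(uint32_t x)
    {
        return (x << 2) | (x >> 30);
    }

    /* Four rounds: the destination role rotates through the state words,
     * mirroring ROUND4(0, b0,b1,b2,b3) ... ROUND4(3, b3,b0,b1,b2). */
    static void sm4_four_rounds(uint32_t s[4], const uint32_t rk[4])
    {
        s[0] ^= T_stub(s[1] ^ s[2] ^ s[3] ^ rk[0]);
        s[1] ^= T_stub(s[2] ^ s[3] ^ s[0] ^ rk[1]);
        s[2] ^= T_stub(s[3] ^ s[0] ^ s[1] ^ rk[2]);
        s[3] ^= T_stub(s[0] ^ s[1] ^ s[2] ^ rk[3]);
    }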
|
D | aes-neonbs-core.S | 26 .macro in_bs_ch, b0, b1, b2, b3, b4, b5, b6, b7 27 eor \b2, \b2, \b1 38 eor \b3, \b3, \b1 39 eor \b1, \b1, \b5 42 .macro out_bs_ch, b0, b1, b2, b3, b4, b5, b6, b7 44 eor \b1, \b1, \b4 47 eor \b6, \b6, \b1 48 eor \b1, \b1, \b5 56 .macro inv_in_bs_ch, b6, b1, b2, b4, b7, b0, b3, b5 57 eor \b1, \b1, \b7 [all …]
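in_bs_ch/out_bs_ch are the linear transforms applied on entry to and exit from the bit-sliced S-box circuit, where b0-b7 each hold one bit position of several blocks at once. The prerequisite packing step is sketched below as a plain, unoptimised 8x8 bit transpose; this helper is illustrative and not kernel code (the real code does it with SIMD shuffles):

    #include <stdint.h>

    /* Gather bit k of eight bytes into byte k: the basic bit-slicing
     * transpose, shown in scalar form for clarity only. */
    static void bitslice8(const uint8_t blocks[8], uint8_t planes[8])
    {
        for (int bit = 0; bit < 8; bit++) {
            uint8_t p = 0;

            for (int blk = 0; blk < 8; blk++)
                p |= ((blocks[blk] >> bit) & 1) << blk;
            planes[bit] = p;
        }
    }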
|
D | sm4-ce-gcm-core.S | 133 #define SM4_CRYPT_PMUL_128x128_BLK3(b0, b1, b2, \ argument 138 rev32 b1.16b, b1.16b; \ 144 sm4e b1.4s, v24.4s; \ 150 sm4e b1.4s, v25.4s; \ 156 sm4e b1.4s, v26.4s; \ 162 sm4e b1.4s, v27.4s; \ 168 sm4e b1.4s, v28.4s; \ 174 sm4e b1.4s, v29.4s; \ 180 sm4e b1.4s, v30.4s; \ 186 sm4e b1.4s, v31.4s; \ [all …]
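SM4_CRYPT_PMUL_128x128_BLK3 interleaves SM4 rounds with the carry-less multiplications GHASH needs. For reference, the GF(2^128) product those multiply steps compute can be written bit by bit, straight from the GCM specification; this standalone version is slow but useful as a cross-check:

    #include <stdint.h>
    #include <string.h>

    /* GHASH multiplication in GF(2^128) with the GCM reduction
     * polynomial; X, Y and the result use GCM's bit ordering. */
    static void gf128_mul(const uint8_t X[16], const uint8_t Y[16],
                          uint8_t out[16])
    {
        uint8_t Z[16] = { 0 };
        uint8_t V[16];

        memcpy(V, Y, 16);
        for (int i = 0; i < 128; i++) {
            if ((X[i / 8] >> (7 - (i % 8))) & 1)       /* bit i of X */
                for (int j = 0; j < 16; j++)
                    Z[j] ^= V[j];

            int carry = V[15] & 1;                     /* V = V * x */
            for (int j = 15; j > 0; j--)
                V[j] = (V[j] >> 1) | (V[j - 1] << 7);
            V[0] >>= 1;
            if (carry)
                V[0] ^= 0xe1;                          /* reduce */
        }
        memcpy(out, Z, 16);
    }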
|
/linux-6.12.1/crypto/ |
D | aes_generic.c | 1179 u32 b0[4], b1[4]; in crypto_aes_encrypt() local 1189 f_nround(b1, b0, kp); in crypto_aes_encrypt() 1190 f_nround(b0, b1, kp); in crypto_aes_encrypt() 1194 f_nround(b1, b0, kp); in crypto_aes_encrypt() 1195 f_nround(b0, b1, kp); in crypto_aes_encrypt() 1198 f_nround(b1, b0, kp); in crypto_aes_encrypt() 1199 f_nround(b0, b1, kp); in crypto_aes_encrypt() 1200 f_nround(b1, b0, kp); in crypto_aes_encrypt() 1201 f_nround(b0, b1, kp); in crypto_aes_encrypt() 1202 f_nround(b1, b0, kp); in crypto_aes_encrypt() [all …]
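crypto_aes_encrypt() alternates between its two 4-word buffers: each f_nround() reads the whole previous state from one buffer and writes the next state into the other, which is why consecutive calls swap b0 and b1. A stand-alone sketch of that double buffering; round_stub() is a placeholder, not the kernel's f_nround macro:

    #include <stdint.h>
    #include <string.h>

    /* Placeholder round: a real AES round substitutes, shifts, mixes and
     * adds the round key; only the buffer-swapping pattern matters here. */
    static void round_stub(uint32_t dst[4], const uint32_t src[4],
                           const uint32_t *rk)
    {
        for (int i = 0; i < 4; i++)
            dst[i] = src[(i + 1) & 3] ^ rk[i];
    }

    static void ten_rounds(uint32_t state[4], const uint32_t *rk)
    {
        uint32_t b0[4], b1[4];

        memcpy(b0, state, sizeof(b0));
        for (int r = 0; r < 10; r += 2) {     /* b0 -> b1 -> b0 -> ... */
            round_stub(b1, b0, rk + 4 * r);
            round_stub(b0, b1, rk + 4 * (r + 1));
        }
        memcpy(state, b0, sizeof(b0));
    }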
|
D | xor.c | 83 do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2) in do_xor_speed() argument 101 tmpl->do_2(BENCH_SIZE, b1, b2); in do_xor_speed() 118 void *b1, *b2; in calibrate_xor_blocks() local 130 b1 = (void *) __get_free_pages(GFP_KERNEL, 2); in calibrate_xor_blocks() 131 if (!b1) { in calibrate_xor_blocks() 135 b2 = b1 + 2*PAGE_SIZE + BENCH_SIZE; in calibrate_xor_blocks() 142 #define xor_speed(templ) do_xor_speed((templ), b1, b2) in calibrate_xor_blocks() 157 free_pages((unsigned long)b1, 2); in calibrate_xor_blocks()
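calibrate_xor_blocks() benchmarks each xor_block_template by running do_xor_speed() on two scratch buffers and keeping the fastest. A rough user-space analogue of that measurement loop; the names and the 1/8-second budget are illustrative, not the kernel's:

    #include <stddef.h>
    #include <time.h>

    #define BENCH_SIZE 4096

    typedef void (*xor2_fn)(size_t bytes, void *p1, const void *p2);

    /* Run one candidate for a fixed time slice; more passes means faster. */
    static unsigned long xor_speed(xor2_fn fn, void *b1, void *b2)
    {
        unsigned long passes = 0;
        clock_t deadline = clock() + CLOCKS_PER_SEC / 8;

        while (clock() < deadline) {
            fn(BENCH_SIZE, b1, b2);
            passes++;
        }
        return passes;
    }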
|
/linux-6.12.1/drivers/atm/ |
D | fore200e.h | 71 #define BITFIELD2(b1, b2) b1; b2; argument 72 #define BITFIELD3(b1, b2, b3) b1; b2; b3; argument 73 #define BITFIELD4(b1, b2, b3, b4) b1; b2; b3; b4; argument 74 #define BITFIELD5(b1, b2, b3, b4, b5) b1; b2; b3; b4; b5; argument 75 #define BITFIELD6(b1, b2, b3, b4, b5, b6) b1; b2; b3; b4; b5; b6; argument 77 #define BITFIELD2(b1, b2) b2; b1; argument 78 #define BITFIELD3(b1, b2, b3) b3; b2; b1; argument 79 #define BITFIELD4(b1, b2, b3, b4) b4; b3; b2; b1; argument 80 #define BITFIELD5(b1, b2, b3, b4, b5) b5; b4; b3; b2; b1; argument 81 #define BITFIELD6(b1, b2, b3, b4, b5, b6) b6; b5; b4; b3; b2; b1; argument
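The two sets of definitions exist because C leaves bit-field allocation order to the ABI: big-endian targets typically fill a storage unit from the most significant bit and little-endian targets from the least significant, so a struct that must match a hardware layout has to declare its fields in reverse order on one of them. Intended use of the macros (field names here are made up, not from the driver):

    struct cell_header {
        BITFIELD3(
            unsigned int vpi  : 8,     /* most significant on the wire */
            unsigned int vci  : 16,
            unsigned int ctrl : 8      /* least significant */
        )
    };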
|
/linux-6.12.1/Documentation/arch/arm64/ |
D | elf_hwcaps.rst | 252 Functionality implied by ID_AA64SMFR0_EL1.F64F64 == 0b1. 258 Functionality implied by ID_AA64SMFR0_EL1.F16F32 == 0b1. 261 Functionality implied by ID_AA64SMFR0_EL1.B16F32 == 0b1. 264 Functionality implied by ID_AA64SMFR0_EL1.F32F32 == 0b1. 267 Functionality implied by ID_AA64SMFR0_EL1.FA64 == 0b1. 297 Functionality implied by ID_AA64SMFR0_EL1.BI32I32 == 0b1 300 Functionality implied by ID_AA64SMFR0_EL1.B16B16 == 0b1 303 Functionality implied by ID_AA64SMFR0_EL1.F16F16 == 0b1 330 Functionality implied by ID_AA64FPFR0_EL1.F8CVT == 0b1. 333 Functionality implied by ID_AA64FPFR0_EL1.F8FMA == 0b1. [all …]
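As the introduction of this document describes, each of these hwcaps is reported through the auxiliary vector, so userspace tests for them with getauxval(3). For example (HWCAP2_SME_F64F64 comes from the arm64 uapi hwcap header and needs headers recent enough to define it):

    #include <sys/auxv.h>
    #include <asm/hwcap.h>

    static int have_sme_f64f64(void)
    {
    #ifdef HWCAP2_SME_F64F64
        return !!(getauxval(AT_HWCAP2) & HWCAP2_SME_F64F64);
    #else
        return 0;          /* headers predate this hwcap */
    #endif
    }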
|
D | booting.rst | 221 - SCR_EL3.HCE (bit 8) must be initialised to 0b1. 226 - ICC_SRE_EL3.Enable (bit 3) must be initialised to 0b1. 227 - ICC_SRE_EL3.SRE (bit 0) must be initialised to 0b1. 234 - ICC.SRE_EL2.Enable (bit 3) must be initialised to 0b1 235 - ICC_SRE_EL2.SRE (bit 0) must be initialised to 0b1. 256 - SCR_EL3.APK (bit 16) must be initialised to 0b1 257 - SCR_EL3.API (bit 17) must be initialised to 0b1 261 - HCR_EL2.APK (bit 40) must be initialised to 0b1 262 - HCR_EL2.API (bit 41) must be initialised to 0b1 272 having 0b1 set for the corresponding bit for each of the auxiliary [all …]
|
/linux-6.12.1/drivers/isdn/mISDN/ |
D | dsp_biquad.h | 19 int32_t b1; member 27 int32_t gain, int32_t a1, int32_t a2, int32_t b1, int32_t b2) in biquad2_init() argument 32 bq->b1 = b1; in biquad2_init() 45 y = z0 + bq->z1 * bq->b1 + bq->z2 * bq->b2; in biquad2()
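biquad2() is a second-order IIR section: z1/z2 form the two-sample delay line, a1/a2 weight the feedback path and b1/b2 the feedforward path, with the arithmetic done in 32-bit fixed point. A floating-point direct-form II version of the same filter for reference; coefficient signs follow the usual textbook convention, which the driver folds into its stored values:

    /* H(z) = (b0 + b1*z^-1 + b2*z^-2) / (1 + a1*z^-1 + a2*z^-2) */
    struct biquad {
        float a1, a2;          /* feedback coefficients */
        float b0, b1, b2;      /* feedforward coefficients */
        float z1, z2;          /* delay line */
    };

    static float biquad_step(struct biquad *bq, float x)
    {
        float w = x - bq->a1 * bq->z1 - bq->a2 * bq->z2;
        float y = bq->b0 * w + bq->b1 * bq->z1 + bq->b2 * bq->z2;

        bq->z2 = bq->z1;
        bq->z1 = w;
        return y;
    }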
|
/linux-6.12.1/fs/f2fs/ |
D | hash.c | 28 __u32 b0 = buf[0], b1 = buf[1]; in TEA_transform() local 34 b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b); in TEA_transform() 35 b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d); in TEA_transform() 39 buf[1] += b1; in TEA_transform()
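TEA_transform() is the reduced-round TEA mixing step f2fs uses for directory-entry hashing: b0/b1 are the running hash words, a-d come from the name being hashed, and sum advances by the TEA constant each round. A stand-alone form of the same rounds (a fixed reduced round count, 16 here):

    #include <stdint.h>

    #define TEA_DELTA 0x9E3779B9u

    static void tea_transform(uint32_t buf[2], const uint32_t in[4])
    {
        uint32_t sum = 0;
        uint32_t b0 = buf[0], b1 = buf[1];
        uint32_t a = in[0], b = in[1], c = in[2], d = in[3];
        int n = 16;                       /* reduced-round variant */

        while (n--) {
            sum += TEA_DELTA;
            b0 += ((b1 << 4) + a) ^ (b1 + sum) ^ ((b1 >> 5) + b);
            b1 += ((b0 << 4) + c) ^ (b0 + sum) ^ ((b0 >> 5) + d);
        }
        buf[0] += b0;
        buf[1] += b1;
    }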
|
/linux-6.12.1/arch/arm/include/asm/ |
D | xor.h | 26 : "=r" (src), "=r" (b1), "=r" (b2) \ 28 __XOR(a1, b1); __XOR(a2, b2); 32 : "=r" (src), "=r" (b1), "=r" (b2), "=r" (b3), "=r" (b4) \ 34 __XOR(a1, b1); __XOR(a2, b2); __XOR(a3, b3); __XOR(a4, b4) 55 register unsigned int b1 __asm__("r8"); in xor_arm4regs_2() 77 register unsigned int b1 __asm__("r8"); in xor_arm4regs_3() 99 register unsigned int b1 __asm__("ip"); in xor_arm4regs_4() 121 register unsigned int b1 __asm__("ip"); in xor_arm4regs_5()
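The xor_arm4regs_* helpers pin their operands to specific registers so the inline assembly can load and XOR several words per iteration. The portable loop they replace looks like this; a generic 2-source version, where the kernel templates also come in 3-, 4- and 5-source flavours:

    #include <stddef.h>

    /* XOR the second buffer into the first, four words at a time. */
    static void xor_blocks_2(size_t bytes, unsigned long *p1,
                             const unsigned long *p2)
    {
        size_t lines = bytes / (4 * sizeof(unsigned long));

        while (lines--) {
            p1[0] ^= p2[0];
            p1[1] ^= p2[1];
            p1[2] ^= p2[2];
            p1[3] ^= p2[3];
            p1 += 4;
            p2 += 4;
        }
    }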
|
/linux-6.12.1/fs/reiserfs/ |
D | hashes.c | 28 u32 b0, b1; \ 31 b1 = h1; \ 36 b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b); \ 37 b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d); \ 41 h1 += b1; \
|
/linux-6.12.1/drivers/crypto/nx/ |
D | nx-aes-ccm.c | 164 u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL; in generate_pat() local 192 b1 = nx_ctx->priv.ccm.iauth_tag; in generate_pat() 199 b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1; in generate_pat() 203 b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1; in generate_pat() 216 if (b1) { in generate_pat() 217 memset(b1, 0, 16); in generate_pat() 219 *(u16 *)b1 = assoclen; in generate_pat() 220 scatterwalk_map_and_copy(b1 + 2, req->src, 0, in generate_pat() 223 *(u16 *)b1 = (u16)(0xfffe); in generate_pat() 224 *(u32 *)&b1[2] = assoclen; in generate_pat() [all …]
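The two branches above build CCM's B1 block, which carries the associated-data length in the encoding defined by RFC 3610 / SP 800-38C: lengths below 0xFF00 take two bytes, longer ones a 0xFF 0xFE marker followed by a four-byte length. That encoding on its own (helper name is illustrative):

    #include <stddef.h>
    #include <stdint.h>

    /* Returns how many bytes of `out` were used (2 or 6). */
    static size_t ccm_encode_adata_len(uint8_t out[6], uint32_t assoclen)
    {
        if (assoclen < 0xFF00) {
            out[0] = assoclen >> 8;
            out[1] = assoclen & 0xff;
            return 2;
        }
        out[0] = 0xFF;
        out[1] = 0xFE;
        out[2] = assoclen >> 24;
        out[3] = assoclen >> 16;
        out[4] = assoclen >> 8;
        out[5] = assoclen & 0xff;
        return 6;
    }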
|
/linux-6.12.1/arch/arm/nwfpe/ |
D | softfloat-macros | 339 value formed by concatenating `b0' and `b1'. Addition is modulo 2^128, so 346 bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 *z0Ptr, bits64 *z1Ptr ) 350 z1 = a1 + b1; 359 192-bit value formed by concatenating `b0', `b1', and `b2'. Addition is 371 bits64 b1, 383 z1 = a1 + b1; 397 Subtracts the 128-bit value formed by concatenating `b0' and `b1' from the 406 bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 *z0Ptr, bits64 *z1Ptr ) 409 *z1Ptr = a1 - b1; 410 *z0Ptr = a0 - b0 - ( a1 < b1 ); [all …]
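add128() and sub128() extend 64-bit arithmetic to 128 bits by propagating the carry or borrow from the low halves by hand, since plain C exposes no carry flag. The same routines in standalone form with fixed-width types:

    #include <stdint.h>

    static void add128(uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1,
                       uint64_t *z0, uint64_t *z1)
    {
        uint64_t lo = a1 + b1;

        *z1 = lo;
        *z0 = a0 + b0 + (lo < a1);     /* carry out of the low half */
    }

    static void sub128(uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1,
                       uint64_t *z0, uint64_t *z1)
    {
        *z1 = a1 - b1;
        *z0 = a0 - b0 - (a1 < b1);     /* borrow from the low half */
    }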
|
/linux-6.12.1/arch/riscv/crypto/ |
D | chacha-riscv64-zvkb.S | 76 .macro chacha_round a0, b0, c0, d0, a1, b1, c1, d1, \ 80 vadd.vv \a1, \a1, \b1 98 vxor.vv \b1, \b1, \c1 102 vror.vi \b1, \b1, 32 - 12 108 vadd.vv \a1, \a1, \b1 126 vxor.vv \b1, \b1, \c1 130 vror.vi \b1, \b1, 32 - 7
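The `32 - 12` and `32 - 7` operands appear because vror.vi rotates right, and a right rotation by 32-n equals the left rotation by n that ChaCha specifies; each chacha_round invocation runs four independent quarter rounds, one per register group. The scalar quarter round being vectorised:

    #include <stdint.h>

    #define ROTL32(x, n) (((x) << (n)) | ((x) >> (32 - (n))))

    static void chacha_quarter_round(uint32_t *a, uint32_t *b,
                                     uint32_t *c, uint32_t *d)
    {
        *a += *b; *d ^= *a; *d = ROTL32(*d, 16);
        *c += *d; *b ^= *c; *b = ROTL32(*b, 12);
        *a += *b; *d ^= *a; *d = ROTL32(*d, 8);
        *c += *d; *b ^= *c; *b = ROTL32(*b, 7);
    }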
|
/linux-6.12.1/tools/mm/ |
D | slabinfo.c | 806 char b1[20], b2[20], b3[20], b4[20]; in totals() local 982 store_size(b1, total_size);store_size(b2, total_waste); in totals() 984 printf("Memory used: %15s # Loss : %15s MRatio:%6s%%\n", b1, b2, b3); in totals() 986 store_size(b1, total_objects);store_size(b2, total_partobj); in totals() 988 printf("# Objects : %15s # PartObj: %15s ORatio:%6s%%\n", b1, b2, b3); in totals() 996 store_size(b1, avg_objects);store_size(b2, min_objects); in totals() 999 b1, b2, b3, b4); in totals() 1001 store_size(b1, avg_slabs);store_size(b2, min_slabs); in totals() 1004 b1, b2, b3, b4); in totals() 1006 store_size(b1, avg_partial);store_size(b2, min_partial); in totals() [all …]
|
/linux-6.12.1/arch/powerpc/kernel/vdso/ |
D | vgetrandom-chacha.S | 52 .macro quarterround4 a1 b1 c1 d1 a2 b2 c2 d2 a3 b3 c3 d3 a4 b4 c4 d4 53 add \a1, \a1, \b1 69 xor \b1, \b1, \c1 73 rotlwi \b1, \b1, 12 77 add \a1, \a1, \b1 93 xor \b1, \b1, \c1 97 rotlwi \b1, \b1, 7 103 #define QUARTERROUND4(a1,b1,c1,d1,a2,b2,c2,d2,a3,b3,c3,d3,a4,b4,c4,d4) \ argument 104 quarterround4 state##a1 state##b1 state##c1 state##d1 \
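quarterround4/QUARTERROUND4 take sixteen state-word arguments because each invocation covers four quarter rounds at once: the full set of columns of the 4x4 state in one call and its diagonals in the next. The two groupings in scalar form:

    #include <stdint.h>

    #define ROTL(x, n)  (((x) << (n)) | ((x) >> (32 - (n))))
    #define QR(a, b, c, d) do {                          \
            a += b; d ^= a; d = ROTL(d, 16);             \
            c += d; b ^= c; b = ROTL(b, 12);             \
            a += b; d ^= a; d = ROTL(d, 8);              \
            c += d; b ^= c; b = ROTL(b, 7);              \
        } while (0)

    /* One ChaCha double round: columns, then diagonals. */
    static void chacha_double_round(uint32_t x[16])
    {
        QR(x[0], x[4], x[8],  x[12]);
        QR(x[1], x[5], x[9],  x[13]);
        QR(x[2], x[6], x[10], x[14]);
        QR(x[3], x[7], x[11], x[15]);

        QR(x[0], x[5], x[10], x[15]);
        QR(x[1], x[6], x[11], x[12]);
        QR(x[2], x[7], x[8],  x[13]);
        QR(x[3], x[4], x[9],  x[14]);
    }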
|
/linux-6.12.1/arch/arm/crypto/ |
D | aes-neonbs-core.S | 80 .macro in_bs_ch, b0, b1, b2, b3, b4, b5, b6, b7 81 veor \b2, \b2, \b1 92 veor \b3, \b3, \b1 93 veor \b1, \b1, \b5 96 .macro out_bs_ch, b0, b1, b2, b3, b4, b5, b6, b7 98 veor \b1, \b1, \b4 101 veor \b6, \b6, \b1 102 veor \b1, \b1, \b5 110 .macro inv_in_bs_ch, b6, b1, b2, b4, b7, b0, b3, b5 111 veor \b1, \b1, \b7 [all …]
|
/linux-6.12.1/arch/xtensa/platforms/iss/include/platform/ |
D | simcall-iss.h | 61 register int b1 asm("a3") = b; in __simc() 66 : "+r"(a1), "+r"(b1) in __simc() 69 errno = b1; in __simc()
|
/linux-6.12.1/scripts/ |
D | parse-maintainers.pl | 79 my $b1 = uc(substr($b, 0, 1)); 82 my $b_index = index($preferred_order, $b1); 87 if (($a1 =~ /^F$/ && $b1 =~ /^F$/) || 88 ($a1 =~ /^X$/ && $b1 =~ /^X$/)) {
|
/linux-6.12.1/arch/s390/net/ |
D | bpf_jit_comp.c | 122 static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) in reg_set_seen() argument 124 u32 r1 = reg2hex[b1]; in reg_set_seen() 130 #define REG_SET_SEEN(b1) \ argument 132 reg_set_seen(jit, b1); \ 146 #define EMIT2(op, b1, b2) \ argument 148 _EMIT2((op) | reg(b1, b2)); \ 149 REG_SET_SEEN(b1); \ 160 #define EMIT4(op, b1, b2) \ argument 162 _EMIT4((op) | reg(b1, b2)); \ 163 REG_SET_SEEN(b1); \ [all …]
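The EMIT macros append fixed-width s390 instruction words to the JIT image, OR-ing the mapped register numbers into the opcode and recording which BPF registers have been seen; on the first (sizing) pass the image pointer is NULL and only the offset advances. A simplified stand-alone sketch of that pattern, not the real s390 encoding or the kernel's struct bpf_jit:

    #include <stddef.h>
    #include <stdint.h>

    struct jit {
        uint8_t  *image;      /* NULL during the sizing pass */
        size_t    prg;        /* current offset into the image */
        uint32_t  seen_regs;  /* bitmask of registers used so far */
    };

    /* Emit a 2-byte RR-style instruction: opcode plus two 4-bit regs. */
    static void emit2(struct jit *jit, uint16_t op, unsigned int r1,
                      unsigned int r2)
    {
        uint16_t insn = op | (r1 << 4) | r2;

        if (jit->image) {
            jit->image[jit->prg]     = insn >> 8;    /* big-endian */
            jit->image[jit->prg + 1] = insn & 0xff;
        }
        jit->prg += 2;
        jit->seen_regs |= (1u << r1) | (1u << r2);
    }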
|
/linux-6.12.1/arch/x86/crypto/ |
D | cast6-avx-x86_64-asm_64.S | 129 #define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \ argument 130 F_head(b1, RX, RGI1, RGI2, op0); \ 133 F_tail(b1, RX, RGI1, RGI2, op1, op2, op3); \ 139 #define F1_2(a1, b1, a2, b2) \ argument 140 F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl) 141 #define F2_2(a1, b1, a2, b2) \ argument 142 F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl) 143 #define F3_2(a1, b1, a2, b2) \ argument 144 F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)
|
D | cast5-avx-x86_64-asm_64.S | 129 #define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \ argument 130 F_head(b1, RX, RGI1, RGI2, op0); \ 133 F_tail(b1, RX, RGI1, RGI2, op1, op2, op3); \ 139 #define F1_2(a1, b1, a2, b2) \ argument 140 F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl) 141 #define F2_2(a1, b1, a2, b2) \ argument 142 F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl) 143 #define F3_2(a1, b1, a2, b2) \ argument 144 F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl) 146 #define subround(a1, b1, a2, b2, f) \ argument [all …]
|
/linux-6.12.1/drivers/mtd/nand/ |
D | ecc-sw-hamming.c | 378 unsigned char b0, b1, b2, bit_addr; in ecc_sw_hamming_correct() local 388 b1 = read_ecc[1] ^ calc_ecc[1]; in ecc_sw_hamming_correct() 391 b1 = read_ecc[0] ^ calc_ecc[0]; in ecc_sw_hamming_correct() 401 if ((b0 | b1 | b2) == 0) in ecc_sw_hamming_correct() 405 (((b1 ^ (b1 >> 1)) & 0x55) == 0x55) && in ecc_sw_hamming_correct() 426 byte_addr = (addressbits[b1] << 4) + addressbits[b0]; in ecc_sw_hamming_correct() 429 (addressbits[b1] << 4) + addressbits[b0]; in ecc_sw_hamming_correct() 437 if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1) in ecc_sw_hamming_correct()
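The b0/b1/b2 values are syndromes, the XOR of the stored and recalculated ECC bytes, and the checks that follow classify them: all zero means no error; every complementary bit pair differing (the `(b ^ (b >> 1)) & 0x55` test) means a correctable single-bit data error whose position is decoded from the syndrome; a single set bit overall means the error was in the ECC bytes themselves. The same decision structure in isolation; names are illustrative, and details such as the exact mask applied to the third syndrome byte depend on the ECC step size in the kernel:

    #include <stdint.h>

    enum ecc_status { ECC_NO_ERROR, ECC_SINGLE_BIT, ECC_ECC_ERROR,
                      ECC_UNCORRECTABLE };

    static int popcount8(uint8_t v)
    {
        int n = 0;

        while (v) {
            n += v & 1;
            v >>= 1;
        }
        return n;
    }

    static enum ecc_status hamming_classify(uint8_t b0, uint8_t b1, uint8_t b2)
    {
        if ((b0 | b1 | b2) == 0)
            return ECC_NO_ERROR;

        if (((b0 ^ (b0 >> 1)) & 0x55) == 0x55 &&
            ((b1 ^ (b1 >> 1)) & 0x55) == 0x55 &&
            ((b2 ^ (b2 >> 1)) & 0x55) == 0x55)
            return ECC_SINGLE_BIT;        /* flip the addressed bit */

        if (popcount8(b0) + popcount8(b1) + popcount8(b2) == 1)
            return ECC_ECC_ERROR;         /* stored ECC was corrupt */

        return ECC_UNCORRECTABLE;
    }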
|