/linux-6.12.1/crypto/ |
D | sm4.c |
    104  static inline u32 sm4_round(u32 x0, u32 x1, u32 x2, u32 x3, u32 rk)    in sm4_round() argument
    106      return x0 ^ sm4_enc_sub(x1 ^ x2 ^ x3 ^ rk);    in sm4_round()
    122      u32 rk[4];    in sm4_expandkey() local
    129      rk[0] = get_unaligned_be32(&key[0]) ^ fk[0];    in sm4_expandkey()
    130      rk[1] = get_unaligned_be32(&key[1]) ^ fk[1];    in sm4_expandkey()
    131      rk[2] = get_unaligned_be32(&key[2]) ^ fk[2];    in sm4_expandkey()
    132      rk[3] = get_unaligned_be32(&key[3]) ^ fk[3];    in sm4_expandkey()
    135      rk[0] ^= sm4_key_sub(rk[1] ^ rk[2] ^ rk[3] ^ ck[i + 0]);    in sm4_expandkey()
    136      rk[1] ^= sm4_key_sub(rk[2] ^ rk[3] ^ rk[0] ^ ck[i + 1]);    in sm4_expandkey()
    137      rk[2] ^= sm4_key_sub(rk[3] ^ rk[0] ^ rk[1] ^ ck[i + 2]);    in sm4_expandkey()
    [all …]
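The two functions hit here are the whole scalar cipher: sm4_round() is one step of the unbalanced Feistel structure, and sm4_expandkey() derives the round keys in rk[] from the user key via the FK/CK constant tables. For orientation, here is a minimal sketch of how a round helper of exactly the shape shown composes into the full 32-round block transform; sm4_enc_sub() (the S-box layer followed by the linear layer) is the kernel-internal helper from crypto/sm4.c and is assumed rather than reimplemented, and the loop below is illustrative, not the kernel's exact code.

    #include <linux/types.h>

    u32 sm4_enc_sub(u32 x);    /* assumed: S-box + linear-layer helper from crypto/sm4.c */

    static inline u32 sm4_round(u32 x0, u32 x1, u32 x2, u32 x3, u32 rk)
    {
            return x0 ^ sm4_enc_sub(x1 ^ x2 ^ x3 ^ rk);
    }

    /* Illustrative 32-round SM4 block transform built from sm4_round(). */
    static void sm4_block_sketch(const u32 rk[32], u32 x[4])
    {
            u32 tmp;
            int i;

            for (i = 0; i < 32; i += 4) {
                    /* each round replaces the oldest of the four state words */
                    x[0] = sm4_round(x[0], x[1], x[2], x[3], rk[i + 0]);
                    x[1] = sm4_round(x[1], x[2], x[3], x[0], rk[i + 1]);
                    x[2] = sm4_round(x[2], x[3], x[0], x[1], rk[i + 2]);
                    x[3] = sm4_round(x[3], x[0], x[1], x[2], rk[i + 3]);
            }

            /* final reverse transform: emit the state words in reverse order */
            tmp = x[0]; x[0] = x[3]; x[3] = tmp;
            tmp = x[1]; x[1] = x[2]; x[2] = tmp;
    }

Decryption reuses the same transform with the round keys in reverse order, which is why the expand step keeps separate encryption and decryption schedules.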
|
/linux-6.12.1/arch/arm64/crypto/ |
D | aes-neonbs-glue.c |
     27  asmlinkage void aesbs_convert_key(u8 out[], u32 const rk[], int rounds);
     29  asmlinkage void aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
     31  asmlinkage void aesbs_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[],
     34  asmlinkage void aesbs_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
     37  asmlinkage void aesbs_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
     40  asmlinkage void aesbs_xts_encrypt(u8 out[], u8 const in[], u8 const rk[],
     42  asmlinkage void aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[],
     46  asmlinkage void neon_aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[],
     48  asmlinkage void neon_aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[],
     50  asmlinkage void neon_aes_ctr_encrypt(u8 out[], u8 const in[], u32 const rk[],
    [all …]
|
D | aes-ce.S |
     28      .macro  load_round_keys, rk, nr, tmp
     29      add     \tmp, \rk, \nr, sxtw #4
     31      ld1     {v17.4s-v20.4s}, [\rk]
     38      .macro  enc_prepare, rounds, rk, temp
     39      load_round_keys \rk, \rounds, \temp
     43      .macro  enc_switch_key, rounds, rk, temp
     44      load_round_keys \rk, \rounds, \temp
     48      .macro  dec_prepare, rounds, rk, temp
     49      load_round_keys \rk, \rounds, \temp
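The one piece of arithmetic worth pausing on here is `add \tmp, \rk, \nr, sxtw #4`: it scales the round count by 16 because the key schedule stores one 128-bit round key per round, letting the tail of the schedule be addressed directly while v17-v20 are loaded from the front. A C rendering of just that address math, offered as a sketch since the rest of the macro body is not shown above:

    #include <linux/types.h>

    /* rk + 16 * nr is the final round key: the schedule holds nr + 1
     * 16-byte keys, with the initial whitening key first. */
    static const u8 *final_round_key(const u8 *rk, int nr)
    {
            return rk + 16 * nr;
    }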
|
D | aes-neon.S |
     98      .macro  do_block, enc, in, rounds, rk, rkp, i
     99      ld1     {v15.4s}, [\rk]
    100      add     \rkp, \rk, #16
    114      .macro  encrypt_block, in, rounds, rk, rkp, i
    115      do_block 1, \in, \rounds, \rk, \rkp, \i
    118      .macro  decrypt_block, in, rounds, rk, rkp, i
    119      do_block 0, \in, \rounds, \rk, \rkp, \i
    205      .macro  do_block_4x, enc, in0, in1, in2, in3, rounds, rk, rkp, i
    206      ld1     {v15.4s}, [\rk]
    207      add     \rkp, \rk, #16
    [all …]
|
D | aes-cipher-core.S |
     14      rk      .req    x0
     57      ldp     \out0, \out1, [rk], #8
     87      ldp     w8, w9, [rk], #16
     88      ldp     w10, w11, [rk, #-8]
|
D | aes-cipher-glue.c |
     12  asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
     13  asmlinkage void __aes_arm64_decrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
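These two prototypes are the whole interface to the scalar arm64 AES core; rk is the expanded key schedule produced by the generic setkey path. A hedged sketch of the typical caller, assuming struct crypto_aes_ctx from <crypto/aes.h> and the standard AES rounds rule (10/12/14 rounds for 16/24/32-byte keys); the actual glue in aes-cipher-glue.c may differ in detail:

    #include <linux/linkage.h>
    #include <crypto/aes.h>

    asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);

    /* Sketch: hand the asm core the expanded encryption schedule plus
     * the round count derived from the key length. */
    static void aes_encrypt_one(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
    {
            int rounds = 6 + ctx->key_length / 4;   /* 10, 12 or 14 */

            __aes_arm64_encrypt(ctx->key_enc, dst, src, rounds);
    }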
|
D | aes-ce-ccm-glue.c |
     35  asmlinkage u32 ce_aes_mac_update(u8 const in[], u32 const rk[], int rounds,
     40          u32 const rk[], u32 rounds, u8 mac[],
     44          u32 const rk[], u32 rounds, u8 mac[],
    101          u32 macp, u32 const rk[], u32 rounds)    in ce_aes_ccm_auth_data() argument
    109      u32 rem = ce_aes_mac_update(in, rk, rounds, blocks, mac,    in ce_aes_ccm_auth_data()
|
D | aes-ce-ccm-core.S |
     17      .macro  load_round_keys, rk, nr, tmp
     19      add     \tmp, \rk, w\tmp, sxtw #4
     20      ld1     {v10.4s-v13.4s}, [\rk]
|
D | aes-ce-glue.c |
     27  asmlinkage void __aes_ce_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
     28  asmlinkage void __aes_ce_decrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
|
D | aes-glue.c |
     77  asmlinkage void aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[],
     79  asmlinkage void aes_ecb_decrypt(u8 out[], u8 const in[], u32 const rk[],
     82  asmlinkage void aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[],
     84  asmlinkage void aes_cbc_decrypt(u8 out[], u8 const in[], u32 const rk[],
     87  asmlinkage void aes_cbc_cts_encrypt(u8 out[], u8 const in[], u32 const rk[],
     89  asmlinkage void aes_cbc_cts_decrypt(u8 out[], u8 const in[], u32 const rk[],
     92  asmlinkage void aes_ctr_encrypt(u8 out[], u8 const in[], u32 const rk[],
     95  asmlinkage void aes_xctr_encrypt(u8 out[], u8 const in[], u32 const rk[],
    112  asmlinkage int aes_mac_update(u8 const in[], u32 const rk[], int rounds,
|
/linux-6.12.1/arch/arm/crypto/ |
D | aes-neonbs-glue.c |
     28  asmlinkage void aesbs_convert_key(u8 out[], u32 const rk[], int rounds);
     30  asmlinkage void aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
     32  asmlinkage void aesbs_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[],
     35  asmlinkage void aesbs_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
     38  asmlinkage void aesbs_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
     41  asmlinkage void aesbs_xts_encrypt(u8 out[], u8 const in[], u8 const rk[],
     43  asmlinkage void aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[],
     48      u8 rk[13 * (8 * AES_BLOCK_SIZE) + 32] __aligned(AES_BLOCK_SIZE);
     71      struct crypto_aes_ctx rk;    in aesbs_setkey() local
     74      err = aes_expandkey(&rk, in_key, key_len);    in aesbs_setkey()
    [all …]
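Lines 71 and 74 are the characteristic bit-sliced setkey pattern: expand the key with the generic C helper into a temporary crypto_aes_ctx, then let the asm routine repack the schedule into the bit-sliced layout held in the large aligned rk array from line 48. Only two lines of that flow appear above, so the following is a hedged reconstruction, with the context struct shape inferred from line 48 and error handling kept minimal:

    #include <linux/linkage.h>
    #include <crypto/aes.h>
    #include <asm/neon.h>

    asmlinkage void aesbs_convert_key(u8 out[], u32 const rk[], int rounds);

    struct aesbs_ctx {      /* shape assumed from line 48 above */
            int rounds;
            u8 rk[13 * (8 * AES_BLOCK_SIZE) + 32] __aligned(AES_BLOCK_SIZE);
    };

    static int aesbs_setkey_sketch(struct aesbs_ctx *ctx, const u8 *in_key,
                                   unsigned int key_len)
    {
            struct crypto_aes_ctx rk;       /* scratch: normal key schedule */
            int err;

            err = aes_expandkey(&rk, in_key, key_len);
            if (err)
                    return err;

            ctx->rounds = 6 + key_len / 4;

            kernel_neon_begin();
            /* repack the round keys into the bit-sliced format in ctx->rk */
            aesbs_convert_key(ctx->rk, rk.key_enc, ctx->rounds);
            kernel_neon_end();

            return 0;
    }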
|
D | sha2-ce-core.S |
     18      rk      .req    r3
     35      vld1.32 {k\ev}, [rk, :128]!
     90      adr     rk, .Lsha256_rcon
     91      vld1.32 {k0}, [rk, :128]!
|
D | aes-cipher.h |
      8  asmlinkage void __aes_arm_encrypt(const u32 rk[], int rounds,
     10  asmlinkage void __aes_arm_decrypt(const u32 rk[], int rounds,
|
D | aes-cipher-core.S |
     16      rk      .req    r0
     84      ldm     rk!, {t1, t2}
    106      ldm     rk!, {r8-r11}
|
D | aes-ce-glue.c |
     29  asmlinkage void ce_aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[],
     31  asmlinkage void ce_aes_ecb_decrypt(u8 out[], u8 const in[], u32 const rk[],
     34  asmlinkage void ce_aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[],
     36  asmlinkage void ce_aes_cbc_decrypt(u8 out[], u8 const in[], u32 const rk[],
     38  asmlinkage void ce_aes_cbc_cts_encrypt(u8 out[], u8 const in[], u32 const rk[],
     40  asmlinkage void ce_aes_cbc_cts_decrypt(u8 out[], u8 const in[], u32 const rk[],
     43  asmlinkage void ce_aes_ctr_encrypt(u8 out[], u8 const in[], u32 const rk[],
|
/linux-6.12.1/include/crypto/ |
D | aria.h |
    407  static inline void aria_add_round_key(u32 *rk, u32 *t0, u32 *t1, u32 *t2,    in aria_add_round_key() argument
    410      *t0 ^= rk[0];    in aria_add_round_key()
    411      *t1 ^= rk[1];    in aria_add_round_key()
    412      *t2 ^= rk[2];    in aria_add_round_key()
    413      *t3 ^= rk[3];    in aria_add_round_key()
    434  static inline void aria_gsrk(u32 *rk, u32 *x, u32 *y, u32 n)    in aria_gsrk() argument
    439      rk[0] = (x[0]) ^    in aria_gsrk()
    442      rk[1] = (x[1]) ^    in aria_gsrk()
    445      rk[2] = (x[2]) ^    in aria_gsrk()
    448      rk[3] = (x[3]) ^    in aria_gsrk()
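aria_add_round_key() is a plain four-word XOR, but aria_gsrk() is denser: the truncated assignments at lines 439-448 compute rk = x ^ (y >>> n) over a 128-bit value stored as four u32 words, splitting the rotation amount n into a whole-word offset q and a bit offset r. The same computation in loop form, as a sketch (valid for r != 0, which holds for the rotation amounts ARIA's key schedule actually uses):

    #include <linux/types.h>

    /* rk = x ^ (y >>> n) over 128 bits; the unrolled rk[0]..rk[3]
     * assignments above are this loop written out by hand. */
    static void aria_gsrk_sketch(u32 rk[4], const u32 x[4], const u32 y[4],
                                 u32 n)
    {
            u32 q = 4 - (n / 32);   /* whole-word part of the rotation */
            u32 r = n % 32;         /* bit part, assumed non-zero */
            int i;

            for (i = 0; i < 4; i++)
                    rk[i] = x[i] ^ (y[(q + i) % 4] >> r)
                                 ^ (y[(q + i + 3) % 4] << (32 - r));
    }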
|
D | sm4.h |
     46  void sm4_crypt_block(const u32 *rk, u8 *out, const u8 *in);
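This single prototype is the generic library entry point behind the SM4 glue code elsewhere in this listing: the same sm4_crypt_block() encrypts or decrypts depending on which half of the expanded schedule it is handed. A short usage sketch against the <crypto/sm4.h> API, assuming the struct sm4_ctx layout (rkey_enc/rkey_dec) from that header:

    #include <crypto/sm4.h>

    /* One-block encrypt/decrypt round trip with the library SM4 API. */
    static int sm4_roundtrip(const u8 key[SM4_KEY_SIZE],
                             const u8 in[SM4_BLOCK_SIZE])
    {
            struct sm4_ctx ctx;
            u8 ct[SM4_BLOCK_SIZE], pt[SM4_BLOCK_SIZE];
            int err;

            err = sm4_expandkey(&ctx, key, SM4_KEY_SIZE);
            if (err)
                    return err;

            sm4_crypt_block(ctx.rkey_enc, ct, in);  /* encrypt */
            sm4_crypt_block(ctx.rkey_dec, pt, ct);  /* decrypt back */

            return 0;
    }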
|
/linux-6.12.1/arch/x86/crypto/ |
D | aria-gfni-avx512-asm_64.S |
    272          t0, rk, round) \
    274      vpbroadcastb ((round * 16) + 3)(rk), t0; \
    276      vpbroadcastb ((round * 16) + 2)(rk), t0; \
    278      vpbroadcastb ((round * 16) + 1)(rk), t0; \
    280      vpbroadcastb ((round * 16) + 0)(rk), t0; \
    282      vpbroadcastb ((round * 16) + 7)(rk), t0; \
    284      vpbroadcastb ((round * 16) + 6)(rk), t0; \
    286      vpbroadcastb ((round * 16) + 5)(rk), t0; \
    288      vpbroadcastb ((round * 16) + 4)(rk), t0; \
    290      vpbroadcastb ((round * 16) + 11)(rk), t0; \
    [all …]
|
D | aria-aesni-avx2-asm_64.S |
    286          t0, rk, idx, round) \
    288      vpbroadcastb ((round * 16) + idx + 3)(rk), t0; \
    290      vpbroadcastb ((round * 16) + idx + 2)(rk), t0; \
    292      vpbroadcastb ((round * 16) + idx + 1)(rk), t0; \
    294      vpbroadcastb ((round * 16) + idx + 0)(rk), t0; \
    296      vpbroadcastb ((round * 16) + idx + 7)(rk), t0; \
    298      vpbroadcastb ((round * 16) + idx + 6)(rk), t0; \
    300      vpbroadcastb ((round * 16) + idx + 5)(rk), t0; \
    302      vpbroadcastb ((round * 16) + idx + 4)(rk), t0; \
    464          mem_tmp, rk, round) \    argument
    [all …]
|
D | aria-aesni-avx-asm_64.S |
    270          t0, t1, t2, rk, \
    273      vbroadcastss ((round * 16) + idx + 0)(rk), t0; \
    285      vbroadcastss ((round * 16) + idx + 4)(rk), t0; \
    423          mem_tmp, rk, round) \    argument
    426          y0, y7, y2, rk, 8, round); \
    441          y0, y7, y2, rk, 0, round); \
    478          mem_tmp, rk, round) \    argument
    481          y0, y7, y2, rk, 8, round); \
    496          y0, y7, y2, rk, 0, round); \
    533          mem_tmp, rk, round, last_round) \    argument
    [all …]
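All three vector ARIA implementations above index the key schedule the same way: 16 key bytes per round, with vpbroadcastb (or vbroadcastss for whole 4-byte words) replicating one key byte across every lane before it is XORed into a byte-sliced state row. The scalar equivalent of that addressing, as an illustrative sketch:

    #include <linux/types.h>

    /* Scalar view of the broadcast addressing: each vector step pulls
     * the key byte at round * 16 + idx + k and replicates it across
     * all lanes of one byte-sliced state row. */
    static u8 aria_rk_byte(const u8 *rk, int round, int idx, int k)
    {
            return rk[round * 16 + idx + k];
    }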
|
D | sm4_aesni_avx2_glue.c |
     22  asmlinkage void sm4_aesni_avx2_ctr_enc_blk16(const u32 *rk, u8 *dst,
     24  asmlinkage void sm4_aesni_avx2_cbc_dec_blk16(const u32 *rk, u8 *dst,
|
D | sm4_aesni_avx_glue.c |
     22  asmlinkage void sm4_aesni_avx_crypt4(const u32 *rk, u8 *dst,
     24  asmlinkage void sm4_aesni_avx_crypt8(const u32 *rk, u8 *dst,
     26  asmlinkage void sm4_aesni_avx_ctr_enc_blk8(const u32 *rk, u8 *dst,
     28  asmlinkage void sm4_aesni_avx_cbc_dec_blk8(const u32 *rk, u8 *dst,
|
D | sm4-avx.h |
      8  typedef void (*sm4_crypt_func)(const u32 *rk, u8 *dst, const u8 *src, u8 *iv);
|
/linux-6.12.1/arch/loongarch/include/asm/ |
D | inst.h |
    343      unsigned int rk : 5;    member
    350      unsigned int rk : 5;    member
    507  u32 larch_insn_gen_or(enum loongarch_gpr rd, enum loongarch_gpr rj, enum loongarch_gpr rk);
    708          enum loongarch_gpr rk) \
    713      insn->reg3_format.rk = rk; \
    759          enum loongarch_gpr rk, \
    766      insn->reg3sa2_format.rk = rk; \
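Here rk is not a round key at all but LoongArch's third general-purpose-register operand: a 5-bit field of the reg3/reg3sa2 instruction formats, filled in by the emit helpers at lines 708-766. What those bitfield writes amount to, sketched as a raw encoding (assuming the standard LoongArch 3R layout: rd in bits 4:0, rj in bits 9:5, rk in bits 14:10):

    #include <linux/types.h>

    /* Illustrative raw 3R encoding; the kernel writes the same bits
     * through the reg3_format bitfield struct instead. */
    static u32 larch_reg3_sketch(u32 opcode, u32 rd, u32 rj, u32 rk)
    {
            return opcode | ((rk & 0x1f) << 10) | ((rj & 0x1f) << 5) |
                   (rd & 0x1f);
    }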
|
/linux-6.12.1/Documentation/userspace-api/media/v4l/ |
D | metafmt-rkisp1.rst |
      3  .. _v4l2-meta-fmt-rk-isp1-stat-3a:
     21  .. _v4l2-meta-fmt-rk-isp1-params:
     41  .. _v4l2-meta-fmt-rk-isp1-ext-params:
|