Lines Matching "+full:ext +full:- +full:32 +full:k"

2 // Accelerated CRC-T10DIF using arm64 NEON and Crypto Extensions instructions
14 // Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions
62 // /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
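Both header lines above are genuine: line 2 describes this arm64 port, while line 14 is retained from the x86 PCLMULQDQ implementation it was derived from, and line 62 is the tail of the URL of the Intel folding white paper both versions cite. As a reference point for everything the matches below implement, here is a hedged, self-contained bit-serial CRC-T10DIF in C (polynomial 0x8BB7, zero seed, no bit reflection); this sketch is mine, not part of the file:

    #include <stdint.h>
    #include <stdio.h>
    #include <stddef.h>

    /* Bit-serial CRC-T10DIF: each message byte enters the high bits. */
    static uint16_t crc_t10dif_ref(uint16_t crc, const uint8_t *buf, size_t len)
    {
            while (len--) {
                    crc ^= (uint16_t)(*buf++) << 8;
                    for (int i = 0; i < 8; i++)
                            crc = (crc & 0x8000) ? (crc << 1) ^ 0x8bb7
                                                 : crc << 1;
            }
            return crc;
    }

    int main(void)
    {
            /* Standard check string; CRC-T10DIF's check value is 0xd0db. */
            printf("%04x\n", crc_t10dif_ref(0, (const uint8_t *)"123456789", 9));
            return 0;
    }

Any vectorized implementation, including the folding code matched below, must agree with this byte-at-a-time definition.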
69 .arch armv8-a+crypto
112 ushr k00_16.2d, k32_48.2d, #32
136 ext t4.8b, ad.8b, ad.8b, #1 // A1
137 ext t5.8b, ad.8b, ad.8b, #2 // A2
138 ext t6.8b, ad.8b, ad.8b, #3 // A3
146 pmull t3.8h, ad.8b, bd4.8b // K = A*B4
160 pmull2 t3.8h, ad.16b, bd4.16b // K = A*B4
177 // t7 = (K) (P6 + P7) << 32
189 ext t4.16b, t4.16b, t4.16b, #15
190 ext t5.16b, t5.16b, t5.16b, #14
191 ext t6.16b, t6.16b, t6.16b, #13
192 ext t3.16b, t3.16b, t3.16b, #12
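Lines 112 through 192 are from the __pmull_p8 fallback for CPUs whose NEON has only the 8-bit polynomial multiply, not the 64-bit PMULL of the Crypto Extensions: byte-rotated copies of the operands (A1..A3 here, with rotated copies of the other operand such as B4 at lines 146 and 160) feed 8-bit pmull/pmull2, the k00_16/k32_48 constants built at line 112 mask off lanes that would wrap, and the ext rotations by #15..#12 then act as left shifts by 8..32 bits that slot each masked partial product into place (the '<< 32' comment at line 177 names one of them). A hedged bit-serial model of the 64 x 64 -> 128 carryless product all of this synthesizes; the loop below is my illustration, not the kernel's method:

    #include <stdint.h>

    /* Carryless multiply: every set bit i of b contributes (a << i) to
     * a 128-bit accumulator, with XOR in place of addition. */
    static void clmul64(uint64_t a, uint64_t b, uint64_t out[2])
    {
            out[0] = 0;                     /* low 64 bits of the product */
            out[1] = 0;                     /* high 64 bits */
            for (int i = 0; i < 64; i++) {
                    if ((b >> i) & 1) {
                            out[0] ^= a << i;
                            if (i)
                                    out[1] ^= a >> (64 - i);
                    }
            }
    }

The p8 path gets the same 128-bit product from a handful of 8-bit multiplies instead of 64 conditional XORs; the p64 path computes it in a single pmull/pmull2 pair.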
216 // Fold reg1, reg2 into the next 32 data bytes, storing the result back
230 CPU_LE( ext v11.16b, v11.16b, v11.16b, #8 )
231 CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 )
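Line 216 heads the fold_32_bytes macro (the comment continues 'into reg1, reg2.' in the source), and lines 230-231 are the little-endian fixups of the data it loads. The underlying identity: to advance a 128-bit accumulator A = hi * x^64 + lo across D bits of new data, compute hi * (x^(D+64) mod G) XOR lo * (x^D mod G) XOR data; the result is unreduced but congruent to A * x^D + data modulo G. A hedged sketch of the same identity scaled to a 32-bit accumulator so the products fit ordinary integers (widths and constants here are illustrative, not the kernel's):

    #include <stdint.h>

    /* Bit-serial carryless multiply; operands stay within 16 bits here. */
    static uint32_t clmul16(uint16_t a, uint16_t b)
    {
            uint32_t r = 0;

            for (int i = 0; i < 16; i++)
                    if ((b >> i) & 1)
                            r ^= (uint32_t)a << i;
            return r;
    }

    /* x^n mod G(x) for G = 0x18bb7, the T10DIF polynomial with its
     * x^16 term written out. */
    static uint16_t xn_mod_g(int n)
    {
            uint32_t r = 1;

            while (n--) {
                    r <<= 1;
                    if (r & 0x10000)
                            r ^= 0x18bb7;
            }
            return (uint16_t)r;
    }

    /* One fold step: returns a value congruent to acc * x^32 + data. */
    static uint32_t fold_step(uint32_t acc, uint32_t data)
    {
            uint16_t k_hi = xn_mod_g(32 + 16);  /* multiplies the high half */
            uint16_t k_lo = xn_mod_g(32);       /* multiplies the low half */

            return clmul16(acc >> 16, k_hi) ^
                   clmul16(acc & 0xffff, k_lo) ^ data;
    }

fold_32_bytes does the same for two 128-bit registers at once, one pmull per 64-bit half; in the main loop each register is folded across a distance of 128 bytes.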
283 CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 )
284 CPU_LE( ext v1.16b, v1.16b, v1.16b, #8 )
285 CPU_LE( ext v2.16b, v2.16b, v2.16b, #8 )
286 CPU_LE( ext v3.16b, v3.16b, v3.16b, #8 )
287 CPU_LE( ext v4.16b, v4.16b, v4.16b, #8 )
288 CPU_LE( ext v5.16b, v5.16b, v5.16b, #8 )
289 CPU_LE( ext v6.16b, v6.16b, v6.16b, #8 )
290 CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
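Each of these CPU_LE ext lines (like 230-231 above and 347, 444 below) pairs with a CPU_LE rev64 on the same register just above it in the source: on little-endian kernels, rev64 reverses the bytes within each 64-bit half and ext #8 then swaps the halves, so the pair byte-reverses the whole register into the most-significant-byte-first order the polynomial arithmetic assumes. A hedged scalar model of the two steps:

    #include <stdint.h>

    /* v[0] models lane 0 of the NEON register.  The net effect of
     * rev64 followed by ext #8 is a full 16-byte reversal. */
    static void rev64_then_ext8(uint8_t v[16])
    {
            uint8_t t[16];

            for (int i = 0; i < 8; i++) {   /* rev64: per-64-bit reversal */
                    t[i] = v[7 - i];
                    t[8 + i] = v[15 - i];
            }
            for (int i = 0; i < 16; i++)    /* ext #8: swap the halves */
                    v[i] = t[(i + 8) % 16];
    }

On big-endian kernels the CPU_LE lines assemble to nothing, since the loads already produce the required byte order.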
305 // While >= 128 data bytes remain (not counting v0-v7), fold the 128
306 // bytes v0-v7 into them, storing the result back into v0-v7.
316 // Now fold the 112 bytes in v0-v6 into the 16 bytes in v7.
326 // Fold across 32 bytes.
336 adds len, len, #(128-16)
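Lines 305-336 sketch the main loop's bookkeeping: len is kept biased so a simple sign test (b.ge) doubles as the how-much-remains check, and the adds at line 336 re-biases from the 128-byte loop to the 16-byte loop once the 112 bytes in v0-v6 have been folded into v7. A hedged control-flow model of the >= 256-byte path, counting folds only, with no CRC math and the sub-256 entry path omitted:

    #include <stdio.h>

    static void schedule(long len)          /* model assumes len >= 256 */
    {
            len -= 256;             /* 128 for the initial v0-v7 load,
                                       plus an extra 128 of loop bias */
            while (len >= 0) {      /* >= 128 bytes left beyond v0-v7 */
                    printf("fold v0-v7 into next 128 bytes\n");
                    len -= 128;
            }
            len += 128 - 16;        /* 'adds len, len, #(128-16)' */
            while (len >= 0) {      /* >= 16 bytes left beyond v7 */
                    printf("fold v7 into next 16 bytes\n");
                    len -= 16;
            }
            if (len + 16 > 0)       /* 1..15 trailing bytes */
                    printf("partial segment: %ld bytes\n", len + 16);
    }

    int main(void)
    {
            schedule(300);  /* one 128-byte fold, two 16-byte folds,
                               then a 12-byte partial segment */
            return 0;
    }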
347 CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 )
367 ldr q0, [buf, #-16]
369 CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 )
371 // v1 = high order part of second chunk: v7 left-shifted by 'len' bytes.
377 // v3 = first chunk: v7 right-shifted by '16-len' bytes.
382 // Convert to 8-bit masks: 'len' 0x00 bytes, then '16-len' 0xff bytes.
385 // v2 = second chunk: 'len' bytes from v0 (low-order bytes),
386 // then '16-len' bytes from v1 (high-order bytes).
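Lines 367-386 handle a remainder of 16+n bytes (1 <= n <= 15) without needing a fold constant per possible length: the data is redivided into an n-byte first chunk and a 16-byte second chunk, built from two tbl shifts of v7 and a bsl merge against the reloaded last 16 data bytes. A hedged byte-array model of just the redivision; arrays are written most-significant byte first, and all names are mine:

    #include <stdint.h>
    #include <string.h>

    struct split {
            uint8_t first[15];      /* n bytes: the oldest part of acc */
            uint8_t second[16];     /* rest of acc ++ the n tail bytes */
    };

    /* acc = current 16-byte accumulator (v7); tail = the n remaining
     * message bytes.  The kernel extracts the same n tail bytes from
     * v0, the reloaded last 16 original bytes, via the bsl mask. */
    static struct split redivide(const uint8_t acc[16],
                                 const uint8_t *tail, int n)
    {
            struct split s;

            memcpy(s.first, acc, n);                /* v3: acc >> (16-n)B */
            memcpy(s.second, acc + n, 16 - n);      /* v1: acc << nB      */
            memcpy(s.second + (16 - n), tail, n);   /* bsl merge          */
            return s;
    }

Folding 'first' across 16 bytes into 'second' then leaves a single 16-byte value for the final reduction, which is why one fixed set of fold constants suffices.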
396 // Reduce the 128-bit value M(x), stored in v7, to the final 16-bit CRC.
405 // x^64. This produces a 128-bit value congruent to x^64 * M(x) and
407 ext v0.16b, v2.16b, v7.16b, #8
411 // Fold the high 32 bits into the low 96 bits. This produces a 96-bit
413 ext v1.16b, v0.16b, v2.16b, #12 // extract high 32 bits
414 mov v0.s[3], v2.s[0] // zero high 32 bits
415 __pmull_\p v1, v1, fold_consts // high 32 bits * x^48 * (x^48 mod G(x))
423 __pmull_\p v1, v0, fold_consts, 2 // high 32 bits * floor(x^48 / G(x))
424 ushr v1.2d, v1.2d, #32 // /= x^32
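Lines 396-424 are the final reduction: the x^48 factors baked into the constants keep the low 48 bits of each intermediate zero, the 128-bit value shrinks to 96 and then to 48 meaningful bits, and a Barrett step with the precomputed floor(x^48 / G(x)) yields the remainder. For binary polynomials the Barrett quotient is exact, so no correction loop is needed. A hedged, scaled-down C version (32-bit input and mu = floor(x^32 / G(x)) rather than the kernel's 48-bit variant), with a bit-serial cross-check:

    #include <stdint.h>
    #include <stdio.h>

    #define G 0x18bb7ULL    /* T10DIF polynomial incl. the x^16 term */

    static uint64_t clmul(uint64_t a, uint64_t b)
    {
            uint64_t r = 0;

            for (; b; b >>= 1, a <<= 1)
                    if (b & 1)
                            r ^= a;
            return r;
    }

    /* mu = floor(x^32 / G(x)), by polynomial long division. */
    static uint64_t barrett_mu(void)
    {
            uint64_t rem = 1ULL << 32, mu = 0;

            for (int i = 16; i >= 0; i--)
                    if (rem & (1ULL << (i + 16))) {
                            rem ^= G << i;
                            mu |= 1ULL << i;
                    }
            return mu;
    }

    /* A mod G for deg(A) < 32: two carryless multiplies, no loop. */
    static uint16_t barrett_reduce(uint32_t A)
    {
            uint64_t q = clmul(A >> 16, barrett_mu()) >> 16;

            return (uint16_t)(A ^ clmul(q, G));     /* A + q*G = A mod G */
    }

    int main(void)
    {
            for (uint32_t A = 1; A < 1u << 20; A = A * 5 + 1) {
                    uint32_t r = A;         /* bit-serial reference */

                    for (int i = 31; i >= 16; i--)
                            if (r & (1u << i))
                                    r ^= (uint32_t)(G << (i - 16));
                    if ((uint16_t)r != barrett_reduce(A))
                            return printf("mismatch at %x\n", A), 1;
            }
            printf("ok\n");
            return 0;
    }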
444 CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
451 // Load the fold-across-16-bytes constants.
457 subs len, len, #32
458 b.ge .Lfold_16_bytes_loop_\@ // 32 <= len <= 255
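Lines 451-458 come from the 16-to-255-byte entry path, where the first 16 bytes are already sitting in v7 with the seed CRC XORed into the top: the single 'subs len, len, #32' both accounts for those 16 bytes and tests whether a full 16-byte fold target remains, giving a three-way branch. A hedged model of just that branch:

    #include <stdio.h>

    static const char *classify(long len)   /* assumes 16 <= len <= 255 */
    {
            if (len == 16)
                    return "reduce final 16 bytes";  /* b.eq       */
            if (len - 32 >= 0)                       /* subs; b.ge */
                    return "fold-16-bytes loop";
            return "partial segment";                /* 17..31 bytes */
    }

    int main(void)
    {
            printf("16 -> %s\n", classify(16));
            printf("24 -> %s\n", classify(24));
            printf("48 -> %s\n", classify(48));
            return 0;
    }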
507 // For 1 <= len <= 15, the 16-byte vector beginning at &byteshift_table[16 -
509 // ..., 0x80} XOR the index vector to shift right by '16 - len' bytes.
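Lines 507 and 509 bracket the .Lbyteshift_table comment; the unmatched line between them states that the same 16-byte window is also the tbl index vector for shifting left by 'len' bytes. The dual use works because tbl yields zero for any out-of-range index, and XORing the indices with 0x80 flips exactly which entries are in range. A hedged demonstration with a scalar tbl model (index 0 is the least significant byte; the table bytes are reconstructed from the property the comment states):

    #include <stdint.h>
    #include <stdio.h>

    static const uint8_t byteshift_table[32] = {
            0x00, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
            0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
            0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
            0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
    };

    /* tbl: out-of-range indices (anything >= 16 here) select zero. */
    static void tbl(uint8_t d[16], const uint8_t s[16], const uint8_t ix[16])
    {
            for (int i = 0; i < 16; i++)
                    d[i] = ix[i] < 16 ? s[ix[i]] : 0;
    }

    int main(void)
    {
            uint8_t v[16], l[16], r[16], ix[16];

            for (int i = 0; i < 16; i++)
                    v[i] = 0xa0 + i;

            for (int len = 1; len <= 15; len++) {
                    const uint8_t *w = &byteshift_table[16 - len];

                    tbl(l, v, w);                   /* left by len     */
                    for (int i = 0; i < 16; i++)
                            ix[i] = w[i] ^ 0x80;
                    tbl(r, v, ix);                  /* right by 16-len */

                    for (int i = 0; i < 16; i++)
                            if (l[i] != (i >= len ? v[i - len] : 0) ||
                                r[i] != (i < len ? v[i + 16 - len] : 0))
                                    return printf("bad len %d\n", len), 1;
            }
            printf("ok\n");
            return 0;
    }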