Lines Matching +full:2 +full:- +full:8

2 // Accelerated CRC-T10DIF using arm64 NEON and Crypto Extensions instructions
8 // it under the terms of the GNU General Public License version 2 as
14 // Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions
26 // General Public License (GPL) Version 2, available from the file
62 // /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
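The matched header lines above identify this file as the arm64 port of the x86 PCLMULQDQ-based CRC-T10DIF code. As a reference for what the accelerated routine computes, here is a minimal bit-at-a-time CRC-T10DIF sketch in C (polynomial 0x8bb7, MSB-first, no bit reflection, no final XOR). crc_t10dif_ref is a made-up name for illustration; it is not the kernel's own generic implementation.

#include <stddef.h>
#include <stdint.h>

/* G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1,
 * written without the leading x^16 term: 0x8bb7. */
#define CRC_T10DIF_POLY 0x8bb7

/* Bit-at-a-time CRC-T10DIF: MSB-first, no reflection, no final XOR. */
static uint16_t crc_t10dif_ref(uint16_t crc, const uint8_t *buf, size_t len)
{
	for (size_t i = 0; i < len; i++) {
		crc ^= (uint16_t)buf[i] << 8;
		for (int bit = 0; bit < 8; bit++) {
			if (crc & 0x8000)
				crc = (uint16_t)((crc << 1) ^ CRC_T10DIF_POLY);
			else
				crc = (uint16_t)(crc << 1);
		}
	}
	return crc;
}

For any buffer, crc_t10dif_ref(0, buf, len) should agree with the CRC produced by the PMULL-accelerated routine in this file.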
69 .arch armv8-a+crypto
110 movi k32_48.2d, #0xffffffff
111 mov k32_48.h[2], k32_48.h[0]
112 ushr k00_16.2d, k32_48.2d, #32
116 movi perm4.8b, #8
117 dup perm1.2d, x5
119 ushr perm2.2d, perm1.2d, #8
120 ushr perm3.2d, perm1.2d, #16
121 ushr perm4.2d, perm1.2d, #24
122 sli perm2.2d, perm1.2d, #56
123 sli perm3.2d, perm1.2d, #48
124 sli perm4.2d, perm1.2d, #40
136 ext t4.8b, ad.8b, ad.8b, #1 // A1
137 ext t5.8b, ad.8b, ad.8b, #2 // A2
138 ext t6.8b, ad.8b, ad.8b, #3 // A3
140 pmull t4.8h, t4.8b, fold_consts.8b // F = A1*B
141 pmull t8.8h, ad.8b, bd1.8b // E = A*B1
142 pmull t5.8h, t5.8b, fold_consts.8b // H = A2*B
143 pmull t7.8h, ad.8b, bd2.8b // G = A*B2
144 pmull t6.8h, t6.8b, fold_consts.8b // J = A3*B
145 pmull t9.8h, ad.8b, bd3.8b // I = A*B3
146 pmull t3.8h, ad.8b, bd4.8b // K = A*B4
154 pmull2 t4.8h, t4.16b, fold_consts.16b // F = A1*B
155 pmull2 t8.8h, ad.16b, bd1.16b // E = A*B1
156 pmull2 t5.8h, t5.16b, fold_consts.16b // H = A2*B
157 pmull2 t7.8h, ad.16b, bd2.16b // G = A*B2
158 pmull2 t6.8h, t6.16b, fold_consts.16b // J = A3*B
159 pmull2 t9.8h, ad.16b, bd3.16b // I = A*B3
160 pmull2 t3.8h, ad.16b, bd4.16b // K = A*B4
166 uzp1 t8.2d, t4.2d, t5.2d
167 uzp2 t4.2d, t4.2d, t5.2d
168 uzp1 t7.2d, t6.2d, t3.2d
169 uzp2 t6.2d, t6.2d, t3.2d
171 // t4 = (L) (P0 + P1) << 8
184 zip2 t5.2d, t8.2d, t4.2d
185 zip1 t4.2d, t8.2d, t4.2d
186 zip2 t3.2d, t7.2d, t6.2d
187 zip1 t6.2d, t7.2d, t6.2d
205 pmull \rq\().8h, \ad\().8b, \bd\().8b // D = A*B
207 pmull2 \rq\().8h, \ad\().16b, \bd\().16b // D = A*B
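The fragments from lines 136-207 belong to the __pmull_p8 fallback and the __pmull wrappers: when the 64-bit PMULL instruction is not available, a 64x64 -> 128-bit carry-less multiply is reassembled from 8-bit polynomial multiplies (the A1*B, A*B1, ... partial products) plus the uzp/zip shuffles. A plain C model of the primitive being computed, as a sketch only (clmul64 and struct u128 are invented names):

#include <stdint.h>

struct u128 { uint64_t lo, hi; };

/*
 * Carry-less (GF(2)[x]) multiply of two 64-bit polynomials: the operation a
 * 64-bit PMULL performs directly, and the one the 8-bit pmull partial
 * products and shuffles above piece together on CPUs without it.
 */
static struct u128 clmul64(uint64_t a, uint64_t b)
{
	struct u128 r = { 0, 0 };

	for (int i = 0; i < 64; i++) {
		if ((b >> i) & 1) {
			r.lo ^= a << i;
			if (i)
				r.hi ^= a >> (64 - i);
		}
	}
	return r;
}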
221 __pmull_\p v8, \reg1, fold_consts, 2
227 __pmull_\p v9, \reg2, fold_consts, 2
230 CPU_LE( ext v11.16b, v11.16b, v11.16b, #8 )
231 CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 )
242 __pmull_\p \src_reg, \src_reg, fold_consts, 2
244 ld1 {fold_consts.2d}, [fold_consts_ptr], #16
255 pmull2 \rd\().1q, \rn\().2d, \rm\().2d
283 CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 )
284 CPU_LE( ext v1.16b, v1.16b, v1.16b, #8 )
285 CPU_LE( ext v2.16b, v2.16b, v2.16b, #8 )
286 CPU_LE( ext v3.16b, v3.16b, v3.16b, #8 )
287 CPU_LE( ext v4.16b, v4.16b, v4.16b, #8 )
288 CPU_LE( ext v5.16b, v5.16b, v5.16b, #8 )
289 CPU_LE( ext v6.16b, v6.16b, v6.16b, #8 )
290 CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
298 ld1 {fold_consts.2d}, [fold_consts_ptr]
305 // While >= 128 data bytes remain (not counting v0-v7), fold the 128
306 // bytes v0-v7 into them, storing the result back into v0-v7.
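Each pass of this loop folds the 128-byte accumulator window forward across the next 128 bytes of data. For a single 16-byte register: an accumulator A = hi*x^64 + lo, pushed 128 bytes (1024 bits) further along the message, is congruent mod G(x) to lo*K1 + hi*K2, where K1 = x^(8*128) mod G(x) and K2 = x^(8*128+64) mod G(x) are the fold constants loaded just before the loop; that value is then XORed with the next 16 data bytes. The C sketch below ignores the CPU_LE byte reversal and uses hypothetical names, reusing the same clmul64 idea as the earlier sketch:

#include <stdint.h>

struct u128 { uint64_t lo, hi; };

/* 64x64 -> 128-bit carry-less multiply, as in the earlier sketch. */
static struct u128 clmul64(uint64_t a, uint64_t b)
{
	struct u128 r = { 0, 0 };

	for (int i = 0; i < 64; i++) {
		if ((b >> i) & 1) {
			r.lo ^= a << i;
			if (i)
				r.hi ^= a >> (64 - i);
		}
	}
	return r;
}

static struct u128 xor128(struct u128 a, struct u128 b)
{
	struct u128 r = { a.lo ^ b.lo, a.hi ^ b.hi };
	return r;
}

/*
 * One 16-byte fold step: 'acc' stands for the polynomial hi*x^64 + lo.
 * Shifting it 128 bytes along the message multiplies it by x^1024, which is
 * congruent mod G(x) to lo*k1 + hi*k2; the result is XORed with the next
 * 16-byte data block (byte-order fixups are omitted in this sketch).
 */
static struct u128 fold_step(struct u128 acc, struct u128 next_block,
			     uint64_t k1, uint64_t k2)
{
	return xor128(xor128(clmul64(acc.lo, k1), clmul64(acc.hi, k2)),
		      next_block);
}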
316 // Now fold the 112 bytes in v0-v6 into the 16 bytes in v7.
320 ld1 {fold_consts.2d}, [fold_consts_ptr], #16
336 adds len, len, #(128-16)
343 __pmull_\p v7, v7, fold_consts, 2
347 CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 )
367 ldr q0, [buf, #-16]
369 CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 )
371 // v1 = high order part of second chunk: v7 left-shifted by 'len' bytes.
377 // v3 = first chunk: v7 right-shifted by '16-len' bytes.
382 // Convert to 8-bit masks: 'len' 0x00 bytes, then '16-len' 0xff bytes.
385 // v2 = second chunk: 'len' bytes from v0 (low-order bytes),
386 // then '16-len' bytes from v1 (high-order bytes).
391 __pmull_\p v7, v3, fold_consts, 2
396 // Reduce the 128-bit value M(x), stored in v7, to the final 16-bit CRC.
401 ld1 {fold_consts.2d}, [fold_consts_ptr], #16
405 // x^64. This produces a 128-bit value congruent to x^64 * M(x) and
407 ext v0.16b, v2.16b, v7.16b, #8
408 __pmull_\p v7, v7, fold_consts, 2 // high bits * x^48 * (x^80 mod G(x))
411 // Fold the high 32 bits into the low 96 bits. This produces a 96-bit
419 ld1 {fold_consts.2d}, [fold_consts_ptr]
423 __pmull_\p v1, v0, fold_consts, 2 // high 32 bits * floor(x^48 / G(x))
424 ushr v1.2d, v1.2d, #32 // /= x^32
426 ushr v0.2d, v0.2d, #48
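The Barrett step at line 423 multiplies the high 32 bits by floor(x^48 / G(x)), a constant loaded from the constants table (the table entry itself is not among the matched lines). It can be recomputed by GF(2) polynomial long division; a sketch, with polydiv_x_n as a hypothetical helper name:

#include <stdint.h>

/*
 * floor(x^n / G(x)) over GF(2), by schoolbook polynomial long division.
 * g is G(x) including its leading x^16 term; n must be below 64.
 */
static uint64_t polydiv_x_n(unsigned n)
{
	const uint64_t g = 0x18bb7;	/* x^16 + x^15 + ... + 1 */
	uint64_t rem = 1ULL << n;	/* dividend x^n */
	uint64_t quot = 0;

	for (int i = (int)n - 16; i >= 0; i--) {
		if (rem & (1ULL << (i + 16))) {
			rem ^= g << i;
			quot |= 1ULL << i;
		}
	}
	return quot;	/* the remainder, x^n mod G(x), is left in rem */
}

polydiv_x_n(48) recomputes the floor(x^48 / G(x)) factor referenced at line 423.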
444 CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
451 // Load the fold-across-16-bytes constants.
452 ld1 {fold_consts.2d}, [fold_consts_ptr], #16
487 // G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + x^0
489 .quad 0x0000000000006123 // x^(8*128) mod G(x)
490 .quad 0x0000000000002295 // x^(8*128+64) mod G(x)
495 .quad 0x000000000000857d // x^(2*128) mod G(x)
496 .quad 0x0000000000007acc // x^(2*128+64) mod G(x)
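Each of these fold constants is x^N mod G(x) for the exponent named in its comment. They can be cross-checked with a few lines of C; x_pow_n_mod_g is a hypothetical helper, not kernel code:

#include <stdint.h>

#define CRC_T10DIF_POLY 0x8bb7	/* G(x) without its leading x^16 term */

/* x^n mod G(x) over GF(2): one multiply-by-x and conditional reduce per step. */
static uint16_t x_pow_n_mod_g(unsigned n)
{
	uint16_t rem = 0x0001;	/* x^0 */

	for (unsigned i = 0; i < n; i++) {
		unsigned carry = rem & 0x8000;

		rem = (uint16_t)(rem << 1);
		if (carry)
			rem ^= CRC_T10DIF_POLY;
	}
	return rem;
}

For example, x_pow_n_mod_g(8*128) recomputes the x^(8*128) mod G(x) entry and x_pow_n_mod_g(2*128 + 64) the x^(2*128+64) mod G(x) entry.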
507 // For 1 <= len <= 15, the 16-byte vector beginning at &byteshift_table[16 -
509 // ..., 0x80} XOR the index vector to shift right by '16 - len' bytes.
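A C model of this byte-shift trick, assuming the 32-byte table layout commonly used by the x86 and arm64 versions ({0x00, 0x81..0x8f, 0x00, 0x01..0x0f}) and the NEON tbl rule that an out-of-range index selects 0x00; helper names are hypothetical:

#include <stdint.h>

/* Assumed table layout matching the shift trick described above. */
static const uint8_t byteshift_table[32] = {
	0x00, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
};

/* Emulate single-register NEON tbl: an index of 16 or more selects 0x00. */
static void tbl16(uint8_t out[16], const uint8_t in[16], const uint8_t idx[16])
{
	for (int i = 0; i < 16; i++)
		out[i] = (idx[i] < 16) ? in[idx[i]] : 0x00;
}

/* Shift a 16-byte block by 'len' byte positions (1 <= len <= 15):
 * in[i] moves to out[i + len], and out[0..len-1] become zero. */
static void byteshift_up(uint8_t out[16], const uint8_t in[16], unsigned len)
{
	tbl16(out, in, &byteshift_table[16 - len]);
}

Indexing the table at &byteshift_table[16 - len] moves each input byte up by 'len' positions with zero fill below; XORing the same index vector with 0x80 instead selects the complementary bytes, giving the shift by '16 - len' positions in the other direction, as the comment above states.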