Lines Matching +full:mod +full:- +full:12 +full:b

2 // Accelerated CRC-T10DIF using arm64 NEON and Crypto Extensions instructions
14 // Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions
62 // /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
69 .arch armv8-a+crypto
116 movi perm4.8b, #8
118 eor perm1.16b, perm1.16b, perm4.16b
128 tbl bd1.16b, {\bd\().16b}, perm1.16b
129 tbl bd2.16b, {\bd\().16b}, perm2.16b
130 tbl bd3.16b, {\bd\().16b}, perm3.16b
131 tbl bd4.16b, {\bd\().16b}, perm4.16b
136 ext t4.8b, ad.8b, ad.8b, #1 // A1
137 ext t5.8b, ad.8b, ad.8b, #2 // A2
138 ext t6.8b, ad.8b, ad.8b, #3 // A3
140 pmull t4.8h, t4.8b, fold_consts.8b // F = A1*B
141 pmull t8.8h, ad.8b, bd1.8b // E = A*B1
142 pmull t5.8h, t5.8b, fold_consts.8b // H = A2*B
143 pmull t7.8h, ad.8b, bd2.8b // G = A*B2
144 pmull t6.8h, t6.8b, fold_consts.8b // J = A3*B
145 pmull t9.8h, ad.8b, bd3.8b // I = A*B3
146 pmull t3.8h, ad.8b, bd4.8b // K = A*B4
147 b 0f
150 tbl t4.16b, {ad.16b}, perm1.16b // A1
151 tbl t5.16b, {ad.16b}, perm2.16b // A2
152 tbl t6.16b, {ad.16b}, perm3.16b // A3
154 pmull2 t4.8h, t4.16b, fold_consts.16b // F = A1*B
155 pmull2 t8.8h, ad.16b, bd1.16b // E = A*B1
156 pmull2 t5.8h, t5.16b, fold_consts.16b // H = A2*B
157 pmull2 t7.8h, ad.16b, bd2.16b // G = A*B2
158 pmull2 t6.8h, t6.16b, fold_consts.16b // J = A3*B
159 pmull2 t9.8h, ad.16b, bd3.16b // I = A*B3
160 pmull2 t3.8h, ad.16b, bd4.16b // K = A*B4
162 0: eor t4.16b, t4.16b, t8.16b // L = E + F
163 eor t5.16b, t5.16b, t7.16b // M = G + H
164 eor t6.16b, t6.16b, t9.16b // N = I + J
173 eor t8.16b, t8.16b, t4.16b
174 and t4.16b, t4.16b, k32_48.16b
178 eor t7.16b, t7.16b, t6.16b
179 and t6.16b, t6.16b, k00_16.16b
181 eor t8.16b, t8.16b, t4.16b
182 eor t7.16b, t7.16b, t6.16b
189 ext t4.16b, t4.16b, t4.16b, #15
190 ext t5.16b, t5.16b, t5.16b, #14
191 ext t6.16b, t6.16b, t6.16b, #13
192 ext t3.16b, t3.16b, t3.16b, #12
194 eor t4.16b, t4.16b, t5.16b
195 eor t6.16b, t6.16b, t3.16b
203 mov ad.16b, \ad\().16b
205 pmull \rq\().8h, \ad\().8b, \bd\().8b // D = A*B
207 pmull2 \rq\().8h, \ad\().16b, \bd\().16b // D = A*B
212 eor \rq\().16b, \rq\().16b, t4.16b
213 eor \rq\().16b, \rq\().16b, t6.16b
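The __pmull_p8 lines above are the fallback path for CPUs whose NEON has only the 8-bit polynomial multiply (pmull on .8b/.16b lanes) and not the 64-bit one: a 64x64-bit carry-less product is assembled from byte-sized partial products that the assembly gathers with tbl/ext and a handful of pmull instructions (the A1..A3 and B1..B4 products, realigned by the ext #15/#14/#13/#12 steps). A minimal C sketch of the underlying idea, assuming nothing beyond what the listing shows; clmul8 and clmul64_p8 are illustrative names, and this is a plain schoolbook double loop rather than the optimized vector arrangement the macro uses.

#include <stdint.h>

/* One lane of NEON pmull on .8b data: 8 x 8 -> 16 bit carry-less multiply. */
static uint16_t clmul8(uint8_t a, uint8_t b)
{
	uint16_t r = 0;

	for (int i = 0; i < 8; i++)
		if (b & (1u << i))
			r ^= (uint16_t)a << i;
	return r;
}

/* 64 x 64 -> 128 bit carry-less multiply built from byte-sized partial
 * products, i.e. the product __pmull_p8 has to compute without the
 * 64-bit PMULL instruction.  The result is returned as lo (bits 0..63)
 * and hi (bits 64..127). */
static void clmul64_p8(uint64_t a, uint64_t b, uint64_t *lo, uint64_t *hi)
{
	*lo = 0;
	*hi = 0;
	for (int i = 0; i < 8; i++) {
		for (int j = 0; j < 8; j++) {
			uint64_t p = clmul8((uint8_t)(a >> (8 * i)),
					    (uint8_t)(b >> (8 * j)));
			int shift = 8 * (i + j);

			if (shift < 64) {
				*lo ^= p << shift;
				if (shift > 48)	/* 16-bit product straddles bit 63 */
					*hi ^= p >> (64 - shift);
			} else {
				*hi ^= p << (shift - 64);
			}
		}
	}
}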
224 CPU_LE( rev64 v11.16b, v11.16b )
225 CPU_LE( rev64 v12.16b, v12.16b )
230 CPU_LE( ext v11.16b, v11.16b, v11.16b, #8 )
231 CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 )
233 eor \reg1\().16b, \reg1\().16b, v8.16b
234 eor \reg2\().16b, \reg2\().16b, v9.16b
235 eor \reg1\().16b, \reg1\().16b, v11.16b
236 eor \reg2\().16b, \reg2\().16b, v12.16b
247 eor \dst_reg\().16b, \dst_reg\().16b, v8.16b
248 eor \dst_reg\().16b, \dst_reg\().16b, \src_reg\().16b
264 b.lt .Lless_than_256_bytes_\@
275 CPU_LE( rev64 v0.16b, v0.16b )
276 CPU_LE( rev64 v1.16b, v1.16b )
277 CPU_LE( rev64 v2.16b, v2.16b )
278 CPU_LE( rev64 v3.16b, v3.16b )
279 CPU_LE( rev64 v4.16b, v4.16b )
280 CPU_LE( rev64 v5.16b, v5.16b )
281 CPU_LE( rev64 v6.16b, v6.16b )
282 CPU_LE( rev64 v7.16b, v7.16b )
283 CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 )
284 CPU_LE( ext v1.16b, v1.16b, v1.16b, #8 )
285 CPU_LE( ext v2.16b, v2.16b, v2.16b, #8 )
286 CPU_LE( ext v3.16b, v3.16b, v3.16b, #8 )
287 CPU_LE( ext v4.16b, v4.16b, v4.16b, #8 )
288 CPU_LE( ext v5.16b, v5.16b, v5.16b, #8 )
289 CPU_LE( ext v6.16b, v6.16b, v6.16b, #8 )
290 CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
293 movi v8.16b, #0
295 eor v0.16b, v0.16b, v8.16b
305 // While >= 128 data bytes remain (not counting v0-v7), fold the 128
306 // bytes v0-v7 into them, storing the result back into v0-v7.
314 b.ge .Lfold_128_bytes_loop_\@
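The 128-byte loop closed by the b.ge above relies on the standard folding identity for carry-less multiplication: a 128-bit chunk lying N bits earlier in the message is congruent, mod G(x), to its two 64-bit halves multiplied by x^N mod G(x) and x^(N+64) mod G(x), so it can be multiplied out and XORed into a chunk N bits later. A minimal C sketch of one fold step, assuming a bitwise stand-in for pmull/pmull2; struct u128, clmul64 and fold_16 are illustrative names, not kernel symbols. For this loop the constants are the x^(8*128) and x^(8*128+64) remainders from the table further down; the fold_32_bytes/fold_16_bytes macros earlier in the listing are the same step applied with different precomputed remainders.

#include <stdint.h>

struct u128 { uint64_t lo, hi; };	/* 128-bit chunk as a polynomial over GF(2) */

/* Bitwise 64 x 64 -> 128 bit carry-less multiply (stand-in for pmull/pmull2). */
static struct u128 clmul64(uint64_t a, uint64_t b)
{
	struct u128 r = { 0, 0 };

	for (int i = 0; i < 64; i++) {
		if (b >> i & 1) {
			r.lo ^= a << i;
			if (i)
				r.hi ^= a >> (64 - i);
		}
	}
	return r;
}

/* One fold step: 'acc' is a chunk lying 'dist' bits before 'data' in the
 * message, k_lo = x^dist mod G(x), k_hi = x^(dist+64) mod G(x).  Then
 *   acc * x^dist == clmul(acc.hi, k_hi) ^ clmul(acc.lo, k_lo)  (mod G(x)),
 * so the two products can simply be XORed into the later chunk. */
static struct u128 fold_16(struct u128 acc, struct u128 data,
			   uint64_t k_lo, uint64_t k_hi)
{
	struct u128 a = clmul64(acc.lo, k_lo);	/* pmull  (low halves)  */
	struct u128 b = clmul64(acc.hi, k_hi);	/* pmull2 (high halves) */

	data.lo ^= a.lo ^ b.lo;
	data.hi ^= a.hi ^ b.hi;
	return data;
}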
316 // Now fold the 112 bytes in v0-v6 into the 16 bytes in v7.
336 adds len, len, #(128-16)
340 b.lt .Lfold_16_bytes_loop_done_\@
344 eor v7.16b, v7.16b, v8.16b
346 CPU_LE( rev64 v0.16b, v0.16b )
347 CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 )
348 eor v7.16b, v7.16b, v0.16b
350 b.ge .Lfold_16_bytes_loop_\@
356 b.eq .Lreduce_final_16_bytes_\@
367 ldr q0, [buf, #-16]
368 CPU_LE( rev64 v0.16b, v0.16b )
369 CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 )
371 // v1 = high order part of second chunk: v7 left-shifted by 'len' bytes.
374 ld1 {v2.16b}, [x4]
375 tbl v1.16b, {v7.16b}, v2.16b
377 // v3 = first chunk: v7 right-shifted by '16-len' bytes.
378 movi v3.16b, #0x80
379 eor v2.16b, v2.16b, v3.16b
380 tbl v3.16b, {v7.16b}, v2.16b
382 // Convert to 8-bit masks: 'len' 0x00 bytes, then '16-len' 0xff bytes.
383 sshr v2.16b, v2.16b, #7
385 // v2 = second chunk: 'len' bytes from v0 (low-order bytes),
386 // then '16-len' bytes from v1 (high-order bytes).
387 bsl v2.16b, v1.16b, v0.16b
392 eor v7.16b, v7.16b, v0.16b
393 eor v7.16b, v7.16b, v2.16b
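The partial-segment block above (the ldr/tbl/bsl sequence) handles a 17..31-byte tail: the 16-byte accumulator and the final 1..15 unprocessed message bytes together form a (16+len)-byte value, which is split into a len-byte "first chunk" and a 16-byte "second chunk", after which the first chunk is folded into the second with the x^128 / x^192 constants. A C sketch of that splitting and fold on a most-significant-byte-first byte array, again assuming a bitwise carry-less multiply; fold_partial, get64, put64 and clmul64 are illustrative names, not kernel symbols.

#include <stdint.h>
#include <string.h>

/* Bitwise 64 x 64 -> 128 bit carry-less multiply (stand-in for pmull/pmull2). */
static void clmul64(uint64_t a, uint64_t b, uint64_t *lo, uint64_t *hi)
{
	*lo = 0;
	*hi = 0;
	for (int i = 0; i < 64; i++) {
		if (b >> i & 1) {
			*lo ^= a << i;
			if (i)
				*hi ^= a >> (64 - i);
		}
	}
}

static uint64_t get64(const uint8_t *p)		/* most significant byte first */
{
	uint64_t v = 0;

	for (int i = 0; i < 8; i++)
		v = v << 8 | p[i];
	return v;
}

static void put64(uint8_t *p, uint64_t v)
{
	for (int i = 7; i >= 0; i--) {
		p[i] = (uint8_t)v;
		v >>= 8;
	}
}

/* acc[16]: current accumulator, most significant byte first.
 * tail[t]: the final 1..15 unprocessed message bytes.
 * k128/k192: x^128 mod G(x) and x^192 mod G(x).
 * The (16+t)-byte value acc*x^(8t) + tail is split as in the assembly:
 * 'first' takes the top t bytes of acc (v3), 'second' takes the remaining
 * bytes of acc followed by the tail (v2), and first is folded into second. */
static void fold_partial(uint8_t acc[16], const uint8_t *tail, size_t t,
			 uint64_t k128, uint64_t k192)
{
	uint8_t first[16] = { 0 }, second[16];
	uint64_t a_lo, a_hi, b_lo, b_hi;

	memcpy(first + 16 - t, acc, t);		/* acc "right-shifted" by 16-t bytes */
	memcpy(second, acc + t, 16 - t);	/* acc "left-shifted" by t bytes ... */
	memcpy(second + 16 - t, tail, t);	/* ... with the tail in the low bytes */

	clmul64(get64(first + 8), k128, &a_lo, &a_hi);	/* low 64 bits  * x^128 mod G */
	clmul64(get64(first),     k192, &b_lo, &b_hi);	/* high 64 bits * x^192 mod G */

	put64(acc,     get64(second)     ^ a_hi ^ b_hi);
	put64(acc + 8, get64(second + 8) ^ a_lo ^ b_lo);
}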
396 // Reduce the 128-bit value M(x), stored in v7, to the final 16-bit CRC.
398 movi v2.16b, #0 // init zero register
400 // Load 'x^48 * (x^48 mod G(x))' and 'x^48 * (x^80 mod G(x))'.
405 // x^64. This produces a 128-bit value congruent to x^64 * M(x) and
407 ext v0.16b, v2.16b, v7.16b, #8
408 __pmull_\p v7, v7, fold_consts, 2 // high bits * x^48 * (x^80 mod G(x))
409 eor v0.16b, v0.16b, v7.16b // + low bits * x^64
411 // Fold the high 32 bits into the low 96 bits. This produces a 96-bit
413 ext v1.16b, v0.16b, v2.16b, #12 // extract high 32 bits
415 __pmull_\p v1, v1, fold_consts // high 32 bits * x^48 * (x^48 mod G(x))
416 eor v0.16b, v0.16b, v1.16b // + low bits
427 eor v0.16b, v0.16b, v1.16b // + low 16 nonzero bits
428 // Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of v0.
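The comment above pins down what the whole routine computes: the CRC is (x^16 * M(x)) mod G(x), with M(x) the message read most-significant-bit first and G(x) the T10-DIF generator 0x8BB7. A bit-serial reference in C (illustrative, not the kernel's implementation) that computes the same value and can be used to spot-check the folded code on small buffers:

#include <stdint.h>
#include <stddef.h>

/* Bit-serial CRC-T10DIF reference: non-reflected CRC-16 with generator
 * G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1
 * (0x8BB7), initial value 0 and no final XOR.  For crc == 0 this is
 * exactly (x^16 * M(x)) mod G(x). */
static uint16_t crc_t10dif_ref(uint16_t crc, const uint8_t *data, size_t len)
{
	for (size_t i = 0; i < len; i++) {
		crc ^= (uint16_t)data[i] << 8;
		for (int bit = 0; bit < 8; bit++)
			crc = (crc & 0x8000) ? (uint16_t)(crc << 1) ^ 0x8BB7
					     : (uint16_t)(crc << 1);
	}
	return crc;
}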
443 CPU_LE( rev64 v7.16b, v7.16b )
444 CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
447 movi v0.16b, #0
449 eor v7.16b, v7.16b, v0.16b
451 // Load the fold-across-16-bytes constants.
456 b.eq .Lreduce_final_16_bytes_\@ // len == 16
458 b.ge .Lfold_16_bytes_loop_\@ // 32 <= len <= 255
460 b .Lhandle_partial_segment_\@ // 17 <= len <= 31
489 .quad 0x0000000000006123 // x^(8*128) mod G(x)
490 .quad 0x0000000000002295 // x^(8*128+64) mod G(x)
492 .quad 0x0000000000001069 // x^(4*128) mod G(x)
493 .quad 0x000000000000dd31 // x^(4*128+64) mod G(x)
495 .quad 0x000000000000857d // x^(2*128) mod G(x)
496 .quad 0x0000000000007acc // x^(2*128+64) mod G(x)
498 .quad 0x000000000000a010 // x^(1*128) mod G(x)
499 .quad 0x0000000000001faa // x^(1*128+64) mod G(x)
501 .quad 0x1368000000000000 // x^48 * (x^48 mod G(x))
502 .quad 0x2d56000000000000 // x^48 * (x^80 mod G(x))
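Each .quad above is a remainder x^N mod G(x) for one of the fold distances; the final pair is additionally multiplied by x^48, i.e. shifted left by 48 bits, so the 16-bit remainder sits in the upper bits of the quadword. The kernel ships these values precomputed; a small C sketch of how such remainders can be derived (xpow_mod_g is an illustrative name):

#include <stdint.h>

/* x^n mod G(x) over GF(2) for the T10-DIF generator
 * G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1. */
static uint16_t xpow_mod_g(unsigned int n)
{
	uint32_t r = 1;			/* x^0 */

	while (n--) {
		r <<= 1;		/* multiply by x */
		if (r & 0x10000)	/* degree reached 16: reduce by G(x) */
			r ^= 0x18BB7;
	}
	return (uint16_t)r;
}

/* Illustrative usage: xpow_mod_g(1 * 128) should reproduce the
 * "x^(1*128) mod G(x)" quad above, and (uint64_t)xpow_mod_g(48) << 48
 * the "x^48 * (x^48 mod G(x))" quad. */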
507 // For 1 <= len <= 15, the 16-byte vector beginning at &byteshift_table[16 -
509 // ..., 0x80} XOR the index vector to shift right by '16 - len' bytes.
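The trick described above works because tbl returns zero for any out-of-range index byte, so a single index vector (and the same vector XORed with 0x80) provides both the shift-left-by-'len' and shift-right-by-'16-len' shuffles, and the sign bits of the XORed vector double as the 0x00/0xff select mask produced by sshr #7. A small C model of that behaviour; tbl16 is an illustrative name, and the index vector is built directly rather than loaded from the byteshift table.

#include <stdint.h>
#include <stdio.h>

/* Single-register NEON tbl: any out-of-range index byte (>= 16) selects 0. */
static void tbl16(uint8_t out[16], const uint8_t table[16], const uint8_t idx[16])
{
	for (int i = 0; i < 16; i++)
		out[i] = idx[i] < 16 ? table[idx[i]] : 0;
}

int main(void)
{
	uint8_t v[16], idx[16], shl[16], shr[16];
	unsigned int len = 5;			/* any value in 1..15 */

	for (unsigned int i = 0; i < 16; i++)
		v[i] = (uint8_t)(0xa0 + i);

	/* Index vector equivalent to the one starting at offset 16-len:
	 * shifts the 16 bytes left by 'len' byte positions. */
	for (unsigned int i = 0; i < 16; i++)
		idx[i] = i < len ? (uint8_t)(0x80 | (16 - len + i))
				 : (uint8_t)(i - len);
	tbl16(shl, v, idx);

	/* XOR with 0x80 flips which lanes are in range: the same vector now
	 * shifts right by '16 - len' bytes, and the sign bit of each index
	 * byte is the 0x00/0xff byte-select mask used by bsl. */
	for (unsigned int i = 0; i < 16; i++)
		idx[i] ^= 0x80;
	tbl16(shr, v, idx);

	for (unsigned int i = 0; i < 16; i++)
		printf("%2u: %02x %02x\n", i, shl[i], shr[i]);
	return 0;
}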