Lines Matching +full:11 +full:- +full:14

1 /* SPDX-License-Identifier: GPL-2.0-or-later */
5 # Copyright 2023- IBM Corp. All rights reserved
10 # Poly1305 - this version mainly uses vector/VSX/scalar instructions
11 # - 26-bit limbs
12 # - handles multiple 64-byte blocks.
17 # p = 2^130 - 5
25 # 07/22/21 - this revision is based on the above sum of products. Setup r^4, r^3, r^2, r and s3, s2, …
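
For orientation, the update rule this file vectorizes is h = (h + m) * r mod p with p = 2^130 - 5. A minimal serial sketch in Python (illustrative only, not part of the kernel source):

    P = (1 << 130) - 5

    def poly1305_blocks(h, r, blocks):
        # Absorb one 16-byte block per step; every full block gets
        # the 2^128 high bit set before it is added in.
        for m in blocks:
            n = int.from_bytes(m, "little") | (1 << 128)
            h = ((h + n) * r) % P
        return h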
56 #include <asm/asm-offsets.h>
57 #include <asm/asm-compat.h>
95 stdu 1,-752(1)
97 SAVE_GPR 14, 112, 1
130 SAVE_VSX 14, 192, 9
165 RESTORE_VSX 14, 192, 9
184 RESTORE_GPR 14, 112, 1
220 vmulouw 14, 4, 26
222 vmulouw 11, 6, 2
226 vaddudm 14, 14, 10
227 vaddudm 14, 14, 11
229 vmulouw 11, 6, 3
230 vaddudm 14, 14, 12
231 vaddudm 14, 14, 13 # x0
233 vaddudm 15, 15, 11
240 vmulouw 11, 6, 26
242 vaddudm 16, 16, 11
249 vmulouw 11, 6, 27
251 vaddudm 17, 17, 11
258 vmulouw 11, 6, 28
260 vaddudm 18, 18, 11
270 vmuleuw 11, 6, 2
273 vaddudm 14, 14, 9
274 vaddudm 14, 14, 10
275 vaddudm 14, 14, 11
276 vaddudm 14, 14, 12
277 vaddudm 14, 14, 13 # x0
281 vmuleuw 11, 6, 3
286 vaddudm 15, 15, 11
292 vmuleuw 11, 6, 26
297 vaddudm 16, 16, 11
303 vmuleuw 11, 6, 27
308 vaddudm 17, 17, 11
314 vmuleuw 11, 6, 28
319 vaddudm 18, 18, 11
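
The vmulouw/vmuleuw and vaddudm chains above accumulate a 5x5 schoolbook product in radix 2^26. A hedged Python sketch of the column sums they build (the limb-to-register mapping is an assumption; s[i] holds r[i] * 5 so that anything landing at 2^130 folds back, since 2^130 == 5 mod p):

    def mul_limbs26(a, r, s):
        # a: five 26-bit limbs of h (+ message), r: limbs of r,
        # s[i] == 5 * r[i]. Returns unreduced column sums.
        d0 = a[0]*r[0] + a[1]*s[4] + a[2]*s[3] + a[3]*s[2] + a[4]*s[1]
        d1 = a[0]*r[1] + a[1]*r[0] + a[2]*s[4] + a[3]*s[3] + a[4]*s[2]
        d2 = a[0]*r[2] + a[1]*r[1] + a[2]*r[0] + a[3]*s[4] + a[4]*s[3]
        d3 = a[0]*r[3] + a[1]*r[2] + a[2]*r[1] + a[3]*r[0] + a[4]*s[4]
        d4 = a[0]*r[4] + a[1]*r[3] + a[2]*r[2] + a[3]*r[1] + a[4]*r[0]
        return [d0, d1, d2, d3, d4]  # x0..x4, carried afterwards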
377 vsld 11, 29, 13
381 vaddudm 2, 11, 29
393 vsld 11, 29, 13
397 vaddudm 2, 11, 29
429 vsld 11, 29, 13
433 vaddudm 2, 11, 29
443 vsrd 10, 14, 31
444 vsrd 11, 17, 31
446 vand 4, 14, 25
447 vaddudm 18, 18, 11
451 vsrd 11, 15, 31
456 vaddudm 6, 16, 11
464 vsrd 11, 7, 31
468 vaddudm 8, 8, 11
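
The vsrd/vand/vaddudm run above is the carry pass that brings every limb back under 26 bits. A scalar equivalent (limb order assumed; the wrap from the top limb is scaled by 5 because 2^130 == 5 mod p):

    MASK26 = (1 << 26) - 1

    def carry_pass(l):
        # Push each limb's overflow into the next limb; the carry out
        # of limb 4 re-enters limb 0 multiplied by 5.
        for i in range(4):
            l[i + 1] += l[i] >> 26
            l[i] &= MASK26
        l[0] += (l[4] >> 26) * 5
        l[4] &= MASK26
        return l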
479 ld 11, 0(10)
482 li 14, 16
486 lvx 25, 0, 10 # v25 = 26-bit limb mask (0x3ffffff)
487 lvx 31, 14, 10 # v31 = 26 (0x1a), the limb shift count
496 and. 9, 9, 11
500 extrdi 14, 9, 26, 38
503 mtvsrdd 58, 0, 14
504 insrdi 16, 10, 14, 38
548 extrdi 14, 9, 26, 38
551 mtvsrdd 36, 0, 14
552 insrdi 16, 10, 14, 38
568 vperm 14, 11, 12, 17
569 vperm 15, 11, 12, 18
570 vand 9, 14, 25 # a0
571 vsrd 10, 14, 31 # >> 26
572 vsrd 11, 10, 31 # 12 bits left
577 vor 11, 11, 12
578 vand 11, 11, 25 # a2
579 vspltisb 13, 14
580 vsrd 12, 15, 13 # >> 14
586 vaddudm 22, 6, 11
595 vperm 14, 11, 12, 17
596 vperm 15, 11, 12, 18
597 vand 9, 14, 25 # a0
598 vsrd 10, 14, 31 # >> 26
599 vsrd 11, 10, 31 # 12 bits left
604 vspltisb 13, 14
605 vor 11, 11, 12
606 vand 11, 11, 25 # a2
607 vsrd 12, 15, 13 # >> 14
614 vmrgow 6, 11, 22
619 addi 5, 5, -64 # len -= 64
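
The vperm/vand/vsrd block above unpacks message words into five 26-bit limbs; the "12 bits left" and ">> 14" comments mark where limb a2 straddles the two 64-bit halves (bits 52..63 of the low half, plus 14 bits from the high half). The same split on one 128-bit value, as a sketch:

    def to_limbs26(n):
        # n: a little-endian message word with the 2^128 bit already
        # ORed in; returns limbs a0..a4.
        return [(n >> (26 * i)) & MASK26 for i in range(5)]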
633 # h3 = (h1 + m3) * r^2, h4 = (h2 + m4) * r^2 --> (h0 + m1) r^4 + (h3 + m3) r^2, (h0 + m2) r^4 + (h…
635 # h5 = (h3 + m5) * r^2, h6 = (h4 + m6) * r^2 -->
636 # h7 = (h5 + m7) * r^2, h8 = (h6 + m8) * r^1 --> m5 * r^4 + m6 * r^3 + m7 * r^2 + m8 * r
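
These comments describe the usual 4-way interleave: four serial updates collapse into one step over precomputed powers of r. The identity, sketched in Python (names illustrative):

    def poly1305_4blocks(h, r, m1, m2, m3, m4):
        # ((((h + m1)r + m2)r + m3)r + m4)r
        #   == (h + m1)*r^4 + m2*r^3 + m3*r^2 + m4*r   (mod p)
        r2 = (r * r) % P
        r3 = (r2 * r) % P
        r4 = (r2 * r2) % P
        return ((h + m1) * r4 + m2 * r3 + m3 * r2 + m4 * r) % P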
645 vsrd 10, 14, 31
646 vsrd 11, 17, 31
648 vand 4, 14, 25
649 vaddudm 18, 18, 11
653 vsrd 11, 15, 31
658 vaddudm 6, 16, 11
666 vsrd 11, 7, 31
670 vaddudm 8, 8, 11
679 vperm 14, 11, 12, 17
680 vperm 15, 11, 12, 18
685 vperm 17, 11, 12, 17
686 vperm 18, 11, 12, 18
688 vand 20, 14, 25 # a0
690 vsrd 21, 14, 31 # >> 26
693 vsrd 11, 10, 31 # 12 bits left
705 vor 11, 11, 12
706 vand 11, 11, 25 # a2
707 vspltisb 13, 14
708 vsrd 23, 15, 13 # >> 14
711 vsrd 12, 18, 13 # >> 14
724 vmrgow 6, 11, 6
729 addi 5, 5, -64 # len -= 64
752 vaddudm 4, 14, 9
757 vaddudm 6, 16, 11
769 vsrd 11, 7, 31
772 vaddudm 8, 8, 11
776 vsrd 11, 5, 31
781 vaddudm 6, 6, 11
789 vsrd 11, 7, 31
796 vaddudm 8, 8, 11
805 vspltisb 11, 12
806 vsrd 12, 6, 11
810 vspltisb 11, 14
811 vsld 7, 7, 11
814 vsld 8, 8, 11
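
The vsrd-by-12 / vsld-by-14 pair above appears to repack the 26-bit limbs into the 64-bit halves the scalar tail uses (limb a2 contributes its low 12 bits to the first half and its high 14 bits to the second); that reading is an inference from the shift counts. The inverse of the earlier split, as a sketch:

    def from_limbs26(l):
        # Reassemble the accumulator from five 26-bit limbs.
        n = 0
        for i, v in enumerate(l):
            n += v << (26 * i)
        return n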
846 ld 11, 0(10)
853 and. 9, 9, 11 # clamp mask r0
857 add 19, 21, 10 # s1: r19 = (r1 >> 2) * 5
881 vmsumudm 11, 6, 1, 9 # h0 * r1, h1 * r0
882 vmsumudm 10, 8, 2, 11 # d1 += h2 * s1
885 vmsumudm 11, 8, 3, 9 # d2 = h2 * r0
899 mfvsrld 29, 32+11
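
The vmsumudm lines compute the scalar product h * r on 64-bit limbs, h = h0 + h1*2^64 + h2*2^128 and r = r0 + r1*2^64. The clamp clears the low two bits of r1, so r1 * 2^128 == (r1 >> 2) * 5 * 2^64 (mod p), which is what s1 encodes. A hedged sketch of the column sums and the partial reduction (carry layout is an assumption):

    def mul_64limbs(h0, h1, h2, r0, r1):
        M64 = (1 << 64) - 1
        s1 = (r1 >> 2) * 5
        d0 = h0 * r0 + h1 * s1            # 2^0 column
        d1 = h0 * r1 + h1 * r0 + h2 * s1  # 2^64 column
        d2 = h2 * r0                      # 2^128 column
        # Fold bits 130 and up back down: 2^130 == 5 (mod p).
        d1 += d0 >> 64; d0 &= M64
        d2 += d1 >> 64; d1 &= M64
        d0 += (d2 >> 2) * 5; d2 &= 3
        d1 += d0 >> 64; d0 &= M64
        d2 += d1 >> 64; d1 &= M64
        return d0, d1, d2  # partially reduced: d2 stays tiny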
923 # - no highbit if final leftover block (highbit = 0)
931 stdu 1,-400(1)
933 SAVE_GPR 14, 112, 1
957 add 11, 25, 4
975 ld 20, 0(11)
976 ld 21, 8(11)
977 addi 11, 11, 16
999 RESTORE_GPR 14, 112, 1
1035 ld 11, 8(3)
1039 # h + 5 + (-p)
1041 mr 7, 11
1050 mr 11, 7
1057 adde 11, 11, 7
1061 std 11, 8(5)
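
The tail above is the final conditional subtraction: since h - p == h + 5 - 2^130, adding 5 and testing the 2^130 carry decides whether to keep h or h - p, after which s is added modulo 2^128. Sketch (argument names assumed):

    def poly1305_tag(h, s):
        # h must be fully carried (h < 2^130).
        g = h + 5                  # h + 5 + (-p), up to the 2^130 bit
        if g >> 130:
            h = g - (1 << 130)     # h >= p, so take h - p
        return ((h + s) & ((1 << 128) - 1)).to_bytes(16, "little")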