1 /* SPDX-License-Identifier: GPL-2.0-or-later */
3 # Accelerated AES-GCM stitched implementation for ppc64le.
5 # Copyright 2022- IBM Inc. All rights reserved
22 # Hash keys = v3 - v14
29 # v31 - counter 1
32 # vs0 - vs14 for round keys
35 # This implementation uses a stitched AES-GCM approach to improve overall performance.
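The "stitched" comment refers to interleaving two independent instruction streams: the AES rounds for the next group of counter blocks and the GHASH update over previously produced ciphertext, so both dependency chains can be in flight at once. The C sketch below shows only that loop structure, at whole-block granularity rather than the per-round interleaving the assembly performs; aes_block() and ghash_block() are placeholder stand-ins, not the kernel's routines.

#include <stdint.h>
#include <string.h>

typedef uint8_t block_t[16];

/* Placeholder stand-ins for the vcipher and vpmsumd work (not real AES/GHASH). */
static void aes_block(block_t out, const block_t ctr, const void *rkeys)
{
    (void)rkeys;
    memcpy(out, ctr, 16);                 /* placeholder keystream */
}

static void ghash_block(block_t xi, const block_t data, const void *hkeys)
{
    (void)hkeys;
    for (int i = 0; i < 16; i++)          /* placeholder: XOR, not a GF(2^128) multiply */
        xi[i] ^= data[i];
}

/* One 8-block chunk: generate keystream for the current counters while
 * hashing the previous chunk's ciphertext, then XOR keystream into input. */
static void stitched_chunk(block_t out[8], block_t in[8], block_t ctr[8],
                           block_t prev_ct[8], block_t xi,
                           const void *rkeys, const void *hkeys)
{
    block_t ks[8];

    for (int i = 0; i < 8; i++) {
        aes_block(ks[i], ctr[i], rkeys);     /* AES-CTR stream: one chain */
        ghash_block(xi, prev_ct[i], hkeys);  /* GHASH of earlier output: the other chain */
    }
    for (int i = 0; i < 8; i++)
        for (int j = 0; j < 16; j++)
            out[i][j] = in[i][j] ^ ks[i][j];
}

int main(void)
{
    block_t out[8] = { { 0 } }, in[8] = { { 0 } }, ctr[8] = { { 0 } };
    block_t prev[8] = { { 0 } }, xi = { 0 };

    stitched_chunk(out, in, ctr, prev, xi, NULL, NULL);
    return 0;
}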
48 # v15 - v18 - input states
49 # vs1 - vs9 - round keys
52 xxlor 19+32, 1, 1
57 vcipher 15, 15, 19
58 vcipher 16, 16, 19
59 vcipher 17, 17, 19
60 vcipher 18, 18, 19
77 xxlor 19+32, 5, 5
82 vcipher 15, 15, 19
83 vcipher 16, 16, 19
84 vcipher 17, 17, 19
85 vcipher 18, 18, 19
110 # v15 - v22 - input states
111 # vs1 - vs9 - round keys
123 vcipher 19, 19, 23
132 vcipher 19, 19, 24
141 vcipher 19, 19, 25
150 vcipher 19, 19, 26
164 vcipher 19, 19, 23
173 vcipher 19, 19, 24
182 vcipher 19, 19, 25
191 vcipher 19, 19, 26
201 vcipher 19, 19, 23
208 xxlor 19+32, 1, 1
213 vcipher 15, 15, 19
218 xxlor 19+32, 5, 5
223 vcipher 15, 15, 19
228 xxlor 19+32, 9, 9
229 vcipher 15, 15, 19
236 vxor 15, 15, 0
290 # v15 - v22 - input blocks
294 vxor 15, 15, 0 # Xi + X
347 vxor 19, 19, 27 # Xi + X
348 vpmsumd 23, 12, 19 # H4.L * X.L
354 vpmsumd 24, 13, 19 # H4.L * X.H + H4.H * X.L
375 vpmsumd 24, 14, 19 # H4.H * X.H
400 vxor 28, 28, 0
402 vxor 19, 19, 19
410 vsldoi 25, 23, 19, 8 # mL
411 vsldoi 26, 19, 23, 8 # mH
423 vmr 0, 22 # update hash
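The vpmsumd lines above build the doubleword partial products H^4.L * X.L, H^4.L * X.H + H^4.H * X.L and H^4.H * X.H for the aggregated input, with the hash keys holding h^4, h^3, h^2 and h so that four blocks can be folded per pass in the usual aggregated form (Xi ^ C1)*H^4 ^ C2*H^3 ^ C3*H^2 ^ C4*H, followed by a reduction modulo the GCM polynomial. As a point of reference only, here is a minimal sketch of the plain bit-by-bit GF(2^128) multiplication from the GCM specification that this vectorized sequence replaces; it is not the aggregated vpmsumd method used here.

#include <inttypes.h>
#include <stdio.h>

struct be128 {                    /* hi holds block bytes 0..7, lo holds bytes 8..15 */
    uint64_t hi, lo;
};

/* z = x * y in GF(2^128), right-shift algorithm with R = 0xe1 || 0^120 */
static struct be128 gf128_mul(struct be128 x, struct be128 y)
{
    struct be128 z = { 0, 0 };
    struct be128 v = y;

    for (int i = 0; i < 128; i++) {
        /* bit i of x, counted from the most significant bit of byte 0 */
        uint64_t xi = (i < 64) ? (x.hi >> (63 - i)) & 1
                               : (x.lo >> (127 - i)) & 1;
        if (xi) {
            z.hi ^= v.hi;
            z.lo ^= v.lo;
        }
        /* v = v >> 1, reduced when a bit falls off the low end */
        uint64_t lsb = v.lo & 1;
        v.lo = (v.lo >> 1) | (v.hi << 63);
        v.hi >>= 1;
        if (lsb)
            v.hi ^= 0xe100000000000000ULL;
    }
    return z;
}

int main(void)
{
    struct be128 h = { 0x0123456789abcdefULL, 0x0f0e0d0c0b0a0908ULL };  /* arbitrary operands */
    struct be128 x = { 0x0011223344556677ULL, 0x8899aabbccddeeffULL };
    struct be128 z = gf128_mul(x, h);

    printf("%016" PRIx64 "%016" PRIx64 "\n", z.hi, z.lo);
    return 0;
}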
428 stdu 1,-640(1)
429 mflr 0
436 std 19,152(1)
468 stxv 19, 544(1)
472 std 0, 656(1)
481 lxv 19, 544(1)
510 ld 0, 656(1)
516 ld 19,152(1)
520 mtlr 0
526 lxvb16x 32, 0, 8 # load Xi
528 # load Hash - h^4, h^3, h^2, h
564 # r3 - inp
565 # r4 - out
566 # r5 - len
567 # r6 - AES round keys
568 # r7 - iv and other data
569 # r8 - Xi, HPoli, hash keys
572 # Xi is at 0 in gcm_table (Xip).
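Read against the ELFv2 ABI (integer arguments arrive in r3-r10), the parameter comments above suggest a C-level entry point roughly like the declaration below. The function name and exact types are assumptions for illustration and are not taken from this listing.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical declaration inferred from the r3-r8 comments above. */
void aes_p10_gcm_encrypt(const uint8_t *inp, /* r3 - input                   */
                         uint8_t *out,       /* r4 - output                  */
                         size_t len,         /* r5 - length in bytes         */
                         const void *rkey,   /* r6 - expanded AES round keys */
                         uint8_t *iv,        /* r7 - iv and other data       */
                         void *xi);          /* r8 - Xi, HPoli, hash keys    */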
581 # initialize ICB: GHASH( IV ), IV - r7
582 lxvb16x 30+32, 0, 7 # load IV - v30
585 li 11, 0 # block index
593 lxv 0, 0(6)
594 lxv 1, 0x10(6)
595 lxv 2, 0x20(6)
596 lxv 3, 0x30(6)
597 lxv 4, 0x40(6)
598 lxv 5, 0x50(6)
599 lxv 6, 0x60(6)
600 lxv 7, 0x70(6)
601 lxv 8, 0x80(6)
602 lxv 9, 0x90(6)
603 lxv 10, 0xa0(6)
605 # load rounds - 10 (128), 12 (192), 14 (256)
610 xxlor 32+29, 0, 0
611 vxor 15, 30, 29 # IV + round key - add round key 0
617 lxv 11, 0xb0(6)
618 lxv 12, 0xc0(6)
624 lxv 13, 0xd0(6)
625 lxv 14, 0xe0(6)
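The "load rounds" comment reflects the standard AES schedule: Nr = Nk + 6 rounds and Nr + 1 sixteen-byte round keys, so vs0-vs10 (offsets 0x00-0xa0) cover AES-128 and vs11-vs14 are only needed for the longer keys. A small self-contained check of that arithmetic:

#include <stdio.h>

int main(void)
{
    for (int key_bits = 128; key_bits <= 256; key_bits += 64) {
        int nk = key_bits / 32;          /* key length in 32-bit words      */
        int nr = nk + 6;                 /* rounds: 10, 12 or 14            */
        int sched_bytes = (nr + 1) * 16; /* round keys including round 0    */

        printf("AES-%d: %d rounds, %d-byte key schedule\n",
               key_bits, nr, sched_bytes);
    }
    return 0;
}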
641 cmpdi 15, 0
650 divdu 10, 12, 10 # n 128-byte blocks
651 cmpdi 10, 0
661 vxor 19, 30, 29
675 li 19, 80
683 lxvb16x 15, 0, 14 # load block
687 lxvb16x 19, 18, 14 # load block
688 lxvb16x 20, 19, 14 # load block
707 vcipher 19, 19, 23
716 vcipher 19, 19, 24
733 vcipher 19, 19, 23
742 vcipher 19, 19, 24
761 stxvb16x 47, 0, 9 # store output
773 vcipherlast 19, 19, 23
776 xxlxor 51, 51, 19
779 stxvb16x 52, 19, 9 # store output
794 xxlor 27+32, 0, 0
805 vxor 19, 30, 27
813 addi 12, 12, -128
819 stxvb16x 30+32, 0, 7 # update IV
822 cmpdi 12, 0
837 lxvb16x 15, 0, 14 # load block
872 stxvb16x 47, 0, 9 # store output
879 addi 12, 12, -16
881 xxlor 19+32, 0, 0
883 vxor 15, 30, 19 # add round key
887 li 15, 0
889 stxvb16x 30+32, 0, 7 # update IV
890 cmpdi 12, 0
928 li 21, 0 # encrypt
930 cmpdi 15, 0
934 cmpdi 12, 0
940 lxvb16x 15, 0, 14 # load last block
947 vspltisb 16, -1 # first 16 bytes - 0xffff...ff
948 vspltisb 17, 0 # second 16 bytes - 0x0000...00
964 stxvb16x 30+32, 0, 7 # update IV
968 stxvb16x 32, 0, 8 # write out Xi
976 vspltisb 16, -1 # first 16 bytes - 0xffff...ff
977 vspltisb 17, 0 # second 16 bytes - 0x0000...00
986 lxvb16x \_mask, 0, 10 # load partial block mask
1005 lxvb16x 17+32, 0, 14 # load last block
1007 mtvsrdd 32+16, 0, 16
1012 vxor 0, 0, 0 # clear Xi
1015 cmpdi 21, 0 # encrypt/decrypt ops?
1025 vxor 0, 0, 29
1026 stxvb16x 32, 0, 8 # save Xi
1032 mtvsrdd 32+16, 0, 16
1034 #stxvb16x 15+32, 0, 9 # last block
1037 sub 17, 16, 15 # 16 - partial
1049 addi 10, 9, -1
1069 stxvb16x 30+32, 0, 7 # update IV
1070 xxlor 32+29, 0, 0
1071 vxor 15, 30, 29 # IV + round key - add round key 0
1072 li 15, 0
1073 std 15, 56(7) # partial done - clear
1083 # r9 - output
1084 # r12 - remaining bytes
1085 # v15 - partial input data
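For a trailing block shorter than 16 bytes, the code above builds a 32-byte 0xff..ff / 0x00..00 template (the vspltisb pair) and, as the "# 16 - partial" hint suggests, loads a 16-byte mask at offset 16 - n so only the first n bytes survive when masking the keystream and the stored output. A C sketch of that masking idea, assuming the offset really is 16 - partial:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Build a 16-byte mask whose first n bytes are 0xff and the rest 0x00
 * by sliding a window over a ff..ff 00..00 template, as the assembly
 * does with vspltisb and lxvb16x on vector registers. */
static void make_partial_mask(uint8_t mask[16], size_t n)
{
    uint8_t tmpl[32];

    memset(tmpl, 0xff, 16);       /* first 16 bytes  - 0xffff...ff */
    memset(tmpl + 16, 0x00, 16);  /* second 16 bytes - 0x0000...00 */
    memcpy(mask, tmpl + (16 - n), 16);
}

int main(void)
{
    uint8_t mask[16];

    make_partial_mask(mask, 5);
    for (int i = 0; i < 16; i++)
        printf("%02x ", mask[i]);   /* prints ff ff ff ff ff 00 ... 00 */
    printf("\n");
    return 0;
}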
1091 addi 10, 9, -1
1095 li 15, 0
1106 stxvb16x 32, 0, 8 # write out Xi
1122 # initialize ICB: GHASH( IV ), IV - r7
1123 lxvb16x 30+32, 0, 7 # load IV - v30
1126 li 11, 0 # block index
1134 lxv 0, 0(6)
1135 lxv 1, 0x10(6)
1136 lxv 2, 0x20(6)
1137 lxv 3, 0x30(6)
1138 lxv 4, 0x40(6)
1139 lxv 5, 0x50(6)
1140 lxv 6, 0x60(6)
1141 lxv 7, 0x70(6)
1142 lxv 8, 0x80(6)
1143 lxv 9, 0x90(6)
1144 lxv 10, 0xa0(6)
1146 # load rounds - 10 (128), 12 (192), 14 (256)
1151 xxlor 32+29, 0, 0
1152 vxor 15, 30, 29 # IV + round key - add round key 0
1158 lxv 11, 0xb0(6)
1159 lxv 12, 0xc0(6)
1165 lxv 13, 0xd0(6)
1166 lxv 14, 0xe0(6)
1182 cmpdi 15, 0
1191 divdu 10, 12, 10 # n 128-byte blocks
1192 cmpdi 10, 0
1202 vxor 19, 30, 29
1216 li 19, 80
1224 lxvb16x 15, 0, 14 # load block
1228 lxvb16x 19, 18, 14 # load block
1229 lxvb16x 20, 19, 14 # load block
1248 vcipher 19, 19, 23
1257 vcipher 19, 19, 24
1274 vcipher 19, 19, 23
1283 vcipher 19, 19, 24
1302 stxvb16x 47, 0, 9 # store output
1314 vcipherlast 19, 19, 23
1317 xxlxor 51, 51, 19
1320 stxvb16x 52, 19, 9 # store output
1336 xxlor 19+32, 19, 19
1344 xxlor 27+32, 0, 0
1355 vxor 19, 30, 27
1363 addi 12, 12, -128
1369 stxvb16x 30+32, 0, 7 # update IV
1372 cmpdi 12, 0
1387 lxvb16x 15, 0, 14 # load block
1422 stxvb16x 47, 0, 9 # store output
1430 addi 12, 12, -16
1432 xxlor 19+32, 0, 0
1434 vxor 15, 30, 19 # add round key
1438 li 15, 0
1440 stxvb16x 30+32, 0, 7 # update IV
1441 cmpdi 12, 0
1481 cmpdi 15, 0
1484 cmpdi 12, 0
1490 lxvb16x 15, 0, 14 # load last block
1497 vspltisb 16, -1 # first 16 bytes - 0xffff...ff
1498 vspltisb 17, 0 # second 16 bytes - 0x0000...00
1515 stxvb16x 30+32, 0, 7 # update IV
1519 stxvb16x 32, 0, 8 # write out Xi