Lines Matching +full:4 +full:- +full:16
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
5 # Copyright 2023- IBM Corp. All rights reserved
14 # 1. a += b; d ^= a; d <<<= 16;
17 # 4. c += d; b ^= c; b <<<= 7
19 # row1 = (row1 + row2), row4 = row1 xor row4, row4 rotate each word by 16
24 # 4 blocks (a b c d)
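The four numbered steps above are the standard ChaCha quarter-round, and the "row" form restates the same steps for whole 4-word rows so that four blocks can be processed in parallel. A minimal scalar sketch in C, assuming 32-bit words and the usual 16/12/8/7 rotation counts:

    #include <stdint.h>

    /* Rotate a 32-bit word left by n bits (0 < n < 32). */
    static inline uint32_t rotl32(uint32_t v, int n)
    {
            return (v << n) | (v >> (32 - n));
    }

    /* One ChaCha quarter-round over four state words. */
    static void quarter_round(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
    {
            *a += *b; *d ^= *a; *d = rotl32(*d, 16);        /* step 1 */
            *c += *d; *b ^= *c; *b = rotl32(*b, 12);        /* step 2 */
            *a += *b; *d ^= *a; *d = rotl32(*d, 8);         /* step 3 */
            *c += *d; *b ^= *c; *b = rotl32(*b, 7);         /* step 4 */
    }

The vadduwm/vxor/vrlw/vpermxor sequences later in the listing are the vector form of these same steps, applied to whole rows of words at once.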
43 #include <asm/asm-offsets.h>
44 #include <asm/asm-compat.h>
55 li 16, \OFFSET
56 stvx \VRS, 16, \FRAME
60 li 16, \OFFSET
61 stxvx \VSX, 16, \FRAME
69 li 16, \OFFSET
70 lvx \VRS, 16, \FRAME
74 li 16, \OFFSET
75 lxvx \VSX, 16, \FRAME
80 std 0, 16(1)
81 stdu 1,-752(1)
85 SAVE_GPR 16, 128, 1
104 SAVE_VRS 21, 16, 9
118 SAVE_VSX 16, 224, 9
139 RESTORE_VRS 21, 16, 9
153 RESTORE_VSX 16, 224, 9
172 RESTORE_GPR 16, 128, 1
190 ld 0, 16(1)
198 vadduwm 0, 0, 4
202 vadduwm 16, 16, 20
211 vpermxor 28, 28, 16, 25
224 vxor 4, 4, 8
235 vrlw 4, 4, 25 #
244 vadduwm 0, 0, 4
248 vadduwm 16, 16, 20
259 vpermxor 28, 28, 16, 25
274 vxor 4, 4, 8
282 vrlw 4, 4, 28 #
298 vadduwm 3, 3, 4
299 vadduwm 16, 16, 21
308 vpermxor 31, 31, 16, 25
325 vxor 4, 4, 9
336 vrlw 4, 4, 25
346 vadduwm 3, 3, 4
347 vadduwm 16, 16, 21
358 vpermxor 31, 31, 16, 25
378 vxor 4, 4, 9
386 vrlw 4, 4, 28
396 vadduwm 0, 0, 4
408 vxor 4, 4, 8
412 vrlw 4, 4, 21
416 vadduwm 0, 0, 4
428 vxor 4, 4, 8
432 vrlw 4, 4, 23
441 vadduwm 3, 3, 4
453 vxor 4, 4, 9
457 vrlw 4, 4, 21
461 vadduwm 3, 3, 4
473 vxor 4, 4, 9
477 vrlw 4, 4, 23
494 vadduwm \S+0, \S+0, 16-\S
495 vadduwm \S+4, \S+4, 17-\S
496 vadduwm \S+8, \S+8, 18-\S
497 vadduwm \S+12, \S+12, 19-\S
499 vadduwm \S+1, \S+1, 16-\S
500 vadduwm \S+5, \S+5, 17-\S
501 vadduwm \S+9, \S+9, 18-\S
502 vadduwm \S+13, \S+13, 19-\S
504 vadduwm \S+2, \S+2, 16-\S
505 vadduwm \S+6, \S+6, 17-\S
506 vadduwm \S+10, \S+10, 18-\S
507 vadduwm \S+14, \S+14, 19-\S
509 vadduwm \S+3, \S+3, 16-\S
510 vadduwm \S+7, \S+7, 17-\S
511 vadduwm \S+11, \S+11, 18-\S
512 vadduwm \S+15, \S+15, 19-\S
520 add 16, 14, 4
525 lxvw4x 4, 20, 9
542 xxlxor \S+33, \S+33, 4
555 stxvw4x \S+32, 0, 16
556 stxvw4x \S+36, 17, 16
557 stxvw4x \S+40, 18, 16
558 stxvw4x \S+44, 19, 16
560 stxvw4x \S+33, 20, 16
561 stxvw4x \S+37, 21, 16
562 stxvw4x \S+41, 22, 16
563 stxvw4x \S+45, 23, 16
565 stxvw4x \S+34, 24, 16
566 stxvw4x \S+38, 25, 16
567 stxvw4x \S+42, 26, 16
568 stxvw4x \S+46, 27, 16
570 stxvw4x \S+35, 28, 16
571 stxvw4x \S+39, 29, 16
572 stxvw4x \S+43, 30, 16
573 stxvw4x \S+47, 31, 16
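Add_state and Write_256 correspond to the usual ChaCha finalization: the saved input state is added back into the permuted state, and the resulting keystream is XORed with the source bytes and stored (256 bytes, i.e. four 64-byte blocks, per Write_256 call). A rough scalar sketch for a single 64-byte block, assuming little-endian words:

    #include <stdint.h>
    #include <string.h>

    static void finalize_one_block(const uint32_t input[16],
                                   const uint32_t permuted[16],
                                   const uint8_t *src, uint8_t *dst)
    {
            uint32_t ks, in;
            int i;

            for (i = 0; i < 16; i++) {
                    ks = permuted[i] + input[i];    /* Add_state: add saved state */
                    memcpy(&in, src + 4 * i, 4);    /* load 4 source bytes */
                    in ^= ks;                       /* xor with keystream word */
                    memcpy(dst + 4 * i, &in, 4);    /* store ciphertext word */
            }
    }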
587 # r17 - r31 mainly for Write_256 macro.
588 li 17, 16
617 vmrghw 4, 0, 1
619 vsldoi 30, 4, 5, 8 # vr30 = counter vector (0, 1, 2, 3)
634 xxlor 16, 48, 48
639 vspltisw 25, 4
645 vadduwm 31, 30, 25 # counter = (0, 1, 2, 3) + (4, 4, 4, 4)
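The counter setup above gives each interleaved block its own block counter: the first set of four blocks uses counter + (0, 1, 2, 3), and the second set gets counters offset by a further 4, matching the (0, 1, 2, 3) + (4, 4, 4, 4) comment. A small illustrative sketch (the names here are made up):

    #include <stdint.h>

    static void setup_lane_counters(uint32_t start, uint32_t lo[4], uint32_t hi[4])
    {
            int i;

            for (i = 0; i < 4; i++) {
                    lo[i] = start + i;      /* counters for blocks 0..3 */
                    hi[i] = lo[i] + 4;      /* counters for blocks 4..7 */
            }
    }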
658 xxspltw 32+0, 16, 0
659 xxspltw 32+1, 16, 1
660 xxspltw 32+2, 16, 2
661 xxspltw 32+3, 16, 3
663 xxspltw 32+4, 17, 0
677 xxspltw 32+16, 16, 0
678 xxspltw 32+17, 16, 1
679 xxspltw 32+18, 16, 2
680 xxspltw 32+19, 16, 3
707 TP_4x 4, 5, 6, 7
715 xxlor 48, 16, 16
726 addi 15, 15, -256 # len -=256
732 TP_4x 16+0, 16+1, 16+2, 16+3
733 TP_4x 16+4, 16+5, 16+6, 16+7
734 TP_4x 16+8, 16+9, 16+10, 16+11
735 TP_4x 16+12, 16+13, 16+14, 16+15
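TP_4x presumably transposes a 4x4 matrix of 32-bit words held across four vector registers, so that data laid out as "one state word across four blocks" per register becomes "four consecutive words of one block" per register before Add_state and Write_256. A generic 4x4 word transpose in C, for illustration only:

    #include <stdint.h>

    /* Transpose a 4x4 matrix of 32-bit words in place. */
    static void transpose_4x4(uint32_t m[4][4])
    {
            uint32_t tmp;
            int i, j;

            for (i = 0; i < 4; i++) {
                    for (j = i + 1; j < 4; j++) {
                            tmp = m[i][j];
                            m[i][j] = m[j][i];
                            m[j][i] = tmp;
                    }
            }
    }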
737 xxlor 32, 16, 16
741 Add_state 16
742 Write_256 16
744 addi 15, 15, -256 # len -= 256
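The len updates reflect the outer loop's bookkeeping: each Write_256 consumes 256 bytes of input and output, and the remaining length drops by 256 until less than a full set of blocks is left. A simplified driver loop, using a hypothetical chacha_4block() core (not part of this file) and assuming the 32-bit block counter sits at state word 12 as in the RFC 7539 layout:

    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical core: expands 4 blocks and xors 256 bytes of src into dst. */
    void chacha_4block(uint32_t state[16], uint8_t *dst, const uint8_t *src);

    static void chacha_bulk(uint32_t state[16], uint8_t *dst,
                            const uint8_t *src, size_t len)
    {
            while (len >= 256) {
                    chacha_4block(state, dst, src);
                    state[12] += 4;         /* advance the block counter by 4 */
                    src += 256;
                    dst += 256;
                    len -= 256;             /* mirrors "len -= 256" above */
            }
            /* any tail of fewer than 256 bytes is handled elsewhere */
    }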
780 vspltw 0, 16, 0
781 vspltw 1, 16, 1
782 vspltw 2, 16, 2
783 vspltw 3, 16, 3
785 vspltw 4, 17, 0
807 TP_4x 4, 5, 6, 7
814 addi 15, 15, -256 # len -= 256
817 vspltisw 25, 4