Lines Matching +full:6 +full:- +full:14
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
5 # Copyright 2023- IBM Corp. All rights reserved
43 #include <asm/asm-offsets.h>
44 #include <asm/asm-compat.h>
81 stdu 1,-752(1)
83 SAVE_GPR 14, 112, 1
116 SAVE_VSX 14, 192, 9
151 RESTORE_VSX 14, 192, 9
170 RESTORE_GPR 14, 112, 1
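The save/restore matches above are standard ELFv2 prologue/epilogue bookkeeping: stdu 1,-752(1) opens a 752-byte stack frame, and, going by their operands, SAVE_GPR 14, 112, 1 and SAVE_VSX 14, 192, 9 spill the nonvolatile registers from r14 and vs14 upward into that frame, with the matching RESTORE_GPR / RESTORE_VSX lines reloading them before return.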
200 vadduwm 2, 2, 6
209 vpermxor 14, 14, 2, 25
218 vadduwm 10, 10, 14
226 vxor 6, 6, 10
237 vrlw 6, 6, 25
246 vadduwm 2, 2, 6
257 vpermxor 14, 14, 2, 25
266 vadduwm 10, 10, 14
276 vxor 6, 6, 10
284 vrlw 6, 6, 28
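The vadduwm / vxor / vrlw triples in these matches are the vector form of the ChaCha quarter-round (vpermxor handles the byte-aligned rotations, and the rotate counts sit in vector registers, so 16/12/8/7 never appear literally in the matched lines). A minimal scalar sketch, assuming the standard ChaCha20 rotation schedule:

    #include <stdint.h>

    static inline uint32_t rotl32(uint32_t x, int n)
    {
            return (x << n) | (x >> (32 - n));
    }

    /* One ChaCha quarter-round: add, xor, rotate, four times over. */
    static void quarter_round(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
    {
            *a += *b; *d ^= *a; *d = rotl32(*d, 16);  /* vadduwm + vpermxor    */
            *c += *d; *b ^= *c; *b = rotl32(*b, 12);  /* vadduwm + vxor + vrlw */
            *a += *b; *d ^= *a; *d = rotl32(*d, 8);   /* vadduwm + vpermxor    */
            *c += *d; *b ^= *c; *b = rotl32(*b, 7);   /* vadduwm + vxor + vrlw */
    }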
296 vadduwm 1, 1, 6
307 vpermxor 14, 14, 3, 25
317 vadduwm 9, 9, 14
323 vxor 6, 6, 11
334 vrlw 6, 6, 25
344 vadduwm 1, 1, 6
357 vpermxor 14, 14, 3, 25
367 vadduwm 9, 9, 14
376 vxor 6, 6, 11
384 vrlw 6, 6, 28
398 vadduwm 2, 2, 6
402 vpermxor 14, 14, 2, 20
406 vadduwm 10, 10, 14
410 vxor 6, 6, 10
414 vrlw 6, 6, 21
418 vadduwm 2, 2, 6
422 vpermxor 14, 14, 2, 22
426 vadduwm 10, 10, 14
430 vxor 6, 6, 10
434 vrlw 6, 6, 23
439 vadduwm 1, 1, 6
445 vpermxor 14, 14, 3, 20
449 vadduwm 9, 9, 14
451 vxor 6, 6, 11
455 vrlw 6, 6, 21
459 vadduwm 1, 1, 6
465 vpermxor 14, 14, 3, 22
469 vadduwm 9, 9, 14
471 vxor 6, 6, 11
475 vrlw 6, 6, 23
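In this group the 16- and 8-bit rotations are done with a single vpermxor each, while 12 and 7 still take vxor plus vrlw; registers 20/22 presumably hold byte-permutation controls and 21/23 splatted rotate counts. The two cases differ because rotating a 32-bit word by 16 or 8 only moves whole bytes, so permute hardware can fuse the XOR and the rotate into one instruction. A little-endian C illustration of the byte-move view (rotl16_by_bytes is a name made up for this sketch):

    #include <stdint.h>
    #include <string.h>

    /* Rotating a 32-bit word left by 16 (or 8) only permutes its bytes,
     * so a byte-shuffle unit can do it without a shifter. */
    static uint32_t rotl16_by_bytes(uint32_t x)
    {
            uint8_t b[4], r[4];

            memcpy(b, &x, 4);          /* b[0] is the LSB on little-endian */
            r[0] = b[2]; r[1] = b[3];  /* rotl by 16 = swap 16-bit halves  */
            r[2] = b[0]; r[3] = b[1];
            memcpy(&x, r, 4);
            return x;
    }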
494 vadduwm \S+0, \S+0, 16-\S
495 vadduwm \S+4, \S+4, 17-\S
496 vadduwm \S+8, \S+8, 18-\S
497 vadduwm \S+12, \S+12, 19-\S
499 vadduwm \S+1, \S+1, 16-\S
500 vadduwm \S+5, \S+5, 17-\S
501 vadduwm \S+9, \S+9, 18-\S
502 vadduwm \S+13, \S+13, 19-\S
504 vadduwm \S+2, \S+2, 16-\S
505 vadduwm \S+6, \S+6, 17-\S
506 vadduwm \S+10, \S+10, 18-\S
507 vadduwm \S+14, \S+14, 19-\S
509 vadduwm \S+3, \S+3, 16-\S
510 vadduwm \S+7, \S+7, 17-\S
511 vadduwm \S+11, \S+11, 18-\S
512 vadduwm \S+15, \S+15, 19-\S
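The \S+N operands are GNU assembler .macro parameter references, substituted with a register base when the macro is expanded; registers 16-19 appear to hold copies of the original input state. Functionally this block is ChaCha's feed-forward: after the rounds, the input state is added back into the working state to form the keystream block. A scalar sketch:

    #include <stdint.h>

    /* Feed-forward: add the original input state back into the
     * post-rounds working state to form the 16-word keystream block. */
    static void add_state(uint32_t x[16], const uint32_t input[16])
    {
            for (int i = 0; i < 16; i++)
                    x[i] += input[i];
    }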
519 add 9, 14, 5
520 add 16, 14, 4
527 lxvw4x 6, 22, 9
535 lxvw4x 14, 30, 9
544 xxlxor \S+41, \S+41, 6
552 xxlxor \S+43, \S+43, 14
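The lxvw4x loads pull in 16 input bytes per register and the xxlxor lines XOR them with the keystream registers, which is the whole of the encryption (and, by symmetry, decryption) step. The same operation byte by byte, with names invented for the sketch:

    #include <stddef.h>
    #include <stdint.h>

    /* Encrypt/decrypt: XOR the generated keystream into the data.
     * The vector code above does this 16 bytes per xxlxor. */
    static void xor_keystream(uint8_t *dst, const uint8_t *src,
                              const uint8_t *keystream, size_t len)
    {
            for (size_t i = 0; i < len; i++)
                    dst[i] = src[i] ^ keystream[i];
    }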
582 cmpdi 6, 0
587 # r17 - r31 mainly for Write_256 macro.
604 mr 15, 6 # len
605 li 14, 0 # offset to inp and outp
654 cmpdi 6, 512
665 xxspltw 32+6, 17, 2
673 xxspltw 32+14, 19, 2
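xxspltw broadcasts one 32-bit word of a VSX register into all four lanes. That is how the parallel state is built: each state word gets its own vector register whose lane k belongs to block k, so four blocks run through the rounds at once, and only the block counter needs to differ per lane. A sketch of that layout, assuming the RFC 7539 convention that word 12 is the counter (splat_state is a name chosen here):

    #include <stdint.h>

    /* Lay out a 4-lane state: row i holds state word i for all four
     * blocks; only the counter word differs from lane to lane. */
    static void splat_state(uint32_t x[16][4], const uint32_t state[16],
                            uint32_t counter)
    {
            for (int i = 0; i < 16; i++)
                    for (int lane = 0; lane < 4; lane++)
                            x[i][lane] = state[i];
            for (int lane = 0; lane < 4; lane++)
                    x[12][lane] = counter + lane;  /* per-lane block counter */
    }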
707 TP_4x 4, 5, 6, 7
709 TP_4x 12, 13, 14, 15
725 addi 14, 14, 256 # offset += 256
726 addi 15, 15, -256 # len -= 256
733 TP_4x 16+4, 16+5, 16+6, 16+7
735 TP_4x 16+12, 16+13, 16+14, 16+15
743 addi 14, 14, 256 # offset += 256
744 addi 15, 15, -256 # len -= 256
787 vspltw 6, 17, 2
796 vspltw 14, 19, 2
807 TP_4x 4, 5, 6, 7
809 TP_4x 12, 13, 14, 15
813 addi 14, 14, 256 # offset += 256
814 addi 15, 15, -256 # len -= 256
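Taken together, r14 is the running byte offset into the input and output buffers and r15 the remaining length; each pass through the transpose-and-write code consumes 256 bytes, and the wide path appears to do two such passes (512 bytes) per iteration, matching the cmpdi 6, 512 check above. A sketch of the overall driver shape, with chacha_nx a hypothetical stand-in for the assembler's internal paths:

    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical helper: run the rounds for nblocks 64-byte blocks and
     * XOR the resulting keystream into out/in at the given offset. */
    void chacha_nx(uint8_t *out, const uint8_t *in, size_t offset, int nblocks);

    static void driver(uint8_t *out, const uint8_t *in, size_t len)
    {
            size_t offset = 0;                 /* li 14, 0             */

            while (len >= 512) {               /* cmpdi 6, 512         */
                    chacha_nx(out, in, offset, 8);
                    offset += 512;             /* addi 14,14,256 twice */
                    len -= 512;                /* addi 15,15,-256 twice */
            }
            while (len >= 256) {
                    chacha_nx(out, in, offset, 4);
                    offset += 256;             /* addi 14, 14, 256     */
                    len -= 256;                /* addi 15, 15, -256    */
            }
    }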