Lines matching "11 - 14"
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
5 # Copyright 2023- IBM Corp. All rights reserved
43 #include <asm/asm-offsets.h>
44 #include <asm/asm-compat.h>
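The matches below appear to come from the Linux kernel's POWER10 little-endian ChaCha20 implementation: the 2023 IBM copyright, the VSX mnemonics, and macros such as TP_4x and Write_256 are all consistent with arch/powerpc/crypto/chacha-p10le-8x.S, though the search output itself does not name the file.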
81 stdu 1,-752(1)
83 SAVE_GPR 14, 112, 1
116 SAVE_VSX 14, 192, 9
151 RESTORE_VSX 14, 192, 9
170 RESTORE_GPR 14, 112, 1
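The stdu/SAVE_GPR/SAVE_VSX matches are the function prologue: a 752-byte stack frame is carved out, and the nonvolatile registers (GPRs starting at r14 at frame offset 112, VSX registers from offset 192) are spilled into it before the routine clobbers them, as the ELFv2 ABI requires. The RESTORE_VSX/RESTORE_GPR matches are the mirror-image epilogue.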
209 vpermxor 14, 14, 2, 25
218 vadduwm 10, 10, 14
219 vadduwm 11, 11, 15
227 vxor 7, 7, 11
257 vpermxor 14, 14, 2, 25
266 vadduwm 10, 10, 14
267 vadduwm 11, 11, 15
277 vxor 7, 7, 11
307 vpermxor 14, 14, 3, 25
315 vadduwm 11, 11, 12
317 vadduwm 9, 9, 14
323 vxor 6, 6, 11
357 vpermxor 14, 14, 3, 25
365 vadduwm 11, 11, 12
367 vadduwm 9, 9, 14
376 vxor 6, 6, 11
402 vpermxor 14, 14, 2, 20
406 vadduwm 10, 10, 14
407 vadduwm 11, 11, 15
411 vxor 7, 7, 11
422 vpermxor 14, 14, 2, 22
426 vadduwm 10, 10, 14
427 vadduwm 11, 11, 15
431 vxor 7, 7, 11
445 vpermxor 14, 14, 3, 20
447 vadduwm 11, 11, 12
449 vadduwm 9, 9, 14
451 vxor 6, 6, 11
465 vpermxor 14, 14, 3, 22
467 vadduwm 11, 11, 12
469 vadduwm 9, 9, 14
471 vxor 6, 6, 11
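The vpermxor/vadduwm/vxor matches above are fragments of the ChaCha double-rounds, computed lane-parallel in vector registers: vadduwm is the 32-bit lane add, vxor the lane xor, and vpermxor with a mask register (20, 22, or 25) appears to fold the xor of d with a together with the byte-aligned rotates by 16 and 8 into a single instruction, while the rotates by 12 and 7 would use vrlw lines that do not match this search. A scalar sketch of the quarter-round these instructions implement:

#include <stdint.h>

static inline uint32_t rotl32(uint32_t v, int n)
{
	return (v << n) | (v >> (32 - n));
}

/* One scalar ChaCha quarter-round; each matched vadduwm/vxor line
 * performs the corresponding step on four 32-bit lanes at once. */
static void quarter_round(uint32_t x[16], int a, int b, int c, int d)
{
	x[a] += x[b]; x[d] ^= x[a]; x[d] = rotl32(x[d], 16);
	x[c] += x[d]; x[b] ^= x[c]; x[b] = rotl32(x[b], 12);
	x[a] += x[b]; x[d] ^= x[a]; x[d] = rotl32(x[d], 8);
	x[c] += x[d]; x[b] ^= x[c]; x[b] = rotl32(x[b], 7);
}

A double round applies this to the four columns and then the four diagonals of the 4x4 state, which is why each matched opcode recurs in groups of four.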
483 xxmrghw 11, 32+\a2, 32+\a3 # a2, a3, b2, b3
486 xxpermdi 32+\a0, 10, 11, 0 # a0, a1, a2, a3
487 xxpermdi 32+\a1, 10, 11, 3 # b0, b1, b2, b3
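The xxmrghw/xxpermdi matches are from the TP_4x macro, whose own comments (a0..a3, b0..b3) show a 4x4 transpose: word merges followed by doubleword permutes regroup four registers that each hold one word from four blocks into four registers that each hold one block's row (the full macro presumably pairs these with xxmrglw merges for the low halves, which do not match the search). An array-level sketch of the same data movement:

#include <stdint.h>

/* Array view of TP_4x: transpose a 4x4 matrix of u32 held as four
 * 4-lane rows, turning lane-sliced state into contiguous rows. */
static void tp_4x(uint32_t r[4][4])
{
	for (int i = 0; i < 4; i++)
		for (int j = i + 1; j < 4; j++) {
			uint32_t t = r[i][j];
			r[i][j] = r[j][i];
			r[j][i] = t;
		}
}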
494 vadduwm \S+0, \S+0, 16-\S
495 vadduwm \S+4, \S+4, 17-\S
496 vadduwm \S+8, \S+8, 18-\S
497 vadduwm \S+12, \S+12, 19-\S
499 vadduwm \S+1, \S+1, 16-\S
500 vadduwm \S+5, \S+5, 17-\S
501 vadduwm \S+9, \S+9, 18-\S
502 vadduwm \S+13, \S+13, 19-\S
504 vadduwm \S+2, \S+2, 16-\S
505 vadduwm \S+6, \S+6, 17-\S
506 vadduwm \S+10, \S+10, 18-\S
507 vadduwm \S+14, \S+14, 19-\S
509 vadduwm \S+3, \S+3, 16-\S
510 vadduwm \S+7, \S+7, 17-\S
511 vadduwm \S+11, \S+11, 18-\S
512 vadduwm \S+15, \S+15, 19-\S
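The sixteen vadduwm lines above are the final step of the ChaCha block function: the saved input state (held in registers 16-\S through 19-\S, i.e. the key, counter, and nonce words) is added back into the permuted working state before the result is used as keystream. Scalar equivalent:

#include <stdint.h>

/* Final ChaCha block step: working state plus original input. */
static void add_state(uint32_t x[16], const uint32_t input[16])
{
	for (int i = 0; i < 16; i++)
		x[i] += input[i];
}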
519 add 9, 14, 5
520 add 16, 14, 4
532 lxvw4x 11, 27, 9
535 lxvw4x 14, 30, 9
549 xxlxor \S+46, \S+46, 11
552 xxlxor \S+43, \S+43, 14
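In the Write_256 matches, the add instructions form the current input/output addresses from the byte offset kept in r14 (see the "# offset to inp and outp" comment below), lxvw4x loads plaintext vectors through indexed registers, and xxlxor combines them with the generated keystream; the stores do not contain the search terms but complete the same pattern. The whole pass reduces to:

#include <stddef.h>
#include <stdint.h>

/* What one Write_256 pass does, in scalar form: xor 256 bytes of
 * input with 256 bytes of keystream into the output buffer. */
static void write_256(uint8_t *out, const uint8_t *in,
		      const uint8_t *keystream, size_t offset)
{
	for (size_t i = 0; i < 256; i++)
		out[offset + i] = in[offset + i] ^ keystream[i];
}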
587 # r17 - r31 mainly for Write_256 macro.
605 li 14, 0 # offset to inp and outp
624 addis 11, 2, permx@toc@ha
625 addi 11, 11, permx@toc@l
626 lxvw4x 32+20, 0, 11
627 lxvw4x 32+22, 17, 11
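The addis/addi pair is standard TOC-relative addressing: it materializes the address of the permx constant table, from which lxvw4x loads the vpermxor masks (here into v20 and v22). A plausible reading, given the quarter-round's rotation amounts, is that these masks encode the rotates by 16 and 8 as per-lane byte permutations; a byte-shuffle view of the 16-bit case on a little-endian lane:

#include <stdint.h>
#include <string.h>

/* rotl32(v, 16) expressed as a byte permutation, the kind of
 * per-lane shuffle a vpermxor mask can encode (illustrative; the
 * actual mask layout is not visible in the matches). */
static uint32_t rotl16_bytes(uint32_t v)
{
	uint8_t b[4], r[4];

	memcpy(b, &v, sizeof(v));
	r[0] = b[2]; r[1] = b[3]; r[2] = b[0]; r[3] = b[1];
	memcpy(&v, r, sizeof(v));
	return v;
}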
670 xxspltw 32+11, 18, 3
673 xxspltw 32+14, 19, 2
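The xxspltw lines (and the vspltw pair in the second loop below) broadcast single words of the loaded input state across whole vectors, which is how the lane-parallel layout is set up: after splatting, one vector holds one state word replicated in all four lanes, and only the block counter is then made to differ per lane. Sketched with arrays, assuming the usual ChaCha layout with the counter in word 12:

#include <stdint.h>

/* Splat the 16 input words across 4 lanes, then stagger the counter
 * so each lane computes a different block (the counter-staggering
 * instructions are not among the matches). */
static void splat_state(uint32_t v[16][4], const uint32_t state[16])
{
	for (int i = 0; i < 16; i++)
		for (int lane = 0; lane < 4; lane++)
			v[i][lane] = state[i];
	for (int lane = 0; lane < 4; lane++)
		v[12][lane] += (uint32_t)lane;	/* per-lane block counter */
}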
708 TP_4x 8, 9, 10, 11
709 TP_4x 12, 13, 14, 15
725 addi 14, 14, 256 # offset +=256
726 addi 15, 15, -256 # len -=256
734 TP_4x 16+8, 16+9, 16+10, 16+11
735 TP_4x 16+12, 16+13, 16+14, 16+15
743 addi 14, 14, 256 # offset +=256
744 addi 15, 15, -256 # len -=256
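The addi pairs after each TP_4x/Write_256 group advance the byte offset in r14 by 256 and reduce the remaining length in r15 by the same amount; the 8x path does this twice per iteration, covering 512 bytes. The loop shape this bookkeeping implies:

#include <stddef.h>

/* Driver shape inferred from the offset/len updates; the body of
 * each pass (rounds, TP_4x, Write_256) is elided. */
static void stream_xor(size_t len)
{
	size_t offset = 0;	/* r14: offset into inp and outp */

	while (len >= 256) {
		/* ... generate keystream, TP_4x, Write_256 ... */
		offset += 256;	/* addi 14, 14, 256 */
		len -= 256;	/* addi 15, 15, -256 */
	}
}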
771 addis 11, 2, permx@toc@ha
772 addi 11, 11, permx@toc@l
773 lxvw4x 32+20, 0, 11
774 lxvw4x 32+22, 17, 11
792 vspltw 11, 18, 3
796 vspltw 14, 19, 2
808 TP_4x 8, 9, 10, 11
809 TP_4x 12, 13, 14, 15
813 addi 14, 14, 256 # offset += 256
814 addi 15, 15, -256 # len -= 256