Lines Matching +full:4 +full:a
14 #define b_offset 4
27 /* define a few register aliases to allow macro substitution */
60 * a input register containing a (rotated 16)
64 * operations on a and b are interleaved to increase performance
66 #define encrypt_round(a,b,c,d,round)\ argument
68 mov s1(%r11,%rdi,4),%r8d;\
69 movzx a ## B, %edi;\
70 mov s2(%r11,%rdi,4),%r9d;\
73 xor s2(%r11,%rdi,4),%r8d;\
74 movzx a ## H, %edi;\
75 ror $16, a ## D;\
76 xor s3(%r11,%rdi,4),%r9d;\
78 xor s3(%r11,%rdi,4),%r8d;\
79 movzx a ## B, %edi;\
80 xor (%r11,%rdi,4), %r9d;\
83 xor (%r11,%rdi,4), %r8d;\
84 movzx a ## H, %edi;\
85 xor s1(%r11,%rdi,4),%r9d;\
91 add k+4+round(%r11),%r8d;\
 95  * a input register containing a (rotated 16)
99 * operations on a and b are interleaved to increase performance
100 * during the round a and b are prepared for the output whitening
102 #define encrypt_last_round(a,b,c,d,round)\ argument
106 mov s1(%r11,%rdi,4),%r8d;\
107 movzx a ## B, %edi;\
108 mov s2(%r11,%rdi,4),%r9d;\
111 xor s2(%r11,%rdi,4),%r8d;\
112 movzx a ## H, %edi;\
113 ror $16, a ## D;\
114 xor s3(%r11,%rdi,4),%r9d;\
116 xor s3(%r11,%rdi,4),%r8d;\
117 movzx a ## B, %edi;\
118 xor (%r11,%rdi,4), %r9d;\
119 xor a, %r10;\
121 xor (%r11,%rdi,4), %r8d;\
122 movzx a ## H, %edi;\
123 xor s1(%r11,%rdi,4),%r9d;\
129 add k+4+round(%r11),%r8d;\
133 * a input register containing a
137 * operations on a and b are interleaved to increase performance
139 #define decrypt_round(a,b,c,d,round)\ argument
140 movzx a ## B, %edi;\
141 mov (%r11,%rdi,4), %r9d;\
143 mov s3(%r11,%rdi,4),%r8d;\
144 movzx a ## H, %edi;\
145 ror $16, a ## D;\
146 xor s1(%r11,%rdi,4),%r9d;\
149 xor (%r11,%rdi,4), %r8d;\
150 movzx a ## B, %edi;\
151 xor s2(%r11,%rdi,4),%r9d;\
153 xor s1(%r11,%rdi,4),%r8d;\
154 movzx a ## H, %edi;\
155 ror $15, a ## D;\
156 xor s3(%r11,%rdi,4),%r9d;\
158 xor s2(%r11,%rdi,4),%r8d;\
163 add k+4+round(%r11),%r8d;\
168 * a input register containing a
172 * operations on a and b are interleaved to increase performance
173 * during the round a and b are prepared for the output whitening
175 #define decrypt_last_round(a,b,c,d,round)\ argument
176 movzx a ## B, %edi;\
177 mov (%r11,%rdi,4), %r9d;\
179 mov s3(%r11,%rdi,4),%r8d;\
182 xor (%r11,%rdi,4), %r8d;\
183 movzx a ## H, %edi;\
186 xor a, %r10;\
187 ror $16, a ## D;\
188 xor s1(%r11,%rdi,4),%r9d;\
190 xor s1(%r11,%rdi,4),%r8d;\
191 movzx a ## B, %edi;\
192 xor s2(%r11,%rdi,4),%r9d;\
194 xor s2(%r11,%rdi,4),%r8d;\
195 movzx a ## H, %edi;\
196 xor s3(%r11,%rdi,4),%r9d;\
201 add k+4+round(%r11),%r8d;\
230 encrypt_round(R0,R1,R2,R3,4*8);
290 decrypt_round(R2,R3,R0,R1,4*8);