Lines Matching +full:c +full:- +full:22
2 # Implement fast SHA-256 with SSSE3 instructions. (x86_64)
4 # Copyright (C) 2013 Intel Corporation.
9 # Tim Chen <tim.c.chen@linux.intel.com>
21 # - Redistributions of source code must retain the above
25 # - Redistributions in binary form must reproduce the above
41 # This code is described in an Intel White-Paper:
42 # "Fast SHA-256 Implementations on Intel Architecture Processors"
58 # Add reg to mem using reg-mem add and store
87 SHUF_00BA = %xmm10 # shuffle xBxA -> 00BA
88 SHUF_DC00 = %xmm11 # shuffle xDxC -> DC00
96 c = %ecx define
142 d = c
143 c = b define
150 ## compute W[-16] + W[-7] 4 at a time
153 ror $(25-11), y0 # y0 = e >> (25-11)
155 palignr $4, X2, XTMP0 # XTMP0 = W[-7]
156 ror $(22-13), y1 # y1 = a >> (22-13)
157 xor e, y0 # y0 = e ^ (e >> (25-11))
159 ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
161 xor a, y1 # y1 = a ^ (a >> (22-13))
163 paddd X0, XTMP0 # XTMP0 = W[-7] + W[-16]
164 xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
166 ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
168 palignr $4, X0, XTMP1 # XTMP1 = W[-15]
169 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
172 movdqa XTMP1, XTMP2 # XTMP2 = W[-15]
173 ror $2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
176 movdqa XTMP1, XTMP3 # XTMP3 = W[-15]
180 pslld $(32-7), XTMP1 #
181 or c, y0 # y0 = a|c
183 and c, y2 # y2 = a&c
185 and b, y0 # y0 = (a|c)&b
187 por XTMP2, XTMP1 # XTMP1 = W[-15] ror 7
188 or y2, y0 # y0 = MAJ = ((a|c)&b)|(a&c)
192 movdqa XTMP3, XTMP2 # XTMP2 = W[-15]
195 movdqa XTMP3, XTMP4 # XTMP4 = W[-15]
196 ror $(25-11), y0 # y0 = e >> (25-11)
197 xor e, y0 # y0 = e ^ (e >> (25-11))
199 ror $(22-13), y1 # y1 = a >> (22-13)
200 pslld $(32-18), XTMP3 #
201 xor a, y1 # y1 = a ^ (a >> (22-13))
202 ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
205 ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
206 xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
210 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
212 psrld $3, XTMP4 # XTMP4 = W[-15] >> 3
215 ror $2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
216 pxor XTMP2, XTMP1 # XTMP1 = W[-15] ror 7 ^ W[-15] ror 18
221 or c, y0 # y0 = a|c
223 and c, y2 # y2 = a&c
225 pshufd $0b11111010, X3, XTMP2 # XTMP2 = W[-2] {BBAA}
226 and b, y0 # y0 = (a|c)&b
228 paddd XTMP1, XTMP0 # XTMP0 = W[-16] + W[-7] + s0
229 or y2, y0 # y0 = MAJ = ((a|c)&b)|(a&c)
233 movdqa XTMP2, XTMP3 # XTMP3 = W[-2] {BBAA}
236 ror $(25-11), y0 # y0 = e >> (25-11)
237 movdqa XTMP2, XTMP4 # XTMP4 = W[-2] {BBAA}
238 xor e, y0 # y0 = e ^ (e >> (25-11))
239 ror $(22-13), y1 # y1 = a >> (22-13)
241 xor a, y1 # y1 = a ^ (a >> (22-13))
242 ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
243 psrlq $17, XTMP2 # XTMP2 = W[-2] ror 17 {xBxA}
245 psrlq $19, XTMP3 # XTMP3 = W[-2] ror 19 {xBxA}
246 xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
248 psrld $10, XTMP4 # XTMP4 = W[-2] >> 10 {BBAA}
249 ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
250 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
255 ror $2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
262 or c, y0 # y0 = a|c
264 and c, y2 # y2 = a&c
266 and b, y0 # y0 = (a|c)&b
269 pshufd $0b01010000, XTMP0, XTMP2 # XTMP2 = W[-2] {DDCC}
270 or y2, y0 # y0 = MAJ = ((a|c)&b)|(a&c)
274 movdqa XTMP2, XTMP3 # XTMP3 = W[-2] {DDCC}
276 ror $(25-11), y0 # y0 = e >> (25-11)
278 movdqa XTMP2, X0 # X0 = W[-2] {DDCC}
279 ror $(22-13), y1 # y1 = a >> (22-13)
280 xor e, y0 # y0 = e ^ (e >> (25-11))
282 ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
283 psrlq $17, XTMP2 # XTMP2 = W[-2] ror 17 {xDxC}
284 xor a, y1 # y1 = a ^ (a >> (22-13))
286 psrlq $19, XTMP3 # XTMP3 = W[-2] ror 19 {xDxC}
287 xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
289 ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
290 psrld $10, X0 # X0 = W[-2] >> 10 {DDCC}
291 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
303 or c, y0 # y0 = a|c
305 and c, y2 # y2 = a&c
307 and b, y0 # y0 = (a|c)&b
309 or y2, y0 # y0 = MAJ = ((a|c)&b)|(a&c)
319 ror $(25-11), y0 # y0 = e >> (25-11)
321 xor e, y0 # y0 = e ^ (e >> (25-11))
322 ror $(22-13), y1 # y1 = a >> (22-13)
324 xor a, y1 # y1 = a ^ (a >> (22-13))
325 ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
327 xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
328 ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
330 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
334 ror $2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
340 or c, y0 # y0 = a|c
342 and c, y2 # y2 = a&c
343 and b, y0 # y0 = (a|c)&b
345 or y2, y0 # y0 = MAJ = ((a|c)&b)|(a&c)
379 mov 4*2(CTX), c
453 addm (4*2)(CTX),c
505 # shuffle xBxA -> 00BA
511 # shuffle xDxC -> DC00