Lines Matching +full:4 +full:e

48 # This code schedules 2 blocks at a time, with 4 lanes per block
98 e = %edx # clobbers NUM_BLKS
116 _XFER_SIZE = 2*64*4 # 2 blocks, 64 rounds, 4 bytes/round
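For orientation: line 48 says the code keeps two message blocks in flight, and line 116 sizes the on-stack W+K scratch accordingly: 2 blocks x 64 rounds x 4 bytes per round per block = 512 bytes. A minimal C sketch of that sizing (constant names are mine, not the file's):

    #include <stdio.h>

    int main(void)
    {
        const int blocks = 2;           /* two SHA-256 blocks interleaved      */
        const int rounds = 64;          /* 64 rounds per block                 */
        const int bytes_per_round = 4;  /* one 32-bit W+K word per round/block */

        /* Mirrors _XFER_SIZE = 2*64*4 on line 116. */
        printf("XFER scratch = %d bytes\n", blocks * rounds * bytes_per_round);
        return 0;
    }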
146 f = e
147 e = d
158 rorx $25, e, y0 # y0 = e >> 25 # S1A
159 rorx $11, e, y1 # y1 = e >> 11 # S1B
163 vpalignr $4, X2, X3, XTMP0 # XTMP0 = W[-7]
167 xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1
169 vpaddd X0, XTMP0, XTMP0 # XTMP0 = W[-7] + W[-16]
170 rorx $6, e, y1 # y1 = (e >> 6) # S1
172 and e, y2 # y2 = (f^g)&e # CH
173 xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1
178 vpalignr $4, X0, X1, XTMP1 # XTMP1 = W[-15]
182 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
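The rorx/xor/and sequence in lines 158-182 is the scalar half of one round: despite the ">>" in the comments, rorx is a rotate, and y0/y2 end up holding the SHA-256 Sigma1 and Ch values for the current e, f, g. A C sketch of the same computations (function names are mine):

    #include <stdint.h>

    /* rorx-style rotate right (the assembly version also avoids touching flags). */
    static inline uint32_t ror32(uint32_t x, unsigned n)
    {
        return (x >> n) | (x << (32 - n));
    }

    /* y0 in the listing: Sigma1(e) = (e ror 6) ^ (e ror 11) ^ (e ror 25) */
    static inline uint32_t big_sigma1(uint32_t e)
    {
        return ror32(e, 6) ^ ror32(e, 11) ^ ror32(e, 25);
    }

    /* y2 in the listing: Ch(e,f,g) = ((f ^ g) & e) ^ g == (e & f) ^ (~e & g) */
    static inline uint32_t ch(uint32_t e, uint32_t f, uint32_t g)
    {
        return ((f ^ g) & e) ^ g;
    }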
206 rorx $25, e, y0 # y0 = e >> 25 # S1A
207 rorx $11, e, y1 # y1 = e >> 11 # S1B
208 offset = \disp + 1*4
216 xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1
220 rorx $6, e, y1 # y1 = (e >> 6) # S1
221 xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1
223 and e, y2 # y2 = (f^g)&e # CH
232 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
258 rorx $25, e, y0 # y0 = e >> 25 # S1A
259 offset = \disp + 2*4
263 rorx $11, e, y1 # y1 = e >> 11 # S1B
269 xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1
271 and e, y2 # y2 = (f^g)&e # CH
273 rorx $6, e, y1 # y1 = (e >> 6) # S1
278 xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1
281 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
307 rorx $25, e, y0 # y0 = e >> 25 # S1A
308 rorx $11, e, y1 # y1 = e >> 11 # S1B
309 offset = \disp + 3*4
317 xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1
322 rorx $6, e, y1 # y1 = (e >> 6) # S1
323 and e, y2 # y2 = (f^g)&e # CH
328 xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1
329 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
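The vector instructions woven through these rounds (vpalignr at lines 163 and 178, vpaddd at line 169) start the message-schedule update: the comments mark the W[-7] and W[-15] terms, and the vpaddd shown adds W[-7] to W[-16]. The full per-word recurrence, in scalar C (helper names are mine; ssig0/ssig1 are the lowercase schedule functions, not the round's Sigma1):

    #include <stdint.h>

    static inline uint32_t ror32(uint32_t x, unsigned n)
    {
        return (x >> n) | (x << (32 - n));
    }

    static inline uint32_t ssig0(uint32_t x) { return ror32(x, 7)  ^ ror32(x, 18) ^ (x >> 3);  }
    static inline uint32_t ssig1(uint32_t x) { return ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10); }

    /* One new schedule word; the vpaddd at line 169 is the w[t-7] + w[t-16] part. */
    static inline uint32_t next_w(const uint32_t w[64], int t)
    {
        return ssig1(w[t - 2]) + w[t - 7] + ssig0(w[t - 15]) + w[t - 16];
    }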
360 rorx $25, e, y0 # y0 = e >> 25 # S1A
361 rorx $11, e, y1 # y1 = e >> 11 # S1B
364 xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1
365 rorx $6, e, y1 # y1 = (e >> 6) # S1
366 and e, y2 # y2 = (f^g)&e # CH
368 xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1
370 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
397 rorx $25, e, y0 # y0 = e >> 25 # S1A
398 rorx $11, e, y1 # y1 = e >> 11 # S1B
401 xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1
402 rorx $6, e, y1 # y1 = (e >> 6) # S1
403 and e, y2 # y2 = (f^g)&e # CH
406 xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1
408 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
414 offset = 4*1 + \disp
437 rorx $25, e, y0 # y0 = e >> 25 # S1A
438 rorx $11, e, y1 # y1 = e >> 11 # S1B
441 xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1
442 rorx $6, e, y1 # y1 = (e >> 6) # S1
443 and e, y2 # y2 = (f^g)&e # CH
446 xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1
448 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
454 offset = 4*2 + \disp
477 rorx $25, e, y0 # y0 = e >> 25 # S1A
478 rorx $11, e, y1 # y1 = e >> 11 # S1B
481 xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1
482 rorx $6, e, y1 # y1 = (e >> 6) # S1
483 and e, y2 # y2 = (f^g)&e # CH
486 xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1
488 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
494 offset = 4*3 + \disp
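Lines 360-494 come from the rounds-only macro (no new scheduling): it reads precomputed W+K words at offsets \disp, \disp + 4*1, 4*2 and 4*3. Combined with the symbol rotation at lines 146-147 (f = e, e = d, ...), each round performs the textbook SHA-256 state update. A scalar sketch, with Sigma0 and Maj written out even though they are not among the matched lines (names are mine):

    #include <stdint.h>

    static inline uint32_t ror32(uint32_t x, unsigned n)
    {
        return (x >> n) | (x << (32 - n));
    }

    static inline uint32_t big_sigma0(uint32_t a) { return ror32(a, 2) ^ ror32(a, 13) ^ ror32(a, 22); }
    static inline uint32_t big_sigma1(uint32_t e) { return ror32(e, 6) ^ ror32(e, 11) ^ ror32(e, 25); }
    static inline uint32_t ch(uint32_t e, uint32_t f, uint32_t g)  { return ((f ^ g) & e) ^ g; }
    static inline uint32_t maj(uint32_t a, uint32_t b, uint32_t c) { return (a & b) ^ (a & c) ^ (b & c); }

    /* One round: s[0..7] play the roles of a..h, wk is the precomputed
     * W[t] + K[t] word that the assembly loads from the stack at "offset". */
    static void sha256_round(uint32_t s[8], uint32_t wk)
    {
        uint32_t t1 = s[7] + big_sigma1(s[4]) + ch(s[4], s[5], s[6]) + wk;
        uint32_t t2 = big_sigma0(s[0]) + maj(s[0], s[1], s[2]);

        /* The assembly renames registers instead of moving data
         * (ROTATE_ARGS, lines 146-147); here we shift the array. */
        for (int i = 7; i > 0; i--)
            s[i] = s[i - 1];
        s[4] += t1;        /* new e = d + t1 */
        s[0] = t1 + t2;    /* new a = t1 + t2 */
    }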
550 mov 4*1(CTX), b
551 mov 4*2(CTX), c
552 mov 4*3(CTX), d
553 mov 4*4(CTX), e
554 mov 4*5(CTX), f
555 mov 4*6(CTX), g
556 mov 4*7(CTX), h
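Lines 550-556 load the chaining value from the context into the working registers b..h (the corresponding load of a is not among the matched lines). In C terms, assuming the digest is simply eight consecutive 32-bit words at CTX (names are illustrative):

    #include <stdint.h>

    /* Illustrative: the digest words sit at CTX + 4*0 .. CTX + 4*7. */
    static void load_digest(const uint32_t *ctx_state, uint32_t work[8])
    {
        for (int i = 0; i < 8; i++)
            work[i] = ctx_state[i];   /* mov 4*i(CTX), reg  for a..h */
    }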
612 add $4*32, SRND
613 cmp $3*4*32, SRND
632 cmp $4*4*32, SRND
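SRND is a byte offset into that W+K scratch area: each group of four rounds consumes one 32-byte ymm slot holding the schedule for both interleaved blocks, and the add at line 612 advances it by 4*32 = 128 bytes per pass through the scheduling loop. The compares therefore stop at 3*4*32 = 384 (scheduled rounds done) and 4*4*32 = 512 (all 64 rounds done). The arithmetic, spelled out in C (names are mine):

    #include <stdio.h>

    int main(void)
    {
        const int bytes_per_4_rounds = 32;   /* one ymm of W+K per 4 rounds */

        int sched_end = 3 * 4 * bytes_per_4_rounds;  /* 384: after round 47 */
        int all_end   = 4 * 4 * bytes_per_4_rounds;  /* 512: after round 63 */

        printf("SRND limits: %d and %d bytes\n", sched_end, all_end);
        return 0;
    }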
638 addm (4*0)(CTX),a
639 addm (4*1)(CTX),b
640 addm (4*2)(CTX),c
641 addm (4*3)(CTX),d
642 addm (4*4)(CTX),e
643 addm (4*5)(CTX),f
644 addm (4*6)(CTX),g
645 addm (4*7)(CTX),h
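Lines 638-645 are the feedback step after the first block: addm, a small two-instruction helper defined earlier in the file, adds the digest word in memory and the register and writes the sum back, so both the context and the working registers end up holding the new chaining value. A C sketch of the effect (names are mine):

    #include <stdint.h>

    /* addm (4*i)(CTX), reg: digest[i] and the register both become the sum. */
    static void add_back(uint32_t digest[8], uint32_t work[8])
    {
        for (int i = 0; i < 8; i++)
            work[i] = digest[i] = digest[i] + work[i];
    }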
657 cmp $4*4*32, SRND
664 addm (4*0)(CTX),a
665 addm (4*1)(CTX),b
666 addm (4*2)(CTX),c
667 addm (4*3)(CTX),d
668 addm (4*4)(CTX),e
669 addm (4*5)(CTX),f
670 addm (4*6)(CTX),g
671 addm (4*7)(CTX),h
693 mov (4*0)(CTX),a
694 mov (4*1)(CTX),b
695 mov (4*2)(CTX),c
696 mov (4*3)(CTX),d
697 mov (4*4)(CTX),e
698 mov (4*5)(CTX),f
699 mov (4*6)(CTX),g
700 mov (4*7)(CTX),h
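Lines 664-671 repeat that feedback for the second block of the pair, and lines 693-700 load the digest fresh again, the same way lines 550-556 do, for a later entry path. Given the addm behaviour sketched above, the registers already hold the updated value after the first feedback, so the second block's rounds can start from them without a reload. A rough structural sketch of one two-block pass (all helpers are stand-ins, with the rounds themselves elided):

    #include <stdint.h>

    static void load_digest(const uint32_t digest[8], uint32_t work[8])
    {
        for (int i = 0; i < 8; i++)
            work[i] = digest[i];                         /* mov 4*i(CTX), reg */
    }

    static void add_back(uint32_t digest[8], uint32_t work[8])
    {
        for (int i = 0; i < 8; i++)
            work[i] = digest[i] = digest[i] + work[i];   /* addm */
    }

    /* Stand-in for 64 rounds; lane 0 = low ymm halves (block 1),
     * lane 1 = high halves (block 2). Real work elided in this sketch. */
    static void do_rounds(uint32_t work[8], int lane)
    {
        (void)work; (void)lane;
    }

    static void two_block_pass(uint32_t digest[8])
    {
        uint32_t work[8];

        load_digest(digest, work);   /* lines 550-556 */
        do_rounds(work, 0);          /* scheduled + final rounds, block 1 */
        add_back(digest, work);      /* lines 638-645 */
        do_rounds(work, 1);          /* stored schedule reused, block 2   */
        add_back(digest, work);      /* lines 664-671 */
    }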