Lines matching refs:b, apparently from arch/arm64/crypto/aes-ce-ccm-core.S (the Linux kernel's arm64 AES-CCM assembly); each entry is prefixed with its line number in that file.

27 aese \va\().16b, \vk\().16b /* AddRoundKey+SubBytes+ShiftRows, 1st block */
28 aesmc \va\().16b, \va\().16b /* MixColumns */
29 aese \vb\().16b, \vk\().16b /* same round on 2nd block */
30 aesmc \vb\().16b, \vb\().16b /* MixColumns */
43 aese \va\().16b, v4.16b /* last aese; final AddRoundKey (v5) deferred */
44 aese \vb\().16b, v4.16b /* ditto for 2nd block */
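
The interleaved aese/aesmc pairs (file lines 27-30) keep two independent AES computations in flight per round key: the CBC-MAC state in \va and the CTR keystream block in \vb, so each key is loaded once and the latency of one block's round hides behind the other's. A toy C model of that structure only; toy_round is NOT real AES, just a stand-in for one aese+aesmc pair:

    #include <stdint.h>

    /* Stand-in for one aese+aesmc pair, not real AES. */
    void toy_round(uint8_t blk[16], const uint8_t rk[16])
    {
        uint8_t t = blk[0];
        for (int i = 0; i < 15; i++)
            blk[i] = blk[i + 1] ^ rk[i];  /* fake substitute/shift/mix */
        blk[15] = t ^ rk[15];
    }

    /* Mirrors the dround macro body above: one round key applied
     * to both in-flight blocks. */
    void dround(uint8_t mac[16], uint8_t ctr[16], const uint8_t rk[16])
    {
        toy_round(mac, rk);  /* \va: CBC-MAC state */
        toy_round(ctr, rk);  /* \vb: CTR keystream block */
    }
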
50 ld1 {v0.16b}, [x5] /* load mac */
55 ld1 {v1.8b}, [x6] /* load upper ctr */
65 ld1 {v2.16b}, [x1], #16 /* load next input block */
67 eor v2.16b, v2.16b, v5.16b /* final round enc+mac */
68 eor v6.16b, v1.16b, v2.16b /* xor with crypted ctr */
70 eor v2.16b, v2.16b, v1.16b /* xor with crypted ctr */
71 eor v6.16b, v2.16b, v5.16b /* final round enc */
73 eor v0.16b, v0.16b, v2.16b /* xor mac with pt ^ rk[last] */
74 st1 {v6.16b}, [x0], #16 /* write output block */
75 bne 0b
79 st1 {v0.16b}, [x5] /* store mac */
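
The eor sequence in the loop (file lines 67-73, encryption path; lines 70-71 are the same algebra reordered for decryption) fuses the final AES AddRoundKey into the CTR and CBC-MAC XORs: both v1 (encrypted counter) and v0 (MAC state) leave the round macro still missing the last round key v5, and XORing the plaintext with v5 once produces a value that completes both. A runnable C check of that XOR algebra with placeholder values; eprime and mprime stand for the round-key-deferred AES outputs in v1 and v0:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t rk_last[16], eprime[16], mprime[16], pt[16];
        uint8_t v2[16], ct[16], mac_in[16];

        for (int i = 0; i < 16; i++) {        /* arbitrary demo values */
            rk_last[i] = 0xa5 ^ i;            /* v5: deferred last round key */
            eprime[i]  = 0x3c + i;            /* v1: AES(ctr) minus last AddRoundKey */
            mprime[i]  = 0x99 - i;            /* v0: MAC state minus last AddRoundKey */
            pt[i]      = 7 * i;               /* plaintext block */
        }

        for (int i = 0; i < 16; i++) {
            v2[i]     = pt[i] ^ rk_last[i];   /* 67: final round enc+mac */
            ct[i]     = eprime[i] ^ v2[i];    /* 68: xor with crypted ctr */
            mac_in[i] = mprime[i] ^ v2[i];    /* 73: xor mac with pt ^ rk[last] */
        }

        for (int i = 0; i < 16; i++) {
            /* ct == pt ^ full AES(ctr): proper CTR encryption */
            if (ct[i] != (uint8_t)(pt[i] ^ (eprime[i] ^ rk_last[i]))) return 1;
            /* mac_in == full MAC state ^ pt: proper CBC-MAC chaining */
            if (mac_in[i] != (uint8_t)((mprime[i] ^ rk_last[i]) ^ pt[i])) return 1;
        }
        puts("one eor with v5 completes both deferred AddRoundKeys");
        return 0;
    }
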
84 eor v0.16b, v0.16b, v5.16b /* final round mac */
85 eor v1.16b, v1.16b, v5.16b /* final round enc */
93 ld1 {v7.16b-v8.16b}, [x9]
94 ld1 {v9.16b}, [x8]
96 ld1 {v2.16b}, [x1] /* load a full block of input */
97 tbl v1.16b, {v1.16b}, v7.16b /* move keystream to end of register */
98 eor v7.16b, v2.16b, v1.16b /* encrypt partial input block */
99 bif v2.16b, v7.16b, v22.16b /* select plaintext */
100 tbx v7.16b, {v6.16b}, v8.16b /* insert output from previous iteration */
101 tbl v2.16b, {v2.16b}, v9.16b /* copy plaintext to start of v2 */
102 eor v0.16b, v0.16b, v2.16b /* fold plaintext into mac */
104 st1 {v7.16b}, [x0] /* store output block */
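
The tail (file lines 93-104) handles a final partial block without byte-wise loads: the in/out pointers have already been rewound so one full 16-byte vector overlapping the previous block can be loaded and stored, and the tbl/tbx permutes (v7-v9) realign keystream, previous output, and MAC input. A hedged, byte-oriented C model of the net effect for a final block of n bytes; the permute trick itself is not reproduced, and the enc flag plays the role of the v22 mask used by the bif:

    #include <stdint.h>

    /* mac: CBC-MAC state and ks: keystream block, both with the final
     * AddRoundKey already folded in (file lines 84-85); 0 < n < 16.
     * Sketch of the computation, not the kernel code. */
    void ccm_crypt_tail(uint8_t mac[16], const uint8_t ks[16],
                        uint8_t *out, const uint8_t *in, int n, int enc)
    {
        uint8_t pt[16] = { 0 };             /* zero padding for the CBC-MAC */

        for (int i = 0; i < n; i++) {
            out[i] = in[i] ^ ks[i];         /* CTR en-/decryption */
            pt[i]  = enc ? in[i] : out[i];  /* 99: bif selects the plaintext */
        }
        for (int i = 0; i < 16; i++)
            mac[i] ^= pt[i];                /* 102: fold plaintext into mac */
    }
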
108 ld1 {v1.16b}, [x7] /* load 1st ctriv */
113 eor v0.16b, v0.16b, v1.16b /* en-/decrypt the mac */
114 0: st1 {v0.16b}, [x5] /* store result */
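
In the finalization (file lines 108-114), both v0 (the MAC state) and v1 (the encrypted first counter block) come out of the AES macro still missing the final AddRoundKey; since the same key is missing from both, it cancels in the eor at line 113, which therefore yields the correctly encrypted MAC with no extra work. A one-byte sanity check of the cancellation:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* placeholders: m = full CBC-MAC byte, s0 = full AES(ctr0) byte,
         * rk = corresponding last-round-key byte */
        uint8_t m = 0x5e, s0 = 0xc3, rk = 0x7a;
        uint8_t v0 = m ^ rk;    /* MAC state, last AddRoundKey deferred */
        uint8_t v1 = s0 ^ rk;   /* S0, last AddRoundKey deferred */
        assert((uint8_t)(v0 ^ v1) == (uint8_t)(m ^ s0)); /* 113: rk cancels */
        return 0;
    }
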
127 movi v22.16b, #255
132 movi v22.16b, #0
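
These two movi instructions (file lines 127 and 132) are where the encrypt and decrypt entry points diverge in this listing: v22 is the mask consumed by the bif at line 99. BIF ("bitwise insert if false") keeps destination bits where the mask is 1 and takes them from the source where it is 0, so an all-ones mask leaves v2 as the input block (the plaintext when encrypting), while an all-zeros mask replaces it with the decrypted output. A C model of that selection:

    #include <assert.h>
    #include <stdint.h>

    /* Model of "bif v2, v7, v22", one byte at a time. */
    uint8_t bif(uint8_t dst, uint8_t src, uint8_t mask)
    {
        return (dst & mask) | (src & ~mask);
    }

    int main(void)
    {
        uint8_t in = 0x42 /* v2 */, out = 0x91 /* v7 */;
        assert(bif(in, out, 0xff) == in);   /* 127: encrypt, plaintext = input */
        assert(bif(in, out, 0x00) == out);  /* 132: decrypt, plaintext = output */
        return 0;
    }
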