Lines Matching +full:32 +full:- +full:63

1 /* gf128mul.h - GF(2^128) multiplication functions
59 * http://csrc.nist.gov/groups/ST/toolkit/BCM/documents/proposedmodes/gcm/gcm-revised-spec.pdf
61 * The elements of GF(2^128) := GF(2)[X]/(X^128-X^7-X^2-X^1-1) can
73 * in every byte in little-endian order and the bytes themselves also in
74 * little-endian order. I will call this lle (little-little-endian).
81 * bytes also. This is bbe (big-big-endian). Now the buffer above
86 * Both of the above formats are easy to implement on big-endian
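As a concrete illustration of the two mappings just described (my own sketch, not text from the file): take the reduction remainder X^7+X^2+X+1. In bbe the 16-byte buffer is simply the polynomial read as a big-endian 128-bit integer, so this element is 00 .. 00 87, which is exactly the 0x87 mask used in gf128mul_x_bbe() below. In lle, the GCM convention from the spec linked above, the lowest-degree coefficient sits in the most significant bit of byte 0, so the same element is E1 00 .. 00. A minimal userspace sketch; the helper names are mine:

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical helpers, not part of gf128mul.h: set the coefficient
     * of X^k under the two byte orderings described above. */
    static void set_coeff_bbe(uint8_t buf[16], int k)
    {
        buf[15 - k / 8] |= 1 << (k % 8);      /* plain big-endian integer */
    }

    static void set_coeff_lle(uint8_t buf[16], int k)
    {
        buf[k / 8] |= 0x80 >> (k % 8);        /* bit order reversed per byte */
    }

    int main(void)
    {
        uint8_t bbe[16] = { 0 }, lle[16] = { 0 };
        int bits[] = { 7, 2, 1, 0 };          /* X^7 + X^2 + X + 1 */

        for (int i = 0; i < 4; i++) {
            set_coeff_bbe(bbe, bits[i]);
            set_coeff_lle(lle, bits[i]);
        }
        for (int i = 0; i < 16; i++)
            printf("%02x", bbe[i]);
        printf("  <- bbe (ends in 87)\n");
        for (int i = 0; i < 16; i++)
            printf("%02x", lle[i]);
        printf("  <- lle (starts with e1)\n");
        return 0;
    }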
94 * The common machine word-size is smaller than 128 bits, so to make
96 * This implementation uses 64-bit words for the moment. Machine
111 * 39...32 47...40 55...48 63...56 07...00 15...08 23...16 31...24
117 * 31...24 23...16 15...08 07...00 63...56 55...48 47...40 39...32
123 * Multiplications in GF(2^128) are mostly bit-shifts, so you see why
124 * ble (and lbe also) are easier to implement on a little-endian
125 * machine than on a big-endian machine. The converse holds for bbe
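To make the point about shifts concrete, here is a small sketch of my own (not code from the file), assuming the ble mapping means the 16-byte buffer is read as a plain 128-bit little-endian integer with bit k holding the coefficient of X^k, which is consistent with gf128mul_x_ble() further down. On a little-endian machine the element can be loaded into two 64-bit words with no byte swapping, so multiplying by X needs nothing but word shifts and one XOR:

    #include <stdint.h>
    #include <string.h>

    /* Coefficient of X^k in a ble buffer: an ordinary bit test. */
    static int ble_coeff(const uint8_t buf[16], int k)
    {
        return (buf[k / 8] >> (k % 8)) & 1;
    }

    /* The same test after loading the element as two native words;
     * correct as written only on a little-endian host, which is the
     * point the comment above is making. */
    static int ble_coeff_words(const uint8_t buf[16], int k)
    {
        uint64_t w[2];

        memcpy(w, buf, 16);               /* no byteswap needed on LE */
        return (w[k / 64] >> (k % 64)) & 1;
    }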
129 * to keep elements of GF(2^128) in type u64[2]. On 32-bit wordsize
130 * machines this will automatically be aligned to wordsize and on a 64-bit
139 positions within four 32-bit words in the following way
143 24...31 16...23 08...15 00...07 56...63 48...55 40...47 32...39
150 positions within four 32-bit words in the following way
154 00...07 08...15 16...23 24...31 32...39 40...47 48...55 56...63
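The functions quoted below dereference x->a and x->b, i.e. the u64[2]-style containers mentioned above (be128 and le128, declared elsewhere in the kernel headers). As a reading aid only, a stand-in consistent with the carry propagation in the code below would look like this; treat the field layout as background knowledge rather than something this excerpt shows:

    #include <stdint.h>

    /* Stand-ins for the kernel's 128-bit containers (illustrative).
     * In both, 'a' is the numerically more significant half: in
     * gf128mul_x_bbe()/gf128mul_x_ble() below, 'a' receives the bit
     * carried out of 'b' when the element is shifted left. */
    typedef struct {
        uint64_t a;     /* high 64 bits, stored big-endian in the kernel */
        uint64_t b;     /* low 64 bits */
    } be128_like;

    typedef struct {
        uint64_t b;     /* low 64 bits, stored little-endian in the kernel */
        uint64_t a;     /* high 64 bits */
    } le128_like;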
169 * the polynomial field representation. They use 64-bit word operations
178 /* a constant-time version of 'x & ((u64)1 << which) ? (u64)-1 : 0' */ in gf128mul_mask_from_bit()
179 return ((s64)(x << (63 - which)) >> 63); in gf128mul_mask_from_bit()
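The trick above deserves a gloss: shifting bit 'which' up to bit position 63 and then arithmetic-right-shifting by 63 smears that single bit across the whole word, giving an all-ones or all-zero mask with no data-dependent branch. (It relies on '>>' of a negative signed value being an arithmetic shift, which ISO C leaves implementation-defined but the kernel assumes.) A self-contained userspace copy, for illustration only:

    #include <assert.h>
    #include <stdint.h>

    static uint64_t mask_from_bit(uint64_t x, int which)
    {
        /* ~0 if bit 'which' of x is set, 0 otherwise, branch-free */
        return (uint64_t)((int64_t)(x << (63 - which)) >> 63);
    }

    int main(void)
    {
        assert(mask_from_bit(0x80, 7) == UINT64_MAX);
        assert(mask_from_bit(0x80, 6) == 0);
        /* the pattern used below: pick a reduction constant branch-free */
        assert((mask_from_bit(1ULL << 63, 63) & 0x87) == 0x87);
        return 0;
    }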
184 u64 a = be64_to_cpu(x->a); in gf128mul_x_lle()
185 u64 b = be64_to_cpu(x->b); in gf128mul_x_lle()
191 r->b = cpu_to_be64((b >> 1) | (a << 63)); in gf128mul_x_lle()
192 r->a = cpu_to_be64((a >> 1) ^ _tt); in gf128mul_x_lle()
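The _tt line of gf128mul_x_lle() is not among the matches above; by analogy with the bbe and ble variants shown below, it selects the lle reduction constant (the GCM spec's E1 byte, shifted into the most significant byte of 'a') when bit 0 of 'b', the X^127 coefficient, is set. A worked doubling under that assumption, written as a standalone sketch rather than as the kernel function:

    #include <stdio.h>
    #include <stdint.h>

    /* a = bytes 0..7 and b = bytes 8..15 of the lle buffer, already
     * converted from big-endian storage to host order (illustration). */
    static void double_lle(uint64_t *a, uint64_t *b)
    {
        uint64_t carry = *b & 1;              /* coefficient of X^127 */

        *b = (*b >> 1) | (*a << 63);          /* as in the code above */
        *a = (*a >> 1) ^ (carry ? (uint64_t)0xe1 << 56 : 0);
    }

    int main(void)
    {
        uint64_t a = 0, b = 1;                /* the element X^127 in lle */

        double_lle(&a, &b);
        /* X^128 reduces to X^7+X^2+X+1, i.e. byte 0 becomes 0xe1;
         * prints: e100000000000000 0000000000000000 */
        printf("%016llx %016llx\n",
               (unsigned long long)a, (unsigned long long)b);
        return 0;
    }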
197 u64 a = be64_to_cpu(x->a); in gf128mul_x_bbe()
198 u64 b = be64_to_cpu(x->b); in gf128mul_x_bbe()
200 /* equivalent to gf128mul_table_be[a >> 63] (see crypto/gf128mul.c): */ in gf128mul_x_bbe()
201 u64 _tt = gf128mul_mask_from_bit(a, 63) & 0x87; in gf128mul_x_bbe()
203 r->a = cpu_to_be64((a << 1) | (b >> 63)); in gf128mul_x_bbe()
204 r->b = cpu_to_be64((b << 1) ^ _tt); in gf128mul_x_bbe()
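A quick trace shows where the 0x87 above comes from. Doubling the bbe element X^127, i.e. a = 0x8000000000000000 and b = 0, gives _tt = 0x87, r->a = (a << 1) | (b >> 63) = 0 and r->b = (b << 1) ^ _tt = 0x87: the overflowing X^128 term has been folded back in as X^7 + X^2 + X + 1, which in bbe byte order is a buffer ending in the single byte 87. Note also that both inputs are read into locals before either half of *r is written, so calling the function with r == x (doubling in place) is safe; that observation is mine, drawn from the code as quoted.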
210 u64 a = le64_to_cpu(x->a); in gf128mul_x_ble()
211 u64 b = le64_to_cpu(x->b); in gf128mul_x_ble()
213 /* equivalent to gf128mul_table_be[a >> 63] (see crypto/gf128mul.c): */ in gf128mul_x_ble()
214 u64 _tt = gf128mul_mask_from_bit(a, 63) & 0x87; in gf128mul_x_ble()
216 r->a = cpu_to_le64((a << 1) | (b >> 63)); in gf128mul_x_ble()
217 r->b = cpu_to_le64((b << 1) ^ _tt); in gf128mul_x_ble()
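gf128mul_x_ble() is the doubling step used to advance the XTS tweak from one block to the next. The same operation is often written byte-wise directly on the 16-byte buffer; the sketch below (mine, for illustration) does exactly that and agrees with the word-based code above, since bit 63 of 'a' is the most significant bit of byte 15 of a little-endian buffer:

    #include <stdint.h>

    /* Multiply a ble element (128-bit little-endian buffer) by x,
     * byte by byte.  Mathematically equivalent to gf128mul_x_ble(). */
    static void ble_double_bytes(uint8_t t[16])
    {
        uint8_t carry = 0;

        for (int i = 0; i < 16; i++) {
            uint8_t next = t[i] >> 7;

            t[i] = (uint8_t)(t[i] << 1) | carry;
            carry = next;
        }
        if (carry)                    /* X^128 folds back as X^7+X^2+X+1 */
            t[0] ^= 0x87;
    }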