Lines matching full:u64 (identifier search results, apparently from the kernel's HACL*-derived Curve25519 implementation, lib/crypto/curve25519-hacl64.c; each entry shows the source line number, the matched line, and its enclosing function).

17 static __always_inline u64 u64_eq_mask(u64 a, u64 b) in u64_eq_mask()
19 u64 x = a ^ b; in u64_eq_mask()
20 u64 minus_x = ~x + (u64)1U; in u64_eq_mask()
21 u64 x_or_minus_x = x | minus_x; in u64_eq_mask()
22 u64 xnx = x_or_minus_x >> (u32)63U; in u64_eq_mask()
23 u64 c = xnx - (u64)1U; in u64_eq_mask()
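
These five lines are the whole arithmetic of a constant-time equality test: x is zero iff a == b, in which case both x and its two's-complement negation have bit 63 clear, so xnx is 0 and the final subtraction wraps to all ones. A minimal sketch of the complete function, with the unmatched surrounding lines (braces, return) assumed:

    static __always_inline u64 u64_eq_mask(u64 a, u64 b)
    {
            u64 x = a ^ b;                      /* zero iff a == b */
            u64 minus_x = ~x + (u64)1U;         /* two's-complement -x */
            u64 x_or_minus_x = x | minus_x;     /* bit 63 set iff x != 0 */
            u64 xnx = x_or_minus_x >> (u32)63U; /* 1 if a != b, else 0 */
            u64 c = xnx - (u64)1U;              /* ~0 if a == b, 0 otherwise */
            return c;                           /* all-ones mask when equal */
    }
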
27 static __always_inline u64 u64_gte_mask(u64 a, u64 b) in u64_gte_mask()
29 u64 x = a; in u64_gte_mask()
30 u64 y = b; in u64_gte_mask()
31 u64 x_xor_y = x ^ y; in u64_gte_mask()
32 u64 x_sub_y = x - y; in u64_gte_mask()
33 u64 x_sub_y_xor_y = x_sub_y ^ y; in u64_gte_mask()
34 u64 q = x_xor_y | x_sub_y_xor_y; in u64_gte_mask()
35 u64 x_xor_q = x ^ q; in u64_gte_mask()
36 u64 x_xor_q_ = x_xor_q >> (u32)63U; in u64_gte_mask()
37 u64 c = x_xor_q_ - (u64)1U; in u64_gte_mask()
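
Here q picks up bit 63 either from the operands' top bits differing (x_xor_y) or from the subtraction borrowing ((x - y) ^ y), so x ^ q has bit 63 set exactly when a < b; shifting and subtracting 1 turns that into an all-ones mask when a >= b. A sketch of the full function, return assumed:

    static __always_inline u64 u64_gte_mask(u64 a, u64 b)
    {
            u64 x = a;
            u64 y = b;
            u64 x_xor_y = x ^ y;             /* top bit set if signs differ */
            u64 x_sub_y = x - y;             /* wraps when a < b */
            u64 x_sub_y_xor_y = x_sub_y ^ y; /* borrow indicator */
            u64 q = x_xor_y | x_sub_y_xor_y;
            u64 x_xor_q = x ^ q;             /* bit 63 set iff a < b */
            u64 x_xor_q_ = x_xor_q >> (u32)63U;
            u64 c = x_xor_q_ - (u64)1U;      /* ~0 if a >= b, 0 otherwise */
            return c;
    }
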
41 static __always_inline void modulo_carry_top(u64 *b) in modulo_carry_top()
43 u64 b4 = b[4]; in modulo_carry_top()
44 u64 b0 = b[0]; in modulo_carry_top()
45 u64 b4_ = b4 & 0x7ffffffffffffLLU; in modulo_carry_top()
46 u64 b0_ = b0 + 19 * (b4 >> 51); in modulo_carry_top()
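
Because 2^255 = 19 (mod p) with p = 2^255 - 19, any overflow past bit 51 of the top limb folds back into limb 0 after multiplying by 19. A sketch of the complete helper, the two closing stores assumed:

    static __always_inline void modulo_carry_top(u64 *b)
    {
            u64 b4 = b[4];
            u64 b0 = b[0];
            u64 b4_ = b4 & 0x7ffffffffffffLLU; /* keep low 51 bits */
            u64 b0_ = b0 + 19 * (b4 >> 51);    /* fold carry: 2^255 = 19 mod p */
            b[4] = b4_;
            b[0] = b0_;
    }
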
51 static __always_inline void fproduct_copy_from_wide_(u64 *output, u128 *input) in fproduct_copy_from_wide_()
55 output[0] = ((u64)(xi)); in fproduct_copy_from_wide_()
59 output[1] = ((u64)(xi)); in fproduct_copy_from_wide_()
63 output[2] = ((u64)(xi)); in fproduct_copy_from_wide_()
67 output[3] = ((u64)(xi)); in fproduct_copy_from_wide_()
71 output[4] = ((u64)(xi)); in fproduct_copy_from_wide_()
76 fproduct_sum_scalar_multiplication_(u128 *output, u64 *input, u64 s) in fproduct_sum_scalar_multiplication_()
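
Only the parameter line of this signature matched; the body works entirely on u128 accumulators and so contains no u64 token. It presumably multiply-accumulates one scalar row of the schoolbook product, along these lines:

    /* hedged sketch: output[i] += (u128)input[i] * s for i = 0..4 */
    output[0] += ((u128)input[0]) * s;
    output[1] += ((u128)input[1]) * s;
    output[2] += ((u128)input[2]) * s;
    output[3] += ((u128)input[3]) * s;
    output[4] += ((u128)input[4]) * s;
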
91 u64 r0 = ((u64)(tctr)) & 0x7ffffffffffffLLU; in fproduct_carry_wide_()
100 u64 r0 = ((u64)(tctr)) & 0x7ffffffffffffLLU; in fproduct_carry_wide_()
110 u64 r0 = ((u64)(tctr)) & 0x7ffffffffffffLLU; in fproduct_carry_wide_()
119 u64 r0 = ((u64)(tctr)) & 0x7ffffffffffffLLU; in fproduct_carry_wide_()
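
The same masking line appears four times because the carry pass is unrolled over limbs 0..3: each step keeps the low 51 bits of a wide limb and adds the excess into the next limb. One unrolled step, with the u128-only lines (unmatched above) filled in as an assumption:

    u32 ctr = 0;                 /* 1, 2, 3 in the later copies */
    u128 tctr = tmp[ctr];
    u128 tctrp1 = tmp[ctr + 1];
    u64 r0 = ((u64)(tctr)) & 0x7ffffffffffffLLU; /* low 51 bits stay put */
    u128 c = tctr >> 51;                         /* carry moves up */
    tmp[ctr] = (u128)r0;
    tmp[ctr + 1] = tctrp1 + c;
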
126 static __always_inline void fmul_shift_reduce(u64 *output) in fmul_shift_reduce()
128 u64 tmp = output[4]; in fmul_shift_reduce()
129 u64 b0; in fmul_shift_reduce()
132 u64 z = output[ctr - 1]; in fmul_shift_reduce()
137 u64 z = output[ctr - 1]; in fmul_shift_reduce()
142 u64 z = output[ctr - 1]; in fmul_shift_reduce()
147 u64 z = output[ctr - 1]; in fmul_shift_reduce()
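
The four identical `output[ctr - 1]` reads are an unrolled downward copy: limbs shift up one position (a multiplication by 2^51), and the limb that falls off the top re-enters at the bottom times 19. An equivalent rolled-up sketch (the source unrolls this loop):

    static __always_inline void fmul_shift_reduce(u64 *output)
    {
            u64 tmp = output[4];
            u32 ctr;

            for (ctr = 4; ctr > 0; ctr--)  /* shift limbs up: multiply by 2^51 */
                    output[ctr] = output[ctr - 1];
            output[0] = 19 * tmp;          /* wrapped top limb: 2^255 = 19 mod p */
    }
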
155 static __always_inline void fmul_mul_shift_reduce_(u128 *output, u64 *input, in fmul_mul_shift_reduce_()
156 u64 *input21) in fmul_mul_shift_reduce_()
159 u64 input2i; in fmul_mul_shift_reduce_()
161 u64 input2i = input21[0]; in fmul_mul_shift_reduce_()
166 u64 input2i = input21[1]; in fmul_mul_shift_reduce_()
171 u64 input2i = input21[2]; in fmul_mul_shift_reduce_()
176 u64 input2i = input21[3]; in fmul_mul_shift_reduce_()
185 static __always_inline void fmul_fmul(u64 *output, u64 *input, u64 *input21) in fmul_fmul()
187 u64 tmp[5] = { input[0], input[1], input[2], input[3], input[4] }; in fmul_fmul()
193 u64 i0; in fmul_fmul()
194 u64 i1; in fmul_fmul()
195 u64 i0_; in fmul_fmul()
196 u64 i1_; in fmul_fmul()
203 b0_ = ((b0) + (((u128)(19) * (((u64)(((b4) >> (51)))))))); in fmul_fmul()
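
This line is the wide (u128) version of the modulo_carry_top fold above. In context, fmul_fmul is the full field multiplication; a condensed sketch of its flow, with the u128-only lines assumed:

    static __always_inline void fmul_fmul(u64 *output, u64 *input, u64 *input21)
    {
            u64 tmp[5] = { input[0], input[1], input[2], input[3], input[4] };
            u128 t[5] = { 0 };
            u128 b4, b0;
            u64 i0, i1;

            fmul_mul_shift_reduce_(t, tmp, input21);  /* schoolbook product, reduced on the fly */
            fproduct_carry_wide_(t);                  /* propagate carries through wide limbs */
            b4 = t[4];
            b0 = t[0];
            t[4] = b4 & (u128)0x7ffffffffffffLLU;
            t[0] = b0 + (u128)19 * ((u64)(b4 >> 51)); /* the matched fold */
            fproduct_copy_from_wide_(output, t);      /* narrow back to u64 limbs */
            i0 = output[0];
            i1 = output[1];
            output[0] = i0 & 0x7ffffffffffffLLU;      /* one final small carry */
            output[1] = i1 + (i0 >> 51);
    }
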
216 static __always_inline void fsquare_fsquare__(u128 *tmp, u64 *output) in fsquare_fsquare__()
218 u64 r0 = output[0]; in fsquare_fsquare__()
219 u64 r1 = output[1]; in fsquare_fsquare__()
220 u64 r2 = output[2]; in fsquare_fsquare__()
221 u64 r3 = output[3]; in fsquare_fsquare__()
222 u64 r4 = output[4]; in fsquare_fsquare__()
223 u64 d0 = r0 * 2; in fsquare_fsquare__()
224 u64 d1 = r1 * 2; in fsquare_fsquare__()
225 u64 d2 = r2 * 2 * 19; in fsquare_fsquare__()
226 u64 d419 = r4 * 19; in fsquare_fsquare__()
227 u64 d4 = d419 * 2; in fsquare_fsquare__()
245 static __always_inline void fsquare_fsquare_(u128 *tmp, u64 *output) in fsquare_fsquare_()
251 u64 i0; in fsquare_fsquare_()
252 u64 i1; in fsquare_fsquare_()
253 u64 i0_; in fsquare_fsquare_()
254 u64 i1_; in fsquare_fsquare_()
260 b0_ = ((b0) + (((u128)(19) * (((u64)(((b4) >> (51)))))))); in fsquare_fsquare_()
272 static __always_inline void fsquare_fsquare_times_(u64 *output, u128 *tmp, in fsquare_fsquare_times_()
281 static __always_inline void fsquare_fsquare_times(u64 *output, u64 *input, in fsquare_fsquare_times()
289 static __always_inline void fsquare_fsquare_times_inplace(u64 *output, in fsquare_fsquare_times_inplace()
296 static __always_inline void crecip_crecip(u64 *out, u64 *z) in crecip_crecip()
298 u64 buf[20] = { 0 }; in crecip_crecip()
299 u64 *a0 = buf; in crecip_crecip()
300 u64 *t00 = buf + 5; in crecip_crecip()
301 u64 *b0 = buf + 10; in crecip_crecip()
302 u64 *t01; in crecip_crecip()
303 u64 *b1; in crecip_crecip()
304 u64 *c0; in crecip_crecip()
305 u64 *a; in crecip_crecip()
306 u64 *t0; in crecip_crecip()
307 u64 *b; in crecip_crecip()
308 u64 *c; in crecip_crecip()
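
crecip_crecip inverts z via Fermat's little theorem, z^(p-2) = z^(2^255 - 21), using only fsquare_fsquare_times (repeated squaring) and fmul_fmul over the scratch pointers set up above. The opening of the standard addition chain, as a sketch (remaining steps elided):

    fsquare_fsquare_times(a0, z, 1);   /* a  = z^2  */
    fsquare_fsquare_times(t00, a0, 2); /* t0 = z^8  */
    fmul_fmul(b0, t00, z);             /* b  = z^9  */
    fmul_fmul(a0, b0, a0);             /* a  = z^11 */
    fsquare_fsquare_times(t00, a0, 1); /* t0 = z^22 */
    fmul_fmul(b0, t00, b0);            /* b  = z^31 = z^(2^5 - 1) */
    /* ... chain continues up to out = z^(2^255 - 21) */
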
340 static __always_inline void fsum(u64 *a, u64 *b) in fsum()
349 static __always_inline void fdifference(u64 *a, u64 *b) in fdifference()
351 u64 tmp[5] = { 0 }; in fdifference()
352 u64 b0; in fdifference()
353 u64 b1; in fdifference()
354 u64 b2; in fdifference()
355 u64 b3; in fdifference()
356 u64 b4; in fdifference()
369 u64 xi = a[0]; in fdifference()
370 u64 yi = tmp[0]; in fdifference()
374 u64 xi = a[1]; in fdifference()
375 u64 yi = tmp[1]; in fdifference()
379 u64 xi = a[2]; in fdifference()
380 u64 yi = tmp[2]; in fdifference()
384 u64 xi = a[3]; in fdifference()
385 u64 yi = tmp[3]; in fdifference()
389 u64 xi = a[4]; in fdifference()
390 u64 yi = tmp[4]; in fdifference()
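
fdifference computes a := b - a limb by limb. The lines between the loads and the subtractions carry no u64 token and so are absent above; they presumably add 8p to b first so no limb underflows (a sketch; the constants are 8*(2^51 - 19) and 8*(2^51 - 1), i.e. 8p in radix-2^51):

    tmp[0] = b0 + 0x3fffffffffff68LLU; /* 8 * (2^51 - 19) */
    tmp[1] = b1 + 0x3ffffffffffff8LLU; /* 8 * (2^51 - 1)  */
    tmp[2] = b2 + 0x3ffffffffffff8LLU;
    tmp[3] = b3 + 0x3ffffffffffff8LLU;
    tmp[4] = b4 + 0x3ffffffffffff8LLU;
    /* each matched pair then does a[i] = yi - xi, i.e. a = (b + 8p) - a */
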
395 static __always_inline void fscalar(u64 *output, u64 *b, u64 s) in fscalar()
403 u64 xi = b[0]; in fscalar()
407 u64 xi = b[1]; in fscalar()
411 u64 xi = b[2]; in fscalar()
415 u64 xi = b[3]; in fscalar()
419 u64 xi = b[4]; in fscalar()
426 b0_ = ((b0) + (((u128)(19) * (((u64)(((b4) >> (51)))))))); in fscalar()
432 static __always_inline void fmul(u64 *output, u64 *a, u64 *b) in fmul()
437 static __always_inline void crecip(u64 *output, u64 *input) in crecip()
442 static __always_inline void point_swap_conditional_step(u64 *a, u64 *b, in point_swap_conditional_step()
443 u64 swap1, u32 ctr) in point_swap_conditional_step()
446 u64 ai = a[i]; in point_swap_conditional_step()
447 u64 bi = b[i]; in point_swap_conditional_step()
448 u64 x = swap1 & (ai ^ bi); in point_swap_conditional_step()
449 u64 ai1 = ai ^ x; in point_swap_conditional_step()
450 u64 bi1 = bi ^ x; in point_swap_conditional_step()
455 static __always_inline void point_swap_conditional5(u64 *a, u64 *b, u64 swap1) in point_swap_conditional5()
464 static __always_inline void point_swap_conditional(u64 *a, u64 *b, u64 iswap) in point_swap_conditional()
466 u64 swap1 = 0 - iswap; in point_swap_conditional()
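
`0 - iswap` stretches the 0/1 swap bit into an all-zeros/all-ones mask, which the per-limb step above uses to swap without a branch, so the timing is identical whether or not the swap happens. The step in full, the two stores assumed:

    static __always_inline void point_swap_conditional_step(u64 *a, u64 *b,
                                                            u64 swap1, u32 ctr)
    {
            u32 i = ctr - 1;
            u64 ai = a[i];
            u64 bi = b[i];
            u64 x = swap1 & (ai ^ bi); /* 0, or ai^bi when swapping */
            u64 ai1 = ai ^ x;
            u64 bi1 = bi ^ x;
            a[i] = ai1;                /* masked XOR-swap */
            b[i] = bi1;
    }
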
471 static __always_inline void point_copy(u64 *output, u64 *input) in point_copy()
477 static __always_inline void addanddouble_fmonty(u64 *pp, u64 *ppq, u64 *p, in addanddouble_fmonty()
478 u64 *pq, u64 *qmqp) in addanddouble_fmonty()
480 u64 *qx = qmqp; in addanddouble_fmonty()
481 u64 *x2 = pp; in addanddouble_fmonty()
482 u64 *z2 = pp + 5; in addanddouble_fmonty()
483 u64 *x3 = ppq; in addanddouble_fmonty()
484 u64 *z3 = ppq + 5; in addanddouble_fmonty()
485 u64 *x = p; in addanddouble_fmonty()
486 u64 *z = p + 5; in addanddouble_fmonty()
487 u64 *xprime = pq; in addanddouble_fmonty()
488 u64 *zprime = pq + 5; in addanddouble_fmonty()
489 u64 buf[40] = { 0 }; in addanddouble_fmonty()
490 u64 *origx = buf; in addanddouble_fmonty()
491 u64 *origxprime0 = buf + 5; in addanddouble_fmonty()
492 u64 *xxprime0; in addanddouble_fmonty()
493 u64 *zzprime0; in addanddouble_fmonty()
494 u64 *origxprime; in addanddouble_fmonty()
507 u64 *xx0; in addanddouble_fmonty()
508 u64 *zz0; in addanddouble_fmonty()
509 u64 *xxprime; in addanddouble_fmonty()
510 u64 *zzprime; in addanddouble_fmonty()
511 u64 *zzzprime; in addanddouble_fmonty()
526 u64 *zzz; in addanddouble_fmonty()
527 u64 *xx; in addanddouble_fmonty()
528 u64 *zz; in addanddouble_fmonty()
529 u64 scalar; in addanddouble_fmonty()
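
The remaining body of fmonty (mostly fsum/fdifference/fmul/fsquare calls carrying no u64 token) performs one combined Montgomery double-and-add on x/z coordinates. In formula form, hedged as the standard ladder-step equations rather than the literal elided lines, with (x, z) the current point, (x', z') its companion, and qx the fixed difference point:

    A = x + z,  B = x - z,  AA = A^2,  BB = B^2,  E = AA - BB
    x2 = AA * BB
    z2 = E * (AA + 121665 * E)     (a24 = 121665 feeds the `scalar` above)
    DA = (x' - z') * A,  CB = (x' + z') * B
    x3 = (DA + CB)^2
    z3 = qx * (DA - CB)^2
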
544 ladder_smallloop_cmult_small_loop_step(u64 *nq, u64 *nqpq, u64 *nq2, u64 *nqpq2, in ladder_smallloop_cmult_small_loop_step()
545 u64 *q, u8 byt) in ladder_smallloop_cmult_small_loop_step()
547 u64 bit0 = (u64)(byt >> 7); in ladder_smallloop_cmult_small_loop_step()
548 u64 bit; in ladder_smallloop_cmult_small_loop_step()
551 bit = (u64)(byt >> 7); in ladder_smallloop_cmult_small_loop_step()
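
The same top bit is extracted twice because it brackets the ladder step: swap in, run fmonty, swap back. A sketch with the intervening calls (no u64 token, hence unmatched) assumed:

    u64 bit0 = (u64)(byt >> 7);
    point_swap_conditional(nq, nqpq, bit0);       /* select by scalar bit */
    addanddouble_fmonty(nq2, nqpq2, nq, nqpq, q); /* double-and-add step */
    bit = (u64)(byt >> 7);
    point_swap_conditional(nq2, nqpq2, bit);      /* undo the selection */
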
556 ladder_smallloop_cmult_small_loop_double_step(u64 *nq, u64 *nqpq, u64 *nq2, in ladder_smallloop_cmult_small_loop_double_step()
557 u64 *nqpq2, u64 *q, u8 byt) in ladder_smallloop_cmult_small_loop_double_step()
566 ladder_smallloop_cmult_small_loop(u64 *nq, u64 *nqpq, u64 *nq2, u64 *nqpq2, in ladder_smallloop_cmult_small_loop()
567 u64 *q, u8 byt, u32 i) in ladder_smallloop_cmult_small_loop()
576 static __always_inline void ladder_bigloop_cmult_big_loop(u8 *n1, u64 *nq, in ladder_bigloop_cmult_big_loop()
577 u64 *nqpq, u64 *nq2, in ladder_bigloop_cmult_big_loop()
578 u64 *nqpq2, u64 *q, in ladder_bigloop_cmult_big_loop()
588 static void ladder_cmult(u64 *result, u8 *n1, u64 *q) in ladder_cmult()
590 u64 point_buf[40] = { 0 }; in ladder_cmult()
591 u64 *nq = point_buf; in ladder_cmult()
592 u64 *nqpq = point_buf + 10; in ladder_cmult()
593 u64 *nq2 = point_buf + 20; in ladder_cmult()
594 u64 *nqpq2 = point_buf + 30; in ladder_cmult()
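
ladder_cmult walks the 32 scalar bytes over the two ladder points (nq, nqpq), with (nq2, nqpq2) as scratch. The unmatched remainder presumably initializes nq to the projective identity and copies the result out, along these lines:

    point_copy(nqpq, q);  /* companion starts at the input point */
    nq[0] = 1;            /* nq starts at the identity (X:Z) = (1:0);
                             point_buf is zeroed, so z is already 0 */
    ladder_bigloop_cmult_big_loop(n1, nq, nqpq, nq2, nqpq2, q, 32);
    point_copy(result, nq);
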
601 static __always_inline void format_fexpand(u64 *output, const u8 *input) in format_fexpand()
607 u64 i0, i1, i2, i3, i4, output0, output1, output2, output3, output4; in format_fexpand()
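
format_fexpand splits the 32 little-endian input bytes into five 51-bit limbs. A sketch of the body, assuming the kernel's get_unaligned_le64() helper; each load is positioned so the wanted 51 bits sit inside one 64-bit read:

    i0 = get_unaligned_le64(input);      /* limb 0 starts at bit 0   */
    i1 = get_unaligned_le64(input + 6);  /* bit 51  = byte 6, bit 3  */
    i2 = get_unaligned_le64(input + 12); /* bit 102 = byte 12, bit 6 */
    i3 = get_unaligned_le64(input + 19); /* bit 153 = byte 19, bit 1 */
    i4 = get_unaligned_le64(input + 24); /* bit 204 = byte 24, bit 12 */
    output0 = i0 & 0x7ffffffffffffLLU;
    output1 = (i1 >> 3) & 0x7ffffffffffffLLU;
    output2 = (i2 >> 6) & 0x7ffffffffffffLLU;
    output3 = (i3 >> 1) & 0x7ffffffffffffLLU;
    output4 = (i4 >> 12) & 0x7ffffffffffffLLU; /* also drops bit 255 */
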
625 static __always_inline void format_fcontract_first_carry_pass(u64 *input) in format_fcontract_first_carry_pass()
627 u64 t0 = input[0]; in format_fcontract_first_carry_pass()
628 u64 t1 = input[1]; in format_fcontract_first_carry_pass()
629 u64 t2 = input[2]; in format_fcontract_first_carry_pass()
630 u64 t3 = input[3]; in format_fcontract_first_carry_pass()
631 u64 t4 = input[4]; in format_fcontract_first_carry_pass()
632 u64 t1_ = t1 + (t0 >> 51); in format_fcontract_first_carry_pass()
633 u64 t0_ = t0 & 0x7ffffffffffffLLU; in format_fcontract_first_carry_pass()
634 u64 t2_ = t2 + (t1_ >> 51); in format_fcontract_first_carry_pass()
635 u64 t1__ = t1_ & 0x7ffffffffffffLLU; in format_fcontract_first_carry_pass()
636 u64 t3_ = t3 + (t2_ >> 51); in format_fcontract_first_carry_pass()
637 u64 t2__ = t2_ & 0x7ffffffffffffLLU; in format_fcontract_first_carry_pass()
638 u64 t4_ = t4 + (t3_ >> 51); in format_fcontract_first_carry_pass()
639 u64 t3__ = t3_ & 0x7ffffffffffffLLU; in format_fcontract_first_carry_pass()
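
One sequential carry pass: each limb keeps its low 51 bits and pushes the excess into the next. Note that t4_ is left unmasked; wrapping the top limb is modulo_carry_top's job, which format_fcontract_first_carry_full presumably performs next. The closing stores (no u64 token, hence unmatched) would be:

    input[0] = t0_;
    input[1] = t1__;
    input[2] = t2__;
    input[3] = t3__;
    input[4] = t4_; /* top carry handled separately by modulo_carry_top */
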
647 static __always_inline void format_fcontract_first_carry_full(u64 *input) in format_fcontract_first_carry_full()
653 static __always_inline void format_fcontract_second_carry_pass(u64 *input) in format_fcontract_second_carry_pass()
655 u64 t0 = input[0]; in format_fcontract_second_carry_pass()
656 u64 t1 = input[1]; in format_fcontract_second_carry_pass()
657 u64 t2 = input[2]; in format_fcontract_second_carry_pass()
658 u64 t3 = input[3]; in format_fcontract_second_carry_pass()
659 u64 t4 = input[4]; in format_fcontract_second_carry_pass()
660 u64 t1_ = t1 + (t0 >> 51); in format_fcontract_second_carry_pass()
661 u64 t0_ = t0 & 0x7ffffffffffffLLU; in format_fcontract_second_carry_pass()
662 u64 t2_ = t2 + (t1_ >> 51); in format_fcontract_second_carry_pass()
663 u64 t1__ = t1_ & 0x7ffffffffffffLLU; in format_fcontract_second_carry_pass()
664 u64 t3_ = t3 + (t2_ >> 51); in format_fcontract_second_carry_pass()
665 u64 t2__ = t2_ & 0x7ffffffffffffLLU; in format_fcontract_second_carry_pass()
666 u64 t4_ = t4 + (t3_ >> 51); in format_fcontract_second_carry_pass()
667 u64 t3__ = t3_ & 0x7ffffffffffffLLU; in format_fcontract_second_carry_pass()
675 static __always_inline void format_fcontract_second_carry_full(u64 *input) in format_fcontract_second_carry_full()
677 u64 i0; in format_fcontract_second_carry_full()
678 u64 i1; in format_fcontract_second_carry_full()
679 u64 i0_; in format_fcontract_second_carry_full()
680 u64 i1_; in format_fcontract_second_carry_full()
691 static __always_inline void format_fcontract_trim(u64 *input) in format_fcontract_trim()
693 u64 a0 = input[0]; in format_fcontract_trim()
694 u64 a1 = input[1]; in format_fcontract_trim()
695 u64 a2 = input[2]; in format_fcontract_trim()
696 u64 a3 = input[3]; in format_fcontract_trim()
697 u64 a4 = input[4]; in format_fcontract_trim()
698 u64 mask0 = u64_gte_mask(a0, 0x7ffffffffffedLLU); in format_fcontract_trim()
699 u64 mask1 = u64_eq_mask(a1, 0x7ffffffffffffLLU); in format_fcontract_trim()
700 u64 mask2 = u64_eq_mask(a2, 0x7ffffffffffffLLU); in format_fcontract_trim()
701 u64 mask3 = u64_eq_mask(a3, 0x7ffffffffffffLLU); in format_fcontract_trim()
702 u64 mask4 = u64_eq_mask(a4, 0x7ffffffffffffLLU); in format_fcontract_trim()
703 u64 mask = (((mask0 & mask1) & mask2) & mask3) & mask4; in format_fcontract_trim()
704 u64 a0_ = a0 - (0x7ffffffffffedLLU & mask); in format_fcontract_trim()
705 u64 a1_ = a1 - (0x7ffffffffffffLLU & mask); in format_fcontract_trim()
706 u64 a2_ = a2 - (0x7ffffffffffffLLU & mask); in format_fcontract_trim()
707 u64 a3_ = a3 - (0x7ffffffffffffLLU & mask); in format_fcontract_trim()
708 u64 a4_ = a4 - (0x7ffffffffffffLLU & mask); in format_fcontract_trim()
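
The trim is the final conditional subtraction of p = 2^255 - 19: 0x7ffffffffffedLLU is 2^51 - 19 (p's low limb) and 0x7ffffffffffffLLU is 2^51 - 1 (the other four limbs), so mask is all-ones exactly when the value is >= p, and p is subtracted with no branch. The stores closing the function would be:

    input[0] = a0_;
    input[1] = a1_;
    input[2] = a2_;
    input[3] = a3_;
    input[4] = a4_;
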
716 static __always_inline void format_fcontract_store(u8 *output, u64 *input) in format_fcontract_store()
718 u64 t0 = input[0]; in format_fcontract_store()
719 u64 t1 = input[1]; in format_fcontract_store()
720 u64 t2 = input[2]; in format_fcontract_store()
721 u64 t3 = input[3]; in format_fcontract_store()
722 u64 t4 = input[4]; in format_fcontract_store()
723 u64 o0 = t1 << 51 | t0; in format_fcontract_store()
724 u64 o1 = t2 << 38 | t1 >> 13; in format_fcontract_store()
725 u64 o2 = t3 << 25 | t2 >> 26; in format_fcontract_store()
726 u64 o3 = t4 << 12 | t3 >> 39; in format_fcontract_store()
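
Packing five 51-bit limbs into four 64-bit words: o0 takes all of t0 plus the low 13 bits of t1, o1 the remaining 38 bits of t1 plus 26 of t2, and so on; 5*51 = 255 bits fit in 256 with the top bit already cleared by the trim. The stores, assuming the kernel's put_unaligned_le64() helper:

    put_unaligned_le64(o0, output);
    put_unaligned_le64(o1, output + 8);
    put_unaligned_le64(o2, output + 16);
    put_unaligned_le64(o3, output + 24);
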
737 static __always_inline void format_fcontract(u8 *output, u64 *input) in format_fcontract()
745 static __always_inline void format_scalar_of_point(u8 *scalar, u64 *point) in format_scalar_of_point()
747 u64 *x = point; in format_scalar_of_point()
748 u64 *z = point + 5; in format_scalar_of_point()
749 u64 buf[10] __aligned(32) = { 0 }; in format_scalar_of_point()
750 u64 *zmone = buf; in format_scalar_of_point()
751 u64 *sc = buf + 5; in format_scalar_of_point()
761 u64 buf0[10] __aligned(32) = { 0 }; in curve25519_generic()
762 u64 *x0 = buf0; in curve25519_generic()
763 u64 *z = buf0 + 5; in curve25519_generic()
764 u64 *q; in curve25519_generic()
775 u64 buf[15] = { 0 }; in curve25519_generic()
776 u64 *nq = buf; in curve25519_generic()
777 u64 *x = nq; in curve25519_generic()
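
curve25519_generic ties everything together: expand the base point, run the ladder over the clamped secret, and contract the result to bytes. A condensed sketch of the flow, with the non-u64 lines (clamping, copies, zeroization) assumed; `scalar` here names a clamped local copy of the secret:

    format_fexpand(x0, basepoint);        /* x0 = affine x as 51-bit limbs */
    z[0] = 1;                             /* projective z = 1 */
    q = buf0;
    /* copy secret into a local buffer and clamp it per RFC 7748 */
    ladder_cmult(nq, scalar, q);          /* nq = [scalar]q as (X:Z) */
    format_scalar_of_point(mypublic, nq); /* X/Z mod p, contracted to bytes */
    /* scratch buffers are then wiped, e.g. with memzero_explicit() */
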