Lines Matching +full:high +full:- +full:to +full:- +full:low
1 /* SPDX-License-Identifier: GPL-2.0 */
16 * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
19 * @remainder: pointer to unsigned 32bit remainder
23 * This is commonly implemented by 32bit archs to provide an optimized 64bit
33 * div_s64_rem - signed 64bit divide with 32bit divisor with remainder
36 * @remainder: pointer to signed 32bit remainder
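A minimal usage sketch for the two remainder-returning helpers above, div_u64_rem() and div_s64_rem(), assuming a kernel build context with <linux/math64.h>; the helper name and the nanosecond figures are invented for illustration.

        #include <linux/math64.h>
        #include <linux/printk.h>

        /* Split nanosecond counts into whole seconds plus leftover nanoseconds. */
        static void report_ns(u64 ns, s64 signed_ns)
        {
                u32 rem;
                s32 srem;
                u64 secs = div_u64_rem(ns, 1000000000U, &rem);
                s64 ssecs = div_s64_rem(signed_ns, 1000000000, &srem);

                pr_info("%llu s + %u ns (signed: %lld s + %d ns)\n",
                        secs, rem, ssecs, srem);
        }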
47 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
50 * @remainder: pointer to unsigned 64bit remainder
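div64_u64_rem() is the variant to reach for when the divisor itself does not fit in 32 bits. A small sketch under the same kernel-context assumption; the chunk accounting is made up for illustration.

        #include <linux/math64.h>

        /* How many whole chunks fit into @bytes, and how much is left over. */
        static u64 whole_chunks(u64 bytes, u64 chunk, u64 *leftover)
        {
                return div64_u64_rem(bytes, chunk, leftover);
        }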
61 * div64_u64 - unsigned 64bit divide with 64bit divisor
73 * div64_s64 - signed 64bit divide with 64bit divisor
116 * div_u64 - unsigned 64bit divide with 32bit divisor
135 * div_s64 - signed 64bit divide with 32bit divisor
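The remaining divide helpers differ only in signedness and divisor width; using the narrowest divisor type that fits lets 32bit architectures take the cheaper 64/32 path. A hedged sketch of the four call shapes (variable names and divisors are illustrative):

        #include <linux/math64.h>

        static void divide_flavours(u64 a, s64 b)
        {
                u64 q1 = div_u64(a, 1000U);       /* u64 dividend, u32 divisor */
                s64 q2 = div_s64(b, -7);          /* s64 dividend, s32 divisor */
                u64 q3 = div64_u64(a, a | 1);     /* u64 dividend, u64 divisor */
                s64 q4 = div64_s64(b, -3LL);      /* s64 dividend, s64 divisor */

                (void)q1; (void)q2; (void)q3; (void)q4;
        }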
153 * Many a GCC version messes this up and generates a 64x64 mult :-(
187 ret += mul_u32_u32(ah, mul) << (32 - shift); in mul_u64_u32_shr()
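mul_u64_u32_shr() applies a 32bit fixed-point scale factor to a 64bit value, computing (a * mul) >> shift through 32bit partial products so the intermediate never needs a 128bit type. A sketch assuming kernel context; the cycles-to-nanoseconds framing and the helper name are illustrative.

        #include <linux/math64.h>

        /* ns = (cycles * mult) >> shift, clocksource-style fixed-point scaling. */
        static u64 cycles_to_ns(u64 cycles, u32 mult, u32 shift)
        {
                return mul_u64_u32_shr(cycles, mult, shift);
        }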
199 u32 high, low; in mul_u64_u64_shr() member
201 u32 low, high; in mul_u64_u64_shr()
210 rl.ll = mul_u32_u32(a0.l.low, b0.l.low); in mul_u64_u64_shr()
211 rm.ll = mul_u32_u32(a0.l.low, b0.l.high); in mul_u64_u64_shr()
212 rn.ll = mul_u32_u32(a0.l.high, b0.l.low); in mul_u64_u64_shr()
213 rh.ll = mul_u32_u32(a0.l.high, b0.l.high); in mul_u64_u64_shr()
216 * Each of these lines computes a 64-bit intermediate result into "c", in mul_u64_u64_shr()
217 * starting at bits 32-95. The low 32-bits go into the result of the in mul_u64_u64_shr()
218 * multiplication, the high 32-bits are carried into the next step. in mul_u64_u64_shr()
220 rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low; in mul_u64_u64_shr()
221 rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low; in mul_u64_u64_shr()
222 rh.l.high = (c >> 32) + rh.l.high; in mul_u64_u64_shr()
225 * The 128-bit result of the multiplication is in rl.ll and rh.ll, in mul_u64_u64_shr()
226 * shift it right and throw away the high part of the result. in mul_u64_u64_shr()
231 return (rl.ll >> shift) | (rh.ll << (64 - shift)); in mul_u64_u64_shr()
250 ret = -((s64) ret); in mul_s64_u64_shr()
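The partial products rl/rm/rn/rh above are the schoolbook expansion a*b = ah*bh*2^64 + (ah*bl + al*bh)*2^32 + al*bl, accumulated with explicit carries so the full 128bit product sits in rh:rl before the right shift; mul_s64_u64_shr() then just negates around the unsigned helper when the signed input was negative. Callers never see that machinery. A usage sketch under kernel-context assumptions; the 2^-48 fixed-point ratio is invented.

        #include <linux/math64.h>

        /* Scale by a 64bit ratio expressed as frac / 2^48. */
        static u64 scale_u64(u64 value, u64 frac)
        {
                return mul_u64_u64_shr(value, frac, 48);
        }

        /* Same scaling for a signed value. */
        static s64 scale_s64(s64 value, u64 frac)
        {
                return mul_s64_u64_shr(value, frac, 48);
        }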
263 u32 high, low; in mul_u64_u32_div() member
265 u32 low, high; in mul_u64_u32_div()
271 rl.ll = mul_u32_u32(u.l.low, mul); in mul_u64_u32_div()
272 rh.ll = mul_u32_u32(u.l.high, mul) + rl.l.high; in mul_u64_u32_div()
274 /* Bits 32-63 of the result will be in rh.l.low. */ in mul_u64_u32_div()
275 rl.l.high = do_div(rh.ll, divisor); in mul_u64_u32_div()
277 /* Bits 0-31 of the result will be in rl.l.low. */ in mul_u64_u32_div()
280 rl.l.high = rh.l.low; in mul_u64_u32_div()
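mul_u64_u32_div() computes value * mul / divisor with the multiplication done at full width first, so the product cannot overflow before the divide. A sketch assuming kernel context; the tick-rescaling use case is invented.

        #include <linux/math64.h>

        /* Rescale @ticks from @old_hz to @new_hz without overflowing ticks * new_hz. */
        static u64 rescale_ticks(u64 ticks, u32 new_hz, u32 old_hz)
        {
                return mul_u64_u32_div(ticks, new_hz, old_hz);
        }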
288 * DIV64_U64_ROUND_UP - unsigned 64bit divide with 64bit divisor rounded up
298 ({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); })
301 * DIV_U64_ROUND_UP - unsigned 64bit divide with 32bit divisor rounded up
311 ({ u32 _tmp = (d); div_u64((ll) + _tmp - 1, _tmp); })
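Both ROUND_UP macros evaluate the divisor once into _tmp and then apply the usual (n + d - 1) / d trick, so a divisor expression with side effects is only evaluated once. A short sketch (the block-size framing is illustrative):

        #include <linux/math64.h>

        static u64 blocks_needed(u64 bytes, u32 block_size)
        {
                return DIV_U64_ROUND_UP(bytes, block_size);     /* 32bit divisor */
        }

        static u64 wide_blocks_needed(u64 bytes, u64 block_size)
        {
                return DIV64_U64_ROUND_UP(bytes, block_size);   /* 64bit divisor */
        }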
314 * DIV64_U64_ROUND_CLOSEST - unsigned 64bit divide with 64bit divisor rounded to nearest integer
319 * and round to closest integer.
321 * Return: dividend / divisor rounded to nearest integer
327 * DIV_U64_ROUND_CLOSEST - unsigned 64bit divide with 32bit divisor rounded to nearest integer
332 * and round to closest integer.
334 * Return: dividend / divisor rounded to nearest integer
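The unsigned ROUND_CLOSEST variants bias the dividend by half the divisor before dividing, so for example DIV64_U64_ROUND_CLOSEST(7, 2) yields 4 where truncating div64_u64(7, 2) yields 3. A sketch assuming kernel context; the percentage helper is invented and assumes part * 100 fits in 64 bits.

        #include <linux/math64.h>

        /* Nearest-integer percentage of @whole represented by @part. */
        static u64 percent_of(u64 part, u64 whole)
        {
                return DIV64_U64_ROUND_CLOSEST(part * 100, whole);
        }

        /* 32bit-divisor flavour: bytes rounded to the nearest KiB. */
        static u64 closest_kib(u64 bytes)
        {
                return DIV_U64_ROUND_CLOSEST(bytes, 1024U);
        }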
340 * DIV_S64_ROUND_CLOSEST - signed 64bit divide with 32bit divisor rounded to nearest integer
345 * and round to closest integer.
347 * Return: dividend / divisor rounded to nearest integer
355 div_s64((__x - (__d / 2)), __d); \
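The signed variant has to pick the sign of the half-divisor correction: it subtracts __d / 2 (the branch above) when dividend and divisor differ in sign, so DIV_S64_ROUND_CLOSEST(-7, 2) gives -4 where truncating div_s64(-7, 2) gives -3. A sketch assuming kernel context; the millidegree conversion is illustrative.

        #include <linux/math64.h>

        /* Convert millidegrees to whole degrees, rounded to the nearest degree. */
        static s64 mdeg_to_deg(s64 mdeg)
        {
                return DIV_S64_ROUND_CLOSEST(mdeg, 1000);
        }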
360 * roundup_u64 - Round up a 64bit value to the next specified 32bit multiple
361 * @x: the value to round up
362 * @y: 32bit multiple to round up to
364 * Rounds @x to the next multiple of @y. For 32bit @x values, see roundup and
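A closing sketch for roundup_u64(), assuming kernel context: it returns the smallest multiple of @y that is not below @x. The 4KiB page-alignment use is only an example.

        #include <linux/math64.h>

        /* Align a byte length up to a whole number of 4KiB pages. */
        static u64 align_to_pages(u64 len)
        {
                return roundup_u64(len, 4096U);     /* e.g. 5000 -> 8192 */
        }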