Lines Matching +full:three +full:- +full:conversion +full:- +full:cycles
1 // SPDX-License-Identifier: GPL-2.0
28 #include "tick-internal.h"
63 * struct tk_fast - NMI safe timekeeper
76 /* Suspend-time cycles value for halted fast timekeeper. */
93 * returns nanoseconds already so no conversion is required, hence mult=1
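
The fast timekeeper used while suspended is driven by a dummy clocksource whose read() already returns nanoseconds, so mult = 1 and shift = 0 make the cycles-to-ns conversion a no-op. A minimal kernel-style sketch of that idea (names here are illustrative, not the kernel's actual definitions):

	struct sketch_clocksource {
		u64 (*read)(struct sketch_clocksource *cs);
		u32 mult;	/* 1: read() already yields ns */
		u32 shift;	/* 0: no fixed-point scaling   */
	};

	static u64 ns_read(struct sketch_clocksource *cs)
	{
		/* hypothetical monotonic ns counter standing in
		 * for the kernel's actual ns-based reader */
		return hypothetical_ns_counter();
	}

	static struct sketch_clocksource ns_clock = {
		.read	= ns_read,
		.mult	= 1,
		.shift	= 0,
	};
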
119 while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) { in tk_normalize_xtime()
120 tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift; in tk_normalize_xtime()
121 tk->xtime_sec++; in tk_normalize_xtime()
123 while (tk->tkr_raw.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_raw.shift)) { in tk_normalize_xtime()
124 tk->tkr_raw.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_raw.shift; in tk_normalize_xtime()
125 tk->raw_sec++; in tk_normalize_xtime()
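
xtime_nsec is kept in shifted nanoseconds (ns << shift), so one full second is NSEC_PER_SEC << shift in that fixed-point unit and the loops above carry whole seconds over into xtime_sec/raw_sec. A standalone worked example, assuming a hypothetical shift of 8:

	#include <stdint.h>

	int main(void)
	{
		uint32_t shift = 8;	/* hypothetical clocksource shift */
		uint64_t sec = (uint64_t)1000000000 << shift;	/* 1 s, shifted */
		uint64_t xtime_nsec = 2 * sec + sec / 2;	/* 2.5 s pending */
		uint64_t xtime_sec = 0;

		while (xtime_nsec >= sec) {	/* same normalization loop */
			xtime_nsec -= sec;
			xtime_sec++;
		}
		/* xtime_sec == 2, xtime_nsec == half a second, shifted */
		return !(xtime_sec == 2 && xtime_nsec == sec / 2);
	}
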
133 ts.tv_sec = tk->xtime_sec; in tk_xtime()
134 ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift); in tk_xtime()
140 tk->xtime_sec = ts->tv_sec; in tk_set_xtime()
141 tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift; in tk_set_xtime()
146 tk->xtime_sec += ts->tv_sec; in tk_xtime_add()
147 tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift; in tk_xtime_add()
156 * Verify consistency of: offset_real = -wall_to_monotonic in tk_set_wall_to_mono()
159 set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec, in tk_set_wall_to_mono()
160 -tk->wall_to_monotonic.tv_nsec); in tk_set_wall_to_mono()
161 WARN_ON_ONCE(tk->offs_real != timespec64_to_ktime(tmp)); in tk_set_wall_to_mono()
162 tk->wall_to_monotonic = wtm; in tk_set_wall_to_mono()
163 set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec); in tk_set_wall_to_mono()
164 tk->offs_real = timespec64_to_ktime(tmp); in tk_set_wall_to_mono()
165 tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0)); in tk_set_wall_to_mono()
170 tk->offs_boot = ktime_add(tk->offs_boot, delta); in tk_update_sleep_time()
175 tk->monotonic_to_boot = ktime_to_timespec64(tk->offs_boot); in tk_update_sleep_time()
179 * tk_clock_read - atomic clocksource read() helper
188 * a read of the fast-timekeeper tkrs (which is protected by its own locking
193 struct clocksource *clock = READ_ONCE(tkr->clock); in tk_clock_read()
195 return clock->read(clock); in tk_clock_read()
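
The READ_ONCE() matters: loading tkr->clock twice could observe two different clocksources if a clocksource change runs concurrently, pairing one clock's read() with the other clock's state. A contrasting sketch of the racy version:

	/* Racy anti-pattern (for contrast, not kernel code):
	 *
	 *	return tkr->clock->read(tkr->clock);
	 *
	 * The compiler may load tkr->clock once per use; a concurrent
	 * update can then mix the old ->read with the new clock
	 * argument. READ_ONCE() pins one snapshot for both uses. */
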
199 #define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */
204 u64 max_cycles = tk->tkr_mono.clock->max_cycles; in timekeeping_check_update()
205 const char *name = tk->tkr_mono.clock->name; in timekeeping_check_update()
219 if (tk->underflow_seen) { in timekeeping_check_update()
220 if (jiffies - tk->last_warning > WARNING_FREQ) { in timekeeping_check_update()
224 tk->last_warning = jiffies; in timekeeping_check_update()
226 tk->underflow_seen = 0; in timekeeping_check_update()
229 if (tk->overflow_seen) { in timekeeping_check_update()
230 if (jiffies - tk->last_warning > WARNING_FREQ) { in timekeeping_check_update()
234 tk->last_warning = jiffies; in timekeeping_check_update()
236 tk->overflow_seen = 0; in timekeeping_check_update()
240 static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 cycles);
258 last = tkr->cycle_last; in timekeeping_debug_get_ns()
259 mask = tkr->mask; in timekeeping_debug_get_ns()
260 max = tkr->clock->max_cycles; in timekeeping_debug_get_ns()
267 * mask-relative negative values. in timekeeping_debug_get_ns()
270 tk->underflow_seen = 1; in timekeeping_debug_get_ns()
274 tk->overflow_seen = 1; in timekeeping_debug_get_ns()
290 * tk_setup_internals - Set up internals to use clocksource clock.
306 ++tk->cs_was_changed_seq; in tk_setup_internals()
307 old_clock = tk->tkr_mono.clock; in tk_setup_internals()
308 tk->tkr_mono.clock = clock; in tk_setup_internals()
309 tk->tkr_mono.mask = clock->mask; in tk_setup_internals()
310 tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono); in tk_setup_internals()
312 tk->tkr_raw.clock = clock; in tk_setup_internals()
313 tk->tkr_raw.mask = clock->mask; in tk_setup_internals()
314 tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last; in tk_setup_internals()
316 /* Do the ns -> cycle conversion first, using original mult */ in tk_setup_internals()
318 tmp <<= clock->shift; in tk_setup_internals()
320 tmp += clock->mult/2; in tk_setup_internals()
321 do_div(tmp, clock->mult); in tk_setup_internals()
326 tk->cycle_interval = interval; in tk_setup_internals()
328 /* Go back from cycles -> shifted ns */ in tk_setup_internals()
329 tk->xtime_interval = interval * clock->mult; in tk_setup_internals()
330 tk->xtime_remainder = ntpinterval - tk->xtime_interval; in tk_setup_internals()
331 tk->raw_interval = interval * clock->mult; in tk_setup_internals()
335 int shift_change = clock->shift - old_clock->shift; in tk_setup_internals()
337 tk->tkr_mono.xtime_nsec >>= -shift_change; in tk_setup_internals()
338 tk->tkr_raw.xtime_nsec >>= -shift_change; in tk_setup_internals()
340 tk->tkr_mono.xtime_nsec <<= shift_change; in tk_setup_internals()
341 tk->tkr_raw.xtime_nsec <<= shift_change; in tk_setup_internals()
345 tk->tkr_mono.shift = clock->shift; in tk_setup_internals()
346 tk->tkr_raw.shift = clock->shift; in tk_setup_internals()
348 tk->ntp_error = 0; in tk_setup_internals()
349 tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift; in tk_setup_internals()
350 tk->ntp_tick = ntpinterval << tk->ntp_error_shift; in tk_setup_internals()
357 tk->tkr_mono.mult = clock->mult; in tk_setup_internals()
358 tk->tkr_raw.mult = clock->mult; in tk_setup_internals()
359 tk->ntp_err_mult = 0; in tk_setup_internals()
360 tk->skip_second_overflow = 0; in tk_setup_internals()
366 return mul_u64_u32_add_u64_shr(delta, tkr->mult, tkr->xtime_nsec, tkr->shift); in delta_to_ns_safe()
369 static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 cycles) in timekeeping_cycles_to_ns() argument
372 u64 mask = tkr->mask, delta = (cycles - tkr->cycle_last) & mask; in timekeeping_cycles_to_ns()
376 * overflows the multiplication with tkr->mult. in timekeeping_cycles_to_ns()
378 if (unlikely(delta > tkr->clock->max_cycles)) { in timekeeping_cycles_to_ns()
385 return tkr->xtime_nsec >> tkr->shift; in timekeeping_cycles_to_ns()
390 return ((delta * tkr->mult) + tkr->xtime_nsec) >> tkr->shift; in timekeeping_cycles_to_ns()
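
The common case is the last line: a fixed-point multiply-and-shift. A self-contained example with hypothetical parameters for a 2.5 GHz counter (mult / 2^shift of roughly 0.4 ns per cycle):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t mult = 419430;			/* ~0.4 * 2^20 */
		uint32_t shift = 20;
		uint64_t delta = 2500000000ull;		/* 1 s of cycles */
		uint64_t xtime_nsec = 0;		/* no pending remainder */

		uint64_t ns = ((delta * mult) + xtime_nsec) >> shift;
		/* ~1e9, short by a few hundred ns of rounding in mult */
		printf("%llu\n", (unsigned long long)ns);
		return 0;
	}
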
407 * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
424 struct tk_read_base *base = tkf->base; in update_fast_timekeeper()
427 raw_write_seqcount_latch(&tkf->seq); in update_fast_timekeeper()
433 raw_write_seqcount_latch(&tkf->seq); in update_fast_timekeeper()
446 seq = raw_read_seqcount_latch(&tkf->seq); in __ktime_get_fast_ns()
447 tkr = tkf->base + (seq & 0x01); in __ktime_get_fast_ns()
448 now = ktime_to_ns(tkr->base); in __ktime_get_fast_ns()
450 } while (raw_read_seqcount_latch_retry(&tkf->seq, seq)); in __ktime_get_fast_ns()
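
This is the read side of a latch sequence counter: the writer keeps two copies of tk_read_base and flips the sequence around each update; readers pick the copy selected by the low sequence bit and retry if the sequence moved. A simplified, self-contained sketch (C11 seq_cst atomics stand in for the kernel's carefully placed barriers):

	#include <stdatomic.h>
	#include <stdint.h>

	struct latch {
		_Atomic unsigned int seq;
		uint64_t data[2];
	};

	static uint64_t latch_read(struct latch *l)
	{
		unsigned int seq;
		uint64_t v;

		do {
			seq = atomic_load(&l->seq);
			v = l->data[seq & 1];	/* copy not being written */
		} while (atomic_load(&l->seq) != seq);
		return v;
	}

	static void latch_write(struct latch *l, uint64_t v)
	{
		/* odd seq steers readers to data[1] while data[0] updates */
		atomic_fetch_add(&l->seq, 1);
		l->data[0] = v;
		/* even seq steers them back while data[1] updates */
		atomic_fetch_add(&l->seq, 1);
		l->data[1] = v;
	}
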
456 * ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
473 * |12345678---> reader order
494 * ktime_get_raw_fast_ns - Fast NMI safe access to clock monotonic raw
497 * conversion factor is not affected by NTP/PTP correction.
506 * ktime_get_boot_fast_ns - NMI safe and fast access to boot clock.
522 * (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be
523 * partially updated. Since the tk->offs_boot update is a rare event, this
533 return (ktime_get_mono_fast_ns() + ktime_to_ns(data_race(tk->offs_boot))); in ktime_get_boot_fast_ns()
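
Because the fast accessors never block on the timekeeper sequence counter's write side, they are usable from NMI and similar contexts where ktime_get() could deadlock. A hypothetical caller:

	/* Hypothetical NMI-context user: my_nmi_handler() and
	 * record_sample() are illustrative, not kernel APIs. */
	static void my_nmi_handler(void)
	{
		u64 now = ktime_get_boot_fast_ns();	/* NMI safe */

		record_sample(now);
	}
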
538 * ktime_get_tai_fast_ns - NMI safe and fast access to tai clock.
550 return (ktime_get_mono_fast_ns() + ktime_to_ns(data_race(tk->offs_tai))); in ktime_get_tai_fast_ns()
561 seq = raw_read_seqcount_latch(&tkf->seq); in __ktime_get_real_fast()
562 tkr = tkf->base + (seq & 0x01); in __ktime_get_real_fast()
563 basem = ktime_to_ns(tkr->base); in __ktime_get_real_fast()
564 baser = ktime_to_ns(tkr->base_real); in __ktime_get_real_fast()
566 } while (raw_read_seqcount_latch_retry(&tkf->seq, seq)); in __ktime_get_real_fast()
 574 * ktime_get_real_fast_ns() - NMI safe and fast access to clock realtime.
 585 * ktime_get_fast_timestamps() - NMI safe timestamps
634 snapshot->real = __ktime_get_real_fast(&tk_fast_mono, &snapshot->mono); in ktime_get_fast_timestamps()
635 snapshot->boot = snapshot->mono + ktime_to_ns(data_race(tk->offs_boot)); in ktime_get_fast_timestamps()
639 * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
 645 * number of cycles every time until timekeeping is resumed, at which time the
651 const struct tk_read_base *tkr = &tk->tkr_mono; in halt_fast_timekeeper()
656 tkr_dummy.base_real = tkr->base + tk->offs_real; in halt_fast_timekeeper()
659 tkr = &tk->tkr_raw; in halt_fast_timekeeper()
673 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
692 * pvclock_gtod_unregister_notifier - unregister a pvclock
710 * tk_update_leap_state - helper to update the next_leap_ktime
714 tk->next_leap_ktime = ntp_get_next_leap(); in tk_update_leap_state()
715 if (tk->next_leap_ktime != KTIME_MAX) in tk_update_leap_state()
717 tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real); in tk_update_leap_state()
735 seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec); in tk_update_ktime_data()
736 nsec = (u32) tk->wall_to_monotonic.tv_nsec; in tk_update_ktime_data()
737 tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec); in tk_update_ktime_data()
742 * this into account before updating tk->ktime_sec. in tk_update_ktime_data()
744 nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift); in tk_update_ktime_data()
747 tk->ktime_sec = seconds; in tk_update_ktime_data()
750 tk->tkr_raw.base = ns_to_ktime(tk->raw_sec * NSEC_PER_SEC); in tk_update_ktime_data()
757 tk->ntp_error = 0; in timekeeping_update()
767 tk->tkr_mono.base_real = tk->tkr_mono.base + tk->offs_real; in timekeeping_update()
768 update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono); in timekeeping_update()
769 update_fast_timekeeper(&tk->tkr_raw, &tk_fast_raw); in timekeeping_update()
772 tk->clock_was_set_seq++; in timekeeping_update()
774 * The mirroring of the data to the shadow-timekeeper needs in timekeeping_update()
 775 * to happen last here to ensure we don't overwrite the in timekeeping_update()
784 * timekeeping_forward_now - update clock to the current time
795 cycle_now = tk_clock_read(&tk->tkr_mono); in timekeeping_forward_now()
796 delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask); in timekeeping_forward_now()
797 tk->tkr_mono.cycle_last = cycle_now; in timekeeping_forward_now()
798 tk->tkr_raw.cycle_last = cycle_now; in timekeeping_forward_now()
801 u64 max = tk->tkr_mono.clock->max_cycles; in timekeeping_forward_now()
804 tk->tkr_mono.xtime_nsec += incr * tk->tkr_mono.mult; in timekeeping_forward_now()
805 tk->tkr_raw.xtime_nsec += incr * tk->tkr_raw.mult; in timekeeping_forward_now()
807 delta -= incr; in timekeeping_forward_now()
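
The chunking above bounds each multiplication: incr never exceeds the clocksource's max_cycles, so incr * mult cannot overflow 64 bits even after a long idle period. The same loop in standalone form, with names abbreviated:

	#include <stdint.h>

	/* Consume delta cycles into shifted ns, max cycles at a time. */
	static uint64_t forward(uint64_t delta, uint64_t max, uint32_t mult)
	{
		uint64_t xtime_nsec = 0;

		while (delta > 0) {
			uint64_t incr = delta < max ? delta : max;

			xtime_nsec += incr * mult;	/* cannot overflow */
			delta -= incr;
		}
		return xtime_nsec;
	}
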
812 * ktime_get_real_ts64 - Returns the time of day in a timespec64.
828 ts->tv_sec = tk->xtime_sec; in ktime_get_real_ts64()
829 nsecs = timekeeping_get_ns(&tk->tkr_mono); in ktime_get_real_ts64()
833 ts->tv_nsec = 0; in ktime_get_real_ts64()
849 base = tk->tkr_mono.base; in ktime_get()
850 nsecs = timekeeping_get_ns(&tk->tkr_mono); in ktime_get()
868 nsecs = tk->tkr_mono.mult >> tk->tkr_mono.shift; in ktime_get_resolution_ns()
892 base = ktime_add(tk->tkr_mono.base, *offset); in ktime_get_with_offset()
893 nsecs = timekeeping_get_ns(&tk->tkr_mono); in ktime_get_with_offset()
913 base = ktime_add(tk->tkr_mono.base, *offset); in ktime_get_coarse_with_offset()
914 nsecs = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift; in ktime_get_coarse_with_offset()
923 * ktime_mono_to_any() - convert monotonic time to any other time
943 * ktime_get_raw - Returns the raw monotonic time in ktime_t format
954 base = tk->tkr_raw.base; in ktime_get_raw()
955 nsecs = timekeeping_get_ns(&tk->tkr_raw); in ktime_get_raw()
964 * ktime_get_ts64 - get the monotonic clock in timespec64 format
982 ts->tv_sec = tk->xtime_sec; in ktime_get_ts64()
983 nsec = timekeeping_get_ns(&tk->tkr_mono); in ktime_get_ts64()
984 tomono = tk->wall_to_monotonic; in ktime_get_ts64()
988 ts->tv_sec += tomono.tv_sec; in ktime_get_ts64()
989 ts->tv_nsec = 0; in ktime_get_ts64()
995 * ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC
998 * serialized read. tk->ktime_sec is of type 'unsigned long' so this
1008 return tk->ktime_sec; in ktime_get_seconds()
1013 * ktime_get_real_seconds - Get the seconds portion of CLOCK_REALTIME
1017 * For 64bit systems the fast access to tk->xtime_sec is preserved. On
1019 * counter to provide "atomic" access to the 64bit tk->xtime_sec
1029 return tk->xtime_sec; in ktime_get_real_seconds()
1033 seconds = tk->xtime_sec; in ktime_get_real_seconds()
1042 * __ktime_get_real_seconds - The same as ktime_get_real_seconds
1050 return tk->xtime_sec; in __ktime_get_real_seconds()
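
On 32-bit hardware a 64-bit load can tear, which is why the non-64-bit path wraps the read of tk->xtime_sec in a sequence-counter retry loop. A generic sketch of that read pattern (barriers elided for brevity):

	#include <stdint.h>

	/* Writer bumps *seq to odd, updates *val, bumps back to even;
	 * readers retry until they see a stable even sequence. */
	static uint64_t read_u64_torn_safe(const volatile unsigned int *seq,
					   const volatile uint64_t *val)
	{
		unsigned int s;
		uint64_t v;

		do {
			while ((s = *seq) & 1)
				;		/* writer in progress */
			v = *val;
		} while (*seq != s);
		return v;
	}
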
1054 * ktime_get_snapshot - snapshots the realtime/monotonic raw clocks with counter
1071 now = tk_clock_read(&tk->tkr_mono); in ktime_get_snapshot()
1072 systime_snapshot->cs_id = tk->tkr_mono.clock->id; in ktime_get_snapshot()
1073 systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq; in ktime_get_snapshot()
1074 systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq; in ktime_get_snapshot()
1075 base_real = ktime_add(tk->tkr_mono.base, in ktime_get_snapshot()
1077 base_raw = tk->tkr_raw.base; in ktime_get_snapshot()
1078 nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, now); in ktime_get_snapshot()
1079 nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw, now); in ktime_get_snapshot()
1082 systime_snapshot->cycles = now; in ktime_get_snapshot()
1083 systime_snapshot->real = ktime_add_ns(base_real, nsec_real); in ktime_get_snapshot()
1084 systime_snapshot->raw = ktime_add_ns(base_raw, nsec_raw); in ktime_get_snapshot()
1095 if (((int)sizeof(u64)*8 - fls64(mult) < fls64(tmp)) || in scale64_check_overflow()
1096 ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem))) in scale64_check_overflow()
1097 return -EOVERFLOW; in scale64_check_overflow()
1106 * adjust_historical_crosststamp - adjust crosstimestamp previous to current interval
1109 * @total_history_cycles: Total history length in cycles
1139 total_history_cycles - partial_history_cycles : in adjust_historical_crosststamp()
1147 ktime_sub(ts->sys_monoraw, history->raw)); in adjust_historical_crosststamp()
1162 (corr_raw, tk->tkr_mono.mult, tk->tkr_raw.mult); in adjust_historical_crosststamp()
1165 ktime_sub(ts->sys_realtime, history->real)); in adjust_historical_crosststamp()
1174 ts->sys_monoraw = ktime_add_ns(history->raw, corr_raw); in adjust_historical_crosststamp()
1175 ts->sys_realtime = ktime_add_ns(history->real, corr_real); in adjust_historical_crosststamp()
1177 ts->sys_monoraw = ktime_sub_ns(ts->sys_monoraw, corr_raw); in adjust_historical_crosststamp()
1178 ts->sys_realtime = ktime_sub_ns(ts->sys_realtime, corr_real); in adjust_historical_crosststamp()
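
The adjustment is a linear interpolation: the correction accumulated over the whole history window is scaled by the fraction that applies, corr_partial = corr_total * partial_history_cycles / total_history_cycles, then added to the history snapshot or subtracted from the current one depending on which end the timestamp fell nearer. In toy numbers (the proportional-scaling step only):

	#include <stdint.h>

	int main(void)
	{
		uint64_t corr_total_ns = 10000000;	/* 10 ms over history */
		uint64_t partial = 250000, total = 1000000;	/* cycles */

		uint64_t corr_partial = corr_total_ns * partial / total;
		return !(corr_partial == 2500000);	/* 2.5 ms */
	}
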
1185 * timestamp_in_interval - true if ts is chronologically in [start, end]
1217 if (cs->id == scv->cs_id) in convert_base_to_cs()
1222 * re-evaluating @base as the clocksource might change concurrently. in convert_base_to_cs()
1224 base = READ_ONCE(cs->base); in convert_base_to_cs()
1225 if (!base || base->id != scv->cs_id) in convert_base_to_cs()
1228 num = scv->use_nsecs ? cs->freq_khz : base->numerator; in convert_base_to_cs()
1229 den = scv->use_nsecs ? USEC_PER_SEC : base->denominator; in convert_base_to_cs()
1231 if (!convert_clock(&scv->cycles, num, den)) in convert_base_to_cs()
1234 scv->cycles += base->offset; in convert_base_to_cs()
1238 static bool convert_cs_to_base(u64 *cycles, enum clocksource_ids base_id) in convert_cs_to_base() argument
1245 * re-evaluating @base as the clocksource might change concurrently. in convert_cs_to_base()
1247 base = READ_ONCE(cs->base); in convert_cs_to_base()
1248 if (!base || base->id != base_id) in convert_cs_to_base()
1251 *cycles -= base->offset; in convert_cs_to_base()
1252 if (!convert_clock(cycles, base->denominator, base->numerator)) in convert_cs_to_base()
1261 if (BITS_TO_BYTES(fls64(*delta) + tkr->shift) >= sizeof(*delta)) in convert_ns_to_cs()
1264 *delta = div_u64((*delta << tkr->shift) - tkr->xtime_nsec, tkr->mult); in convert_ns_to_cs()
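
This inverts the forward conversion ns = (cycles * mult + xtime_nsec) >> shift into cycles = ((ns << shift) - xtime_nsec) / mult; the fls64() guard above bails out when ns << shift would not fit in 64 bits. A round-trip check with hypothetical parameters (chosen so the shift loses no precision):

	#include <stdint.h>

	int main(void)
	{
		uint32_t mult = 419430, shift = 20;	/* hypothetical */
		uint64_t cycles = 1048576;		/* 2^20: exact here */
		uint64_t ns = (cycles * mult) >> shift;	/* forward: 419430 */
		uint64_t back = (ns << shift) / mult;	/* inverse: 1048576 */

		return !(back == cycles);
	}
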
1269 * ktime_real_to_base_clock() - Convert CLOCK_REALTIME timestamp to a base clock timestamp
1272 * @cycles: pointer to store the converted base clock timestamp
1276 * Return: true if the conversion is successful, false otherwise.
1278 bool ktime_real_to_base_clock(ktime_t treal, enum clocksource_ids base_id, u64 *cycles) in ktime_real_to_base_clock() argument
1286 if ((u64)treal < tk->tkr_mono.base_real) in ktime_real_to_base_clock()
1288 delta = (u64)treal - tk->tkr_mono.base_real; in ktime_real_to_base_clock()
1291 *cycles = tk->tkr_mono.cycle_last + delta; in ktime_real_to_base_clock()
1292 if (!convert_cs_to_base(cycles, base_id)) in ktime_real_to_base_clock()
1301 * get_device_system_crosststamp - Synchronously capture system/device timestamp
1321 u64 cycles, now, interval_start; in get_device_system_crosststamp() local
1336 ret = get_time_fn(&xtstamp->device, &system_counterval, ctx); in get_device_system_crosststamp()
1347 return -ENODEV; in get_device_system_crosststamp()
1348 cycles = system_counterval.cycles; in get_device_system_crosststamp()
1354 now = tk_clock_read(&tk->tkr_mono); in get_device_system_crosststamp()
1355 interval_start = tk->tkr_mono.cycle_last; in get_device_system_crosststamp()
1356 if (!timestamp_in_interval(interval_start, now, cycles)) { in get_device_system_crosststamp()
1357 clock_was_set_seq = tk->clock_was_set_seq; in get_device_system_crosststamp()
1358 cs_was_changed_seq = tk->cs_was_changed_seq; in get_device_system_crosststamp()
1359 cycles = interval_start; in get_device_system_crosststamp()
1365 base_real = ktime_add(tk->tkr_mono.base, in get_device_system_crosststamp()
1367 base_raw = tk->tkr_raw.base; in get_device_system_crosststamp()
1369 nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, cycles); in get_device_system_crosststamp()
1370 nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw, cycles); in get_device_system_crosststamp()
1373 xtstamp->sys_realtime = ktime_add_ns(base_real, nsec_real); in get_device_system_crosststamp()
1374 xtstamp->sys_monoraw = ktime_add_ns(base_raw, nsec_raw); in get_device_system_crosststamp()
1390 !timestamp_in_interval(history_begin->cycles, in get_device_system_crosststamp()
1391 cycles, system_counterval.cycles) || in get_device_system_crosststamp()
1392 history_begin->cs_was_changed_seq != cs_was_changed_seq) in get_device_system_crosststamp()
1393 return -EINVAL; in get_device_system_crosststamp()
1394 partial_history_cycles = cycles - system_counterval.cycles; in get_device_system_crosststamp()
1395 total_history_cycles = cycles - history_begin->cycles; in get_device_system_crosststamp()
1397 history_begin->clock_was_set_seq != clock_was_set_seq; in get_device_system_crosststamp()
1412 * timekeeping_clocksource_has_base - Check whether the current clocksource
1426 * count. Just prevent the compiler from re-evaluating @base as the in timekeeping_clocksource_has_base()
1429 struct clocksource_base *base = READ_ONCE(tk_core.timekeeper.tkr_mono.clock->base); in timekeeping_clocksource_has_base()
1431 return base ? base->id == id : false; in timekeeping_clocksource_has_base()
1436 * do_settimeofday64 - Sets the time of day.
1449 return -EINVAL; in do_settimeofday64()
1459 if (timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0) { in do_settimeofday64()
1460 ret = -EINVAL; in do_settimeofday64()
1464 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta)); in do_settimeofday64()
1486 * timekeeping_inject_offset - Adds or subtracts from the current time.
1498 if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC) in timekeeping_inject_offset()
1499 return -EINVAL; in timekeeping_inject_offset()
1508 if (timespec64_compare(&tk->wall_to_monotonic, ts) > 0 || in timekeeping_inject_offset()
1510 ret = -EINVAL; in timekeeping_inject_offset()
1515 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *ts)); in timekeeping_inject_offset()
1545 * - TYT, 1992-01-01
1564 * __timekeeping_set_tai_offset - Sets the TAI offset from UTC and monotonic
1568 tk->tai_offset = tai_offset; in __timekeeping_set_tai_offset()
1569 tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0)); in __timekeeping_set_tai_offset()
1573 * change_clocksource - Swaps clocksources if a new one is available
1588 * for built-in code (owner == NULL) as well. in change_clocksource()
1590 if (try_module_get(new->owner)) { in change_clocksource()
1591 if (!new->enable || new->enable(new) == 0) in change_clocksource()
1594 module_put(new->owner); in change_clocksource()
1603 old = tk->tkr_mono.clock; in change_clocksource()
1613 if (old->disable) in change_clocksource()
1614 old->disable(old); in change_clocksource()
1616 module_put(old->owner); in change_clocksource()
1623 * timekeeping_notify - Install a new clock source
1633 if (tk->tkr_mono.clock == clock) in timekeeping_notify()
1637 return tk->tkr_mono.clock == clock ? 0 : -1; in timekeeping_notify()
1641 * ktime_get_raw_ts64 - Returns the raw monotonic time in a timespec64
1644 * Returns the raw monotonic time (completely unmodified by NTP)
1654 ts->tv_sec = tk->raw_sec; in ktime_get_raw_ts64()
1655 nsecs = timekeeping_get_ns(&tk->tkr_raw); in ktime_get_raw_ts64()
1659 ts->tv_nsec = 0; in ktime_get_raw_ts64()
1666 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
1677 ret = tk->tkr_mono.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES; in timekeeping_valid_for_hres()
1685 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
1696 ret = tk->tkr_mono.clock->max_idle_ns; in timekeeping_max_deferment()
1704 * read_persistent_clock64 - Return time from the persistent clock.
1711 * XXX - Do be sure to remove it once all arches implement it.
1715 ts->tv_sec = 0; in read_persistent_clock64()
1716 ts->tv_nsec = 0; in read_persistent_clock64()
1720 * read_persistent_wall_and_boot_offset - Read persistent clock, and also offset
1723 * @boot_offset: offset that is defined as wall_time - boot_time
1759 * timekeeping_init - Initializes the clocksource and common timekeeping values
1791 if (clock->enable) in timekeeping_init()
1792 clock->enable(clock); in timekeeping_init()
1796 tk->raw_sec = 0; in timekeeping_init()
1810 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
1827 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta)); in __timekeeping_inject_sleeptime()
1834 * We have three kinds of time sources to use for sleep time
1836 * 1) non-stop clocksource
1869 * timekeeping_inject_sleeptime64 - Adds suspend interval to timekeeping values
1904 * timekeeping_resume - Resumes the generic timekeeping subsystem.
1909 struct clocksource *clock = tk->tkr_mono.clock; in timekeeping_resume()
1931 * suspend-nonstop clocksource -> persistent clock -> rtc in timekeeping_resume()
1935 cycle_now = tk_clock_read(&tk->tkr_mono); in timekeeping_resume()
1950 /* Re-base the last cycle value */ in timekeeping_resume()
1951 tk->tkr_mono.cycle_last = cycle_now; in timekeeping_resume()
1952 tk->tkr_raw.cycle_last = cycle_now; in timekeeping_resume()
1954 tk->ntp_error = 0; in timekeeping_resume()
1999 curr_clock = tk->tkr_mono.clock; in timekeeping_suspend()
2000 cycle_now = tk->tkr_mono.cycle_last; in timekeeping_suspend()
2057 s64 interval = tk->cycle_interval; in timekeeping_apply_adjustment()
2061 } else if (mult_adj == -1) { in timekeeping_apply_adjustment()
2062 interval = -interval; in timekeeping_apply_adjustment()
2063 offset = -offset; in timekeeping_apply_adjustment()
2088 * So offset stores the non-accumulated cycles. Thus the current in timekeeping_apply_adjustment()
2112 * xtime_nsec_2 = xtime_nsec_1 - offset in timekeeping_apply_adjustment()
2114 * xtime_nsec -= offset in timekeeping_apply_adjustment()
2116 if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) { in timekeeping_apply_adjustment()
2122 tk->tkr_mono.mult += mult_adj; in timekeeping_apply_adjustment()
2123 tk->xtime_interval += interval; in timekeeping_apply_adjustment()
2124 tk->tkr_mono.xtime_nsec -= offset; in timekeeping_apply_adjustment()
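
The subtraction keeps (offset * mult + xtime_nsec) constant across the adjustment, so clock readouts do not jump when mult is bumped. A toy check of that invariant for mult_adj == 1:

	#include <stdint.h>

	int main(void)
	{
		uint64_t offset = 1000;		/* unaccumulated cycles */
		uint64_t mult = 400, xtime_nsec = 500000;

		uint64_t before = offset * mult + xtime_nsec;	/* 900000 */
		mult += 1;			/* apply mult_adj	  */
		xtime_nsec -= offset;		/* paired correction	  */
		uint64_t after = offset * mult + xtime_nsec;	/* 900000 */

		return !(before == after);	/* time stays continuous */
	}
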
2139 if (likely(tk->ntp_tick == ntp_tick_length())) { in timekeeping_adjust()
2140 mult = tk->tkr_mono.mult - tk->ntp_err_mult; in timekeeping_adjust()
2142 tk->ntp_tick = ntp_tick_length(); in timekeeping_adjust()
2143 mult = div64_u64((tk->ntp_tick >> tk->ntp_error_shift) - in timekeeping_adjust()
2144 tk->xtime_remainder, tk->cycle_interval); in timekeeping_adjust()
2151 * ahead until the tick length changes to a non-divisible value. in timekeeping_adjust()
2153 tk->ntp_err_mult = tk->ntp_error > 0 ? 1 : 0; in timekeeping_adjust()
2154 mult += tk->ntp_err_mult; in timekeeping_adjust()
2156 timekeeping_apply_adjustment(tk, offset, mult - tk->tkr_mono.mult); in timekeeping_adjust()
2158 if (unlikely(tk->tkr_mono.clock->maxadj && in timekeeping_adjust()
2159 (abs(tk->tkr_mono.mult - tk->tkr_mono.clock->mult) in timekeeping_adjust()
2160 > tk->tkr_mono.clock->maxadj))) { in timekeeping_adjust()
2163 tk->tkr_mono.clock->name, (long)tk->tkr_mono.mult, in timekeeping_adjust()
2164 (long)tk->tkr_mono.clock->mult + tk->tkr_mono.clock->maxadj); in timekeeping_adjust()
2177 if (unlikely((s64)tk->tkr_mono.xtime_nsec < 0)) { in timekeeping_adjust()
2178 tk->tkr_mono.xtime_nsec += (u64)NSEC_PER_SEC << in timekeeping_adjust()
2179 tk->tkr_mono.shift; in timekeeping_adjust()
2180 tk->xtime_sec--; in timekeeping_adjust()
2181 tk->skip_second_overflow = 1; in timekeeping_adjust()
2186 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
2194 u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr_mono.shift; in accumulate_nsecs_to_secs()
2197 while (tk->tkr_mono.xtime_nsec >= nsecps) { in accumulate_nsecs_to_secs()
2200 tk->tkr_mono.xtime_nsec -= nsecps; in accumulate_nsecs_to_secs()
2201 tk->xtime_sec++; in accumulate_nsecs_to_secs()
2207 if (unlikely(tk->skip_second_overflow)) { in accumulate_nsecs_to_secs()
2208 tk->skip_second_overflow = 0; in accumulate_nsecs_to_secs()
2213 leap = second_overflow(tk->xtime_sec); in accumulate_nsecs_to_secs()
2217 tk->xtime_sec += leap; in accumulate_nsecs_to_secs()
2222 timespec64_sub(tk->wall_to_monotonic, ts)); in accumulate_nsecs_to_secs()
2224 __timekeeping_set_tai_offset(tk, tk->tai_offset - leap); in accumulate_nsecs_to_secs()
2233 * logarithmic_accumulation - shifted accumulation of cycles
2235 * This function accumulates a shifted interval of cycles into
2239 * Returns the unconsumed cycles.
2244 u64 interval = tk->cycle_interval << shift; in logarithmic_accumulation()
2252 offset -= interval; in logarithmic_accumulation()
2253 tk->tkr_mono.cycle_last += interval; in logarithmic_accumulation()
2254 tk->tkr_raw.cycle_last += interval; in logarithmic_accumulation()
2256 tk->tkr_mono.xtime_nsec += tk->xtime_interval << shift; in logarithmic_accumulation()
2260 tk->tkr_raw.xtime_nsec += tk->raw_interval << shift; in logarithmic_accumulation()
2261 snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift; in logarithmic_accumulation()
2262 while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) { in logarithmic_accumulation()
2263 tk->tkr_raw.xtime_nsec -= snsec_per_sec; in logarithmic_accumulation()
2264 tk->raw_sec++; in logarithmic_accumulation()
2268 tk->ntp_error += tk->ntp_tick << shift; in logarithmic_accumulation()
2269 tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) << in logarithmic_accumulation()
2270 (tk->ntp_error_shift + shift); in logarithmic_accumulation()
2276 * timekeeping_advance - Updates the timekeeper to the current time and
2294 offset = clocksource_delta(tk_clock_read(&tk->tkr_mono), in timekeeping_advance()
2295 tk->tkr_mono.cycle_last, tk->tkr_mono.mask); in timekeeping_advance()
2298 if (offset < real_tk->cycle_interval && mode == TK_ADV_TICK) in timekeeping_advance()
2312 shift = ilog2(offset) - ilog2(tk->cycle_interval); in timekeeping_advance()
2315 maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1; in timekeeping_advance()
2317 while (offset >= tk->cycle_interval) { in timekeeping_advance()
2320 if (offset < tk->cycle_interval<<shift) in timekeeping_advance()
2321 shift--; in timekeeping_advance()
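
Accumulation is logarithmic: each pass consumes cycle_interval << shift, starting from shift of roughly ilog2(offset) - ilog2(cycle_interval) (capped by maxshift so the NTP error arithmetic cannot overflow) and shrinking as the backlog drains, so even a huge offset is absorbed in O(log n) passes. A standalone sketch, assuming a hypothetical 1e6-cycle interval and a 100e6-cycle backlog:

	#include <stdint.h>

	static int ilog2_u64(uint64_t v)
	{
		int l = -1;

		while (v) {
			l++;
			v >>= 1;
		}
		return l;
	}

	int main(void)
	{
		uint64_t interval = 1000000, offset = 100000000;
		int shift = ilog2_u64(offset) - ilog2_u64(interval);

		while (offset >= interval) {
			uint64_t chunk = interval << shift;

			if (offset >= chunk)
				offset -= chunk;	/* accumulate one chunk */
			if (shift > 0 && offset < (interval << shift))
				shift--;
		}
		return !(offset < interval);	/* backlog fully consumed */
	}
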
2355 * update_wall_time - Uses the current clocksource to increment the wall time
2365 * getboottime64 - Return the real time of system boot.
2368 * Returns the wall-time of boot in a timespec64.
2378 ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot); in getboottime64()
2407 mono = tk->wall_to_monotonic; in ktime_get_coarse_ts64()
2425 * ktime_get_update_offsets_now - hrtimer helper
2427 * @offs_real: pointer to storage for monotonic -> realtime offset
2428 * @offs_boot: pointer to storage for monotonic -> boottime offset
2429 * @offs_tai: pointer to storage for monotonic -> clock tai offset
2448 base = tk->tkr_mono.base; in ktime_get_update_offsets_now()
2449 nsecs = timekeeping_get_ns(&tk->tkr_mono); in ktime_get_update_offsets_now()
2452 if (*cwsseq != tk->clock_was_set_seq) { in ktime_get_update_offsets_now()
2453 *cwsseq = tk->clock_was_set_seq; in ktime_get_update_offsets_now()
2454 *offs_real = tk->offs_real; in ktime_get_update_offsets_now()
2455 *offs_boot = tk->offs_boot; in ktime_get_update_offsets_now()
2456 *offs_tai = tk->offs_tai; in ktime_get_update_offsets_now()
2460 if (unlikely(base >= tk->next_leap_ktime)) in ktime_get_update_offsets_now()
2461 *offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0)); in ktime_get_update_offsets_now()
2469 * timekeeping_validate_timex - Ensures the timex is ok for use in do_adjtimex
2473 if (txc->modes & ADJ_ADJTIME) { in timekeeping_validate_timex()
2475 if (!(txc->modes & ADJ_OFFSET_SINGLESHOT)) in timekeeping_validate_timex()
2476 return -EINVAL; in timekeeping_validate_timex()
2477 if (!(txc->modes & ADJ_OFFSET_READONLY) && in timekeeping_validate_timex()
2479 return -EPERM; in timekeeping_validate_timex()
2481 /* In order to modify anything, you gotta be super-user! */ in timekeeping_validate_timex()
2482 if (txc->modes && !capable(CAP_SYS_TIME)) in timekeeping_validate_timex()
2483 return -EPERM; in timekeeping_validate_timex()
2488 if (txc->modes & ADJ_TICK && in timekeeping_validate_timex()
2489 (txc->tick < 900000/USER_HZ || in timekeeping_validate_timex()
2490 txc->tick > 1100000/USER_HZ)) in timekeeping_validate_timex()
2491 return -EINVAL; in timekeeping_validate_timex()
2494 if (txc->modes & ADJ_SETOFFSET) { in timekeeping_validate_timex()
2495 /* In order to inject time, you gotta be super-user! */ in timekeeping_validate_timex()
2497 return -EPERM; in timekeeping_validate_timex()
2504 * The field tv_usec/tv_nsec must always be non-negative and in timekeeping_validate_timex()
2507 if (txc->time.tv_usec < 0) in timekeeping_validate_timex()
2508 return -EINVAL; in timekeeping_validate_timex()
2510 if (txc->modes & ADJ_NANO) { in timekeeping_validate_timex()
2511 if (txc->time.tv_usec >= NSEC_PER_SEC) in timekeeping_validate_timex()
2512 return -EINVAL; in timekeeping_validate_timex()
2514 if (txc->time.tv_usec >= USEC_PER_SEC) in timekeeping_validate_timex()
2515 return -EINVAL; in timekeeping_validate_timex()
2521 * only happen on 64-bit systems: in timekeeping_validate_timex()
2523 if ((txc->modes & ADJ_FREQUENCY) && (BITS_PER_LONG == 64)) { in timekeeping_validate_timex()
2524 if (LLONG_MIN / PPM_SCALE > txc->freq) in timekeeping_validate_timex()
2525 return -EINVAL; in timekeeping_validate_timex()
2526 if (LLONG_MAX / PPM_SCALE < txc->freq) in timekeeping_validate_timex()
2527 return -EINVAL; in timekeeping_validate_timex()
2534 * random_get_entropy_fallback - Returns the raw clock source value,
2540 struct clocksource *clock = READ_ONCE(tkr->clock); in random_get_entropy_fallback()
2544 return clock->read(clock); in random_get_entropy_fallback()
2549 * do_adjtimex() - Accessor function to NTP __do_adjtimex function
2569 if (txc->modes & ADJ_SETOFFSET) { in do_adjtimex()
2571 delta.tv_sec = txc->time.tv_sec; in do_adjtimex()
2572 delta.tv_nsec = txc->time.tv_usec; in do_adjtimex()
2573 if (!(txc->modes & ADJ_NANO)) in do_adjtimex()
2591 orig_tai = tai = tk->tai_offset; in do_adjtimex()
2607 if (txc->modes & (ADJ_FREQUENCY | ADJ_TICK)) in do_adjtimex()
2620 * hardpps() - Accessor function to NTP __hardpps function