Lines Matching "int", "clock", "stable", "broken" (kernel/sched/clock.c)
1 // SPDX-License-Identifier: GPL-2.0-only
18 * clock with bounded drift between CPUs. The value of cpu_clock(i)
29 * cpu_clock(i) -- can be used from any context, including NMI.
30 * local_clock() -- is cpu_clock() on the current CPU.
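
These two interfaces are the ones most code actually calls. A minimal usage sketch, assuming kernel context (for example inside a module) and using only the exported helpers local_clock() and cpu_clock():

#include <linux/printk.h>
#include <linux/sched/clock.h>

/* Sketch: read the scheduler clock on the current CPU and on another CPU.
 * Both values are in nanoseconds; cpu_clock(i) is monotonic for constant i
 * and has bounded drift against other CPUs, as the comments above state.
 */
static void sample_sched_clocks(void)
{
	u64 here  = local_clock();	/* cpu_clock() on the current CPU */
	u64 there = cpu_clock(1);	/* clock of CPU 1 (assumes CPU 1 exists) */

	pr_info("local=%llu ns, cpu1=%llu ns\n", here, there);
}
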
41 * Otherwise it tries to create a semi stable clock from a mixture of other
44 * - GTOD (clock monotonic)
45 * - sched_clock()
46 * - explicit idle events
58 * Scheduler clock - returns current time in nanosec units.
60 * Architectures and sub-architectures can override this.
64 return (unsigned long long)(jiffies - INITIAL_JIFFIES) in sched_clock()
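
Line 64 is only the first half of an expression; the generic weak fallback it belongs to, reconstructed here for readability (a sketch matching current mainline as far as I can tell), simply scales a jiffies delta to nanoseconds:

notrace unsigned long long __weak sched_clock(void)
{
	/* jiffies-granular fallback: resolution is one tick (NSEC_PER_SEC / HZ) */
	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
					* (NSEC_PER_SEC / HZ);
}

Architectures with a better free-running counter override this weak symbol with their own sched_clock().
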
73 * We must start with !__sched_clock_stable because the unstable -> stable
74 * transition is accurate, while the stable -> unstable transition is not.
77 * will become stable, such that there's only a single 1 -> 0 transition.
80 static int __sched_clock_stable_early = 1;
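
For context, the early flag above pairs with a static branch; a sketch of the surrounding definitions (reconstructed, details may differ between kernel versions) that gives callers a cheap sched_clock_stable() test and lets the key be disabled at most once, matching the single 1 -> 0 transition promised in the comment:

static DEFINE_STATIC_KEY_FALSE(__sched_clock_stable);
static int __sched_clock_stable_early = 1;	/* assume stable until proven otherwise */

notrace int sched_clock_stable(void)
{
	/* Patched jump: near-zero cost in the common (stable) case. */
	return static_branch_likely(&__sched_clock_stable);
}
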
91 u64 clock; member
101 notrace static inline struct sched_clock_data *cpu_sdc(int cpu) in cpu_sdc()
106 notrace int sched_clock_stable(void) in sched_clock_stable()
113 scd->tick_gtod = ktime_get_ns(); in __scd_stamp()
114 scd->tick_raw = sched_clock(); in __scd_stamp()
123 * to disable IRQs in order to get a consistent scd->tick* reading. in __set_sched_clock_stable()
128 * Attempt to make the (initial) unstable->stable transition continuous. in __set_sched_clock_stable()
130 __sched_clock_offset = (scd->tick_gtod + __gtod_offset) - (scd->tick_raw); in __set_sched_clock_stable()
133 printk(KERN_INFO "sched_clock: Marking stable (%lld, %lld)->(%lld, %lld)\n", in __set_sched_clock_stable()
134 scd->tick_gtod, __gtod_offset, in __set_sched_clock_stable()
135 scd->tick_raw, __sched_clock_offset); in __set_sched_clock_stable()
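
The offset computed on line 130 is what makes the switch continuous: at the tick where the transition happens, sched_clock() + __sched_clock_offset must equal the GTOD-based value tick_gtod + __gtod_offset. A small stand-alone check of that arithmetic (user-space C, all sample values invented):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Invented snapshot taken at one tick. */
	uint64_t tick_raw    = 1000000000ULL;	/* raw sched_clock() at the tick */
	uint64_t tick_gtod   = 1000100000ULL;	/* GTOD value at the same tick   */
	uint64_t gtod_offset = 0;

	/* Same expression as line 130 above. */
	int64_t sched_clock_offset =
		(int64_t)(tick_gtod + gtod_offset) - (int64_t)tick_raw;

	/* Old (unstable) and new (stable) formulations agree at the tick. */
	printf("unstable: %llu\n", (unsigned long long)(tick_gtod + gtod_offset));
	printf("stable:   %llu\n", (unsigned long long)(tick_raw + sched_clock_offset));
	return 0;
}
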
142 * If we ever get here, we're screwed, because we found out -- typically after
143 * the fact -- that TSC wasn't good. This means all our clocksources (including
149 * The only way to fully avoid random clock jumps is to boot with:
155 int cpu; in __sched_clock_work()
161 scd->clock = scd->tick_gtod + __gtod_offset; in __sched_clock_work()
168 printk(KERN_WARNING "TSC found unstable after boot, most likely due to broken BIOS. Use 'tsc=unstable'.\n"); in __sched_clock_work()
169 printk(KERN_INFO "sched_clock: Marking unstable (%lld, %lld)<-(%lld, %lld)\n", in __sched_clock_work()
170 scd->tick_gtod, __gtod_offset, in __sched_clock_work()
171 scd->tick_raw, __sched_clock_offset); in __sched_clock_work()
202 __gtod_offset = (scd->tick_raw + __sched_clock_offset) - scd->tick_gtod; in __sched_clock_gtod_offset()
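
Line 202 is the mirror image of line 130: when the clock has to switch the other way, __gtod_offset is chosen so that the GTOD-based expression takes over exactly where the stable one left off. The invariant, spelled out as a stand-alone check (values invented):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t tick_raw = 500, tick_gtod = 720, sched_clock_offset = 300;

	/* Same expression as line 202 above. */
	uint64_t gtod_offset = (tick_raw + sched_clock_offset) - tick_gtod;

	/* Both clock formulations coincide at the tick. */
	assert(tick_gtod + gtod_offset == tick_raw + sched_clock_offset);
	printf("gtod_offset = %llu\n", (unsigned long long)gtod_offset);
	return 0;
}
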
221 * We run this as late_initcall() such that it runs after all built-in drivers,
224 static int __init sched_clock_init_late(void) in sched_clock_init_late()
249 return (s64)(x - y) < 0 ? x : y; in wrap_min()
254 return (s64)(x - y) > 0 ? x : y; in wrap_max()
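
wrap_min() and wrap_max() on lines 249 and 254 are min/max written with a signed difference, so they keep returning the "earlier"/"later" value even when a u64 counter wraps around. A runnable user-space copy that demonstrates the wraparound case:

#include <stdio.h>
#include <stdint.h>

static uint64_t wrap_min(uint64_t x, uint64_t y)
{
	return (int64_t)(x - y) < 0 ? x : y;
}

static uint64_t wrap_max(uint64_t x, uint64_t y)
{
	return (int64_t)(x - y) > 0 ? x : y;
}

int main(void)
{
	uint64_t before_wrap = UINT64_MAX - 10;	/* just before wraparound */
	uint64_t after_wrap  = 5;		/* just after wraparound  */

	/* A plain comparison would call 5 the smaller value; the wrap-safe
	 * helpers see a small signed delta and treat it as "later" instead.
	 */
	printf("wrap_min = %llu\n", (unsigned long long)wrap_min(before_wrap, after_wrap));
	printf("wrap_max = %llu\n", (unsigned long long)wrap_max(before_wrap, after_wrap));
	return 0;
}
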
260 * - filter out backward motion
261 * - use the GTOD tick value to create a window to filter crazy TSC values
265 u64 now, clock, old_clock, min_clock, max_clock, gtod; in sched_clock_local() local
270 delta = now - scd->tick_raw; in sched_clock_local()
274 old_clock = scd->clock; in sched_clock_local()
277 * scd->clock = clamp(scd->tick_gtod + delta, in sched_clock_local()
278 * max(scd->tick_gtod, scd->clock), in sched_clock_local()
279 * scd->tick_gtod + TICK_NSEC); in sched_clock_local()
282 gtod = scd->tick_gtod + __gtod_offset; in sched_clock_local()
283 clock = gtod + delta; in sched_clock_local()
287 clock = wrap_max(clock, min_clock); in sched_clock_local()
288 clock = wrap_min(clock, max_clock); in sched_clock_local()
290 if (!raw_try_cmpxchg64(&scd->clock, &old_clock, clock)) in sched_clock_local()
293 return clock; in sched_clock_local()
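
Putting the pieces together, sched_clock_local() implements the clamp spelled out in the comment on lines 277-279: the candidate value gtod + delta may never go backwards past the previously returned clock and may never run more than one tick ahead of GTOD. A user-space sketch of that filtering (TICK_NSEC and the sample values are invented):

#include <stdio.h>
#include <stdint.h>

#define TICK_NSEC 1000000ULL	/* pretend 1 ms ticks (HZ=1000) */

static uint64_t wrap_min(uint64_t x, uint64_t y)
{
	return (int64_t)(x - y) < 0 ? x : y;
}

static uint64_t wrap_max(uint64_t x, uint64_t y)
{
	return (int64_t)(x - y) > 0 ? x : y;
}

static uint64_t clamp_clock(uint64_t gtod, uint64_t old_clock, uint64_t delta)
{
	uint64_t clock     = gtod + delta;		/* candidate value   */
	uint64_t min_clock = wrap_max(gtod, old_clock);	/* never go backward */
	uint64_t max_clock = gtod + TICK_NSEC;		/* never run ahead   */

	clock = wrap_max(clock, min_clock);
	clock = wrap_min(clock, max_clock);
	return clock;
}

int main(void)
{
	uint64_t gtod = 5000000, old_clock = 5200000;

	printf("sane delta: %llu\n", (unsigned long long)clamp_clock(gtod, old_clock, 300000));
	printf("backward:   %llu\n", (unsigned long long)clamp_clock(gtod, old_clock, 100000));
	printf("crazy TSC:  %llu\n", (unsigned long long)clamp_clock(gtod, old_clock, 900000000));
	return 0;
}
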
298 u64 clock; in local_clock_noinstr() local
306 clock = sched_clock_local(this_scd()); in local_clock_noinstr()
308 return clock; in local_clock_noinstr()
330 * Careful here: The local and the remote clock values need to in sched_clock_remote()
336 * 32-bit kernels as an NMI could use sched_clock_local() via the in sched_clock_remote()
338 * the low 32-bit and the high 32-bit portion. in sched_clock_remote()
342 * We must enforce atomic readout on 32-bit, otherwise the in sched_clock_remote()
344 * the low 32-bit and the high 32-bit portion. in sched_clock_remote()
346 remote_clock = cmpxchg64(&scd->clock, 0, 0); in sched_clock_remote()
349 * On 64-bit kernels the read of [my]scd->clock is atomic versus the in sched_clock_remote()
350 * update, so we can avoid the above 32-bit dance. in sched_clock_remote()
354 this_clock = my_scd->clock; in sched_clock_remote()
355 remote_clock = scd->clock; in sched_clock_remote()
364 if (likely((s64)(remote_clock - this_clock) < 0)) { in sched_clock_remote()
365 ptr = &scd->clock; in sched_clock_remote()
372 ptr = &my_scd->clock; in sched_clock_remote()
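
The branch on line 364 decides which of the two per-CPU clocks is behind and pulls it forward with a compare-and-swap; if the cmpxchg loses a race, the whole thing is retried with fresh values. A simplified, single-file sketch of that reconciliation using C11 atomics (the names and the retry loop are my own; the kernel version additionally worries about 32-bit readout atomicity, as the comments above explain):

#include <stdio.h>
#include <stdatomic.h>
#include <stdint.h>

static uint64_t reconcile(_Atomic uint64_t *this_clock, _Atomic uint64_t *remote_clock)
{
	for (;;) {
		uint64_t this_val   = atomic_load(this_clock);
		uint64_t remote_val = atomic_load(remote_clock);
		_Atomic uint64_t *ptr;
		uint64_t old_val, new_val;

		if ((int64_t)(remote_val - this_val) < 0) {
			/* Remote clock is behind: pull it forward to ours. */
			ptr = remote_clock;
			old_val = remote_val;
			new_val = this_val;
		} else {
			/* We are behind (or equal): pull ourselves forward. */
			ptr = this_clock;
			old_val = this_val;
			new_val = remote_val;
		}

		if (atomic_compare_exchange_strong(ptr, &old_val, new_val))
			return new_val;
		/* Lost a race with another updater: reload and retry. */
	}
}

int main(void)
{
	_Atomic uint64_t local = 1200, remote = 1000;

	printf("reconciled = %llu\n", (unsigned long long)reconcile(&local, &remote));
	printf("local=%llu remote=%llu\n",
	       (unsigned long long)atomic_load(&local),
	       (unsigned long long)atomic_load(&remote));
	return 0;
}
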
388 notrace u64 sched_clock_cpu(int cpu) in sched_clock_cpu()
391 u64 clock; in sched_clock_cpu() local
403 clock = sched_clock_remote(scd); in sched_clock_cpu()
405 clock = sched_clock_local(scd); in sched_clock_cpu()
408 return clock; in sched_clock_cpu()
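
The two calls at lines 403 and 405 are selected on whether the requested CPU is the one we are running on. A sketch of the dispatch, with the stable-clock fast path reconstructed from the surrounding lines (details such as the not-yet-running check are omitted and may differ between kernel versions):

notrace u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd;
	u64 clock;

	if (sched_clock_stable())
		return sched_clock() + __sched_clock_offset;	/* stable fast path */

	preempt_disable_notrace();
	scd = cpu_sdc(cpu);

	if (cpu != smp_processor_id())
		clock = sched_clock_remote(scd);	/* cross-CPU read */
	else
		clock = sched_clock_local(scd);		/* local case */
	preempt_enable_notrace();

	return clock;
}
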
437 * The watchdog just found this TSC to (still) be stable, so now is a in sched_clock_tick_stable()
447 * We are going deep-idle (IRQs are disabled):
484 notrace u64 sched_clock_cpu(int cpu) in sched_clock_cpu()
495 * Running clock - returns the time that has elapsed while a guest has been
500 * Architectures and sub-architectures can override this.
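
The block this last comment introduces ends, in mainline, with a weak default that simply forwards to local_clock(); reproduced here as a sketch so the listing does not stop mid-thought. On a guest, an architecture override is expected to subtract the time the vCPU was not running under the hypervisor:

u64 __weak running_clock(void)
{
	/* Bare-metal default: no steal time to subtract. */
	return local_clock();
}
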