Lines Matching +full:switch +full:- +full:freq +full:- +full:select

1 // SPDX-License-Identifier: GPL-2.0+
20 #include "tick-internal.h"
25 u64 delta = clocksource_delta(end, start, cs->mask); in cycles_to_nsec_safe()
27 if (likely(delta < cs->max_cycles)) in cycles_to_nsec_safe()
28 return clocksource_cyc2ns(delta, cs->mult, cs->shift); in cycles_to_nsec_safe()
30 return mul_u64_u32_shr(delta, cs->mult, cs->shift); in cycles_to_nsec_safe()
34 * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
69 sftacc--; in clocks_calc_mult_shift()
76 for (sft = 32; sft > 0; sft--) { in clocks_calc_mult_shift()
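
A minimal user-space sketch of the idea behind the two fragments above: derive a mult/shift pair (mirroring the clocks_calc_mult_shift() approach, not the kernel code itself) and convert cycles to nanoseconds the way clocksource_cyc2ns() does, i.e. ns = (cycles * mult) >> shift. The 24 MHz rate and 600 s conversion range are hypothetical.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000u

static void calc_mult_shift(uint32_t *mult, uint32_t *shift,
                            uint32_t from, uint32_t to, uint32_t maxsec)
{
        uint64_t tmp;
        uint32_t sft, sftacc = 32;

        /* Bound the shift so maxsec seconds worth of input cycles cannot
         * overflow 64 bits after the multiplication. */
        tmp = ((uint64_t)maxsec * from) >> 32;
        while (tmp) {
                tmp >>= 1;
                sftacc--;
        }

        /* Pick the largest shift (best precision) whose multiplier fits. */
        for (sft = 32; sft > 0; sft--) {
                tmp = (((uint64_t)to << sft) + from / 2) / from;
                if ((tmp >> sftacc) == 0)
                        break;
        }
        *mult = (uint32_t)tmp;
        *shift = sft;
}

int main(void)
{
        uint32_t mult, shift;
        uint64_t cycles = 24000000;     /* one second at a hypothetical 24 MHz */

        calc_mult_shift(&mult, &shift, 24000000, NSEC_PER_SEC, 600);
        printf("mult=%u shift=%u -> %llu ns\n", mult, shift,
               (unsigned long long)((cycles * mult) >> shift));
        return 0;
}
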
88 /*[Clocksource internal variables]---------
98 * Name of the user-specified clocksource.
123 * a lower bound for cs->uncertainty_margin values when registering clocks.
129 * precise (for example, with a sub-nanosecond period), the maximum
142 * Default for maximum permissible skew when cs->uncertainty_margin is
143 * not specified, and the lower bound even when cs->uncertainty_margin
145 * clocks with unspecified cs->uncertainty_margin, so this macro is used
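
Put differently, a clocksource's uncertainty_margin defaults to one clock period but is never allowed below twice this floor; the actual computation appears later in __clocksource_update_freq_scale(). A tiny user-space model of that policy, where the 125 us floor is a hypothetical stand-in for the build-time constant:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC            1000000000u
#define WATCHDOG_MAX_SKEW_NS    125000u         /* hypothetical floor, ns */

static uint32_t pick_uncertainty_margin(uint32_t scale, uint32_t freq)
{
        /* One clock period, clamped to at least twice the skew floor. */
        uint32_t margin = NSEC_PER_SEC / (scale * freq);

        if (margin < 2 * WATCHDOG_MAX_SKEW_NS)
                margin = 2 * WATCHDOG_MAX_SKEW_NS;
        return margin;
}

int main(void)
{
        /* Even very precise clocks end up on the floor. */
        printf("1 GHz  -> %u ns\n", pick_uncertainty_margin(1, 1000000000u));
        printf("24 MHz -> %u ns\n", pick_uncertainty_margin(1, 24000000u));
        return 0;
}
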
196 cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG); in __clocksource_unstable()
197 cs->flags |= CLOCK_SOURCE_UNSTABLE; in __clocksource_unstable()
201 * re-rate and re-select. in __clocksource_unstable()
203 if (list_empty(&cs->list)) { in __clocksource_unstable()
204 cs->rating = 0; in __clocksource_unstable()
208 if (cs->mark_unstable) in __clocksource_unstable()
209 cs->mark_unstable(cs); in __clocksource_unstable()
217 * clocksource_mark_unstable - mark clocksource unstable via watchdog
221 * it defers demotion and re-selection to a kthread.
228 if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) { in clocksource_mark_unstable()
229 if (!list_empty(&cs->list) && list_empty(&cs->wd_list)) in clocksource_mark_unstable()
230 list_add(&cs->wd_list, &watchdog_list); in clocksource_mark_unstable()
247 int64_t md = 2 * watchdog->uncertainty_margin; in cs_watchdog_read()
255 *wdnow = watchdog->read(watchdog); in cs_watchdog_read()
256 *csnow = cs->read(cs); in cs_watchdog_read()
257 wd_end = watchdog->read(watchdog); in cs_watchdog_read()
258 wd_end2 = watchdog->read(watchdog); in cs_watchdog_read()
262 if (wd_delay <= md + cs->uncertainty_margin) { in cs_watchdog_read()
265 smp_processor_id(), watchdog->name, nretries); in cs_watchdog_read()
275 * If consecutive WD read-back delay > md, report in cs_watchdog_read()
284 …warn("timekeeping watchdog on CPU%d: wd-%s-wd excessive read-back delay of %lldns vs. limit of %ld… in cs_watchdog_read()
285 smp_processor_id(), cs->name, wd_delay, WATCHDOG_MAX_SKEW, wd_seq_delay, nretries, cs->name); in cs_watchdog_read()
289 pr_info("timekeeping watchdog on CPU%d: %s wd-wd read-back delay of %lldns\n", in cs_watchdog_read()
290 smp_processor_id(), watchdog->name, wd_seq_delay); in cs_watchdog_read()
291 pr_info("wd-%s-wd read-back delay of %lldns, clock-skew test skipped!\n", in cs_watchdog_read()
292 cs->name, wd_delay); in cs_watchdog_read()
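
The retry loop above brackets each read of the clock under test between two watchdog reads and only trusts the sample when that bracket is tight. A user-space sketch of the same pattern; CLOCK_MONOTONIC_RAW as the reference, CLOCK_MONOTONIC as the clock under test, and the 100 us bound are all hypothetical stand-ins.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_ns(clockid_t id)
{
        struct timespec ts;

        clock_gettime(id, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

int main(void)
{
        const uint64_t max_bracket_ns = 100000;

        for (int retries = 0; retries < 3; retries++) {
                uint64_t wd1 = now_ns(CLOCK_MONOTONIC_RAW);     /* "watchdog" read */
                uint64_t cs  = now_ns(CLOCK_MONOTONIC);         /* clock under test */
                uint64_t wd2 = now_ns(CLOCK_MONOTONIC_RAW);     /* bracketing read */

                if (wd2 - wd1 <= max_bracket_ns) {
                        printf("good sample %llu ns (bracket %llu ns, attempt %d)\n",
                               (unsigned long long)cs,
                               (unsigned long long)(wd2 - wd1), retries + 1);
                        return 0;
                }
        }
        printf("no tight bracket obtained, skew test skipped\n");
        return 1;
}
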
317 /* Make sure to select at least one CPU other than the current CPU. */ in clocksource_verify_choose_cpus()
330 * Randomly select the specified number of CPUs. If the same in clocksource_verify_choose_cpus()
338 cpu = cpumask_next(cpu - 1, cpu_online_mask); in clocksource_verify_choose_cpus()
353 csnow_mid = cs->read(cs); in clocksource_verify_one_cpu()
373 pr_warn("Not enough CPUs to check clocksource '%s'.\n", cs->name); in clocksource_verify_percpu()
377 …pr_warn("Checking clocksource %s synchronization from CPU %d to CPUs %*pbl.\n", cs->name, testcpu,… in clocksource_verify_percpu()
381 csnow_begin = cs->read(cs); in clocksource_verify_percpu()
383 csnow_end = cs->read(cs); in clocksource_verify_percpu()
384 delta = (s64)((csnow_mid - csnow_begin) & cs->mask); in clocksource_verify_percpu()
387 delta = (csnow_end - csnow_mid) & cs->mask; in clocksource_verify_percpu()
400 cpumask_pr_args(&cpus_ahead), testcpu, cs->name); in clocksource_verify_percpu()
403 cpumask_pr_args(&cpus_behind), testcpu, cs->name); in clocksource_verify_percpu()
405 pr_warn(" CPU %d check durations %lldns - %lldns for clocksource %s.\n", in clocksource_verify_percpu()
406 testcpu, cs_nsec_min, cs_nsec_max, cs->name); in clocksource_verify_percpu()
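
The per-CPU check above subtracts raw counter readings modulo cs->mask and treats a negative signed result as the remote reading appearing behind. A small sketch of that wrap-safe delta for a full-width 64-bit counter; the mask and sample values are hypothetical.

#include <stdint.h>
#include <stdio.h>

static int64_t masked_delta(uint64_t later, uint64_t earlier, uint64_t mask)
{
        /* Same expression as the kernel lines above, for a 64-bit mask. */
        return (int64_t)((later - earlier) & mask);
}

int main(void)
{
        uint64_t mask = ~0ull;  /* free-running 64-bit counter */

        printf("%lld\n", (long long)masked_delta(100, 60, mask));       /*  40: ahead  */
        printf("%lld\n", (long long)masked_delta(60, 100, mask));       /* -40: behind */
        return 0;
}
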
415 cs->flags &= ~CLOCK_SOURCE_WATCHDOG; in clocksource_reset_watchdog()
438 if (cs->flags & CLOCK_SOURCE_UNSTABLE) { in clocksource_watchdog()
462 * cs->last could keep unchanged for 5 minutes, reset in clocksource_watchdog()
471 if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) || in clocksource_watchdog()
473 cs->flags |= CLOCK_SOURCE_WATCHDOG; in clocksource_watchdog()
474 cs->wd_last = wdnow; in clocksource_watchdog()
475 cs->cs_last = csnow; in clocksource_watchdog()
479 wd_nsec = cycles_to_nsec_safe(watchdog, cs->wd_last, wdnow); in clocksource_watchdog()
480 cs_nsec = cycles_to_nsec_safe(cs, cs->cs_last, csnow); in clocksource_watchdog()
481 wdlast = cs->wd_last; /* save these in case we print them */ in clocksource_watchdog()
482 cslast = cs->cs_last; in clocksource_watchdog()
483 cs->cs_last = csnow; in clocksource_watchdog()
484 cs->wd_last = wdnow; in clocksource_watchdog()
511 md = cs->uncertainty_margin + watchdog->uncertainty_margin; in clocksource_watchdog()
512 if (abs(cs_nsec - wd_nsec) > md) { in clocksource_watchdog()
518 smp_processor_id(), cs->name); in clocksource_watchdog()
520 watchdog->name, wd_nsec, wdnow, wdlast, watchdog->mask); in clocksource_watchdog()
522 cs->name, cs_nsec, csnow, cslast, cs->mask); in clocksource_watchdog()
523 cs_wd_msec = div_s64_rem(cs_nsec - wd_nsec, 1000 * 1000, &wd_rem); in clocksource_watchdog()
526 cs->name, cs_nsec - wd_nsec, cs_wd_msec, watchdog->name, wd_nsec, wd_msec); in clocksource_watchdog()
528 pr_warn(" '%s' is current clocksource.\n", cs->name); in clocksource_watchdog()
530 … '%s' (not '%s') is current clocksource.\n", curr_clocksource->name, cs->name); in clocksource_watchdog()
537 if (cs == curr_clocksource && cs->tick_stable) in clocksource_watchdog()
538 cs->tick_stable(cs); in clocksource_watchdog()
540 if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && in clocksource_watchdog()
541 (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) && in clocksource_watchdog()
542 (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) { in clocksource_watchdog()
543 /* Mark it valid for high-res. */ in clocksource_watchdog()
544 cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; in clocksource_watchdog()
562 cs->flags |= CLOCK_SOURCE_RESELECT; in clocksource_watchdog()
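
Condensed, the decision in clocksource_watchdog() above is: convert the same interval through both clocks and mark the clocksource unstable when the difference exceeds the combined uncertainty margins. A worked example with hypothetical numbers:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int64_t cs_nsec = 500123456;        /* interval seen by the clock under test */
        int64_t wd_nsec = 500000111;        /* same interval seen by the watchdog    */
        int64_t md      = 100000 + 100000;  /* combined uncertainty margins, ns      */
        int64_t diff    = cs_nsec - wd_nsec;

        if (diff < 0)
                diff = -diff;
        printf("skew %" PRId64 " ns vs. margin %" PRId64 " ns: %s\n", diff, md,
               diff > md ? "mark unstable" : "keep clocksource");
        return 0;
}
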
622 INIT_LIST_HEAD(&cs->wd_list); in clocksource_enqueue_watchdog()
624 if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) { in clocksource_enqueue_watchdog()
626 list_add(&cs->wd_list, &watchdog_list); in clocksource_enqueue_watchdog()
627 cs->flags &= ~CLOCK_SOURCE_WATCHDOG; in clocksource_enqueue_watchdog()
630 if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) in clocksource_enqueue_watchdog()
631 cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; in clocksource_enqueue_watchdog()
648 if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) in clocksource_select_watchdog()
656 if (!watchdog || cs->rating > watchdog->rating) in clocksource_select_watchdog()
675 if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) { in clocksource_dequeue_watchdog()
677 list_del_init(&cs->wd_list); in clocksource_dequeue_watchdog()
688 int select = 0; in __clocksource_watchdog_kthread() local
690 /* Do any required per-CPU skew verification. */ in __clocksource_watchdog_kthread()
692 curr_clocksource->flags & CLOCK_SOURCE_UNSTABLE && in __clocksource_watchdog_kthread()
693 curr_clocksource->flags & CLOCK_SOURCE_VERIFY_PERCPU) in __clocksource_watchdog_kthread()
698 if (cs->flags & CLOCK_SOURCE_UNSTABLE) { in __clocksource_watchdog_kthread()
699 list_del_init(&cs->wd_list); in __clocksource_watchdog_kthread()
701 select = 1; in __clocksource_watchdog_kthread()
703 if (cs->flags & CLOCK_SOURCE_RESELECT) { in __clocksource_watchdog_kthread()
704 cs->flags &= ~CLOCK_SOURCE_RESELECT; in __clocksource_watchdog_kthread()
705 select = 1; in __clocksource_watchdog_kthread()
712 return select; in __clocksource_watchdog_kthread()
733 if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) in clocksource_enqueue_watchdog()
734 cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; in clocksource_enqueue_watchdog()
759 if (!(cs->flags & CLOCK_SOURCE_SUSPEND_NONSTOP)) in __clocksource_suspend_select()
767 if (cs->suspend || cs->resume) { in __clocksource_suspend_select()
769 cs->name); in __clocksource_suspend_select()
773 if (!suspend_clocksource || cs->rating > suspend_clocksource->rating) in __clocksource_suspend_select()
778 * clocksource_suspend_select - Select the best clocksource for suspend timing
779 * @fallback: if true, select a fallback clocksource
799 * clocksource_start_suspend_timing - Start measuring the suspend timing
807 * that means processes are frozen, non-boot cpus and interrupts are disabled
826 if (suspend_clocksource->enable && in clocksource_start_suspend_timing()
827 suspend_clocksource->enable(suspend_clocksource)) { in clocksource_start_suspend_timing()
828 pr_warn_once("Failed to enable the non-suspend-able clocksource.\n"); in clocksource_start_suspend_timing()
832 suspend_start = suspend_clocksource->read(suspend_clocksource); in clocksource_start_suspend_timing()
836 * clocksource_stop_suspend_timing - Stop measuring the suspend timing
864 now = suspend_clocksource->read(suspend_clocksource); in clocksource_stop_suspend_timing()
873 if (!clocksource_is_suspend(cs) && suspend_clocksource->disable) in clocksource_stop_suspend_timing()
874 suspend_clocksource->disable(suspend_clocksource); in clocksource_stop_suspend_timing()
880 * clocksource_suspend - suspend the clocksource(s)
887 if (cs->suspend) in clocksource_suspend()
888 cs->suspend(cs); in clocksource_suspend()
892 * clocksource_resume - resume the clocksource(s)
899 if (cs->resume) in clocksource_resume()
900 cs->resume(cs); in clocksource_resume()
906 * clocksource_touch_watchdog - Update watchdog
918 * clocksource_max_adjustment - Returns max adjustment amount
928 ret = (u64)cs->mult * 11; in clocksource_max_adjustment()
934 * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted
954 * cyc2ns() function without overflowing a 64-bit result. in clocks_calc_max_nsecs()
966 max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift); in clocks_calc_max_nsecs()
979 * clocksource_update_max_deferment - Updates the clocksource max_idle_ns & max_cycles
985 cs->max_idle_ns = clocks_calc_max_nsecs(cs->mult, cs->shift, in clocksource_update_max_deferment()
986 cs->maxadj, cs->mask, in clocksource_update_max_deferment()
987 &cs->max_cycles); in clocksource_update_max_deferment()
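
Putting these fragments together: maxadj is roughly 11% of mult, and the deferment limit is the largest cycle count that neither overflows 64-bit cyc2ns arithmetic nor exceeds the counter mask, converted with the reduced multiplier and then halved for headroom. A user-space sketch using the hypothetical 24 MHz mult/shift pair from the earlier example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t mult = 699050667, shift = 24;          /* ~41.67 ns per cycle */
        uint32_t maxadj = (uint64_t)mult * 11 / 100;    /* ~11% adjustment headroom */
        uint64_t mask = 0xffffffffull;                  /* 32-bit counter */

        uint64_t max_cycles = UINT64_MAX / (mult + maxadj);
        if (max_cycles > mask)
                max_cycles = mask;

        uint64_t max_nsecs = (max_cycles * (mult - maxadj)) >> shift;
        max_nsecs >>= 1;                                /* keep 50% in reserve */

        printf("max_cycles=%llu max_idle_ns=%llu\n",
               (unsigned long long)max_cycles, (unsigned long long)max_nsecs);
        return 0;
}
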
1005 if (oneshot && !(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES)) in clocksource_find_best()
1029 if (strcmp(cs->name, override_name) != 0) in __clocksource_select()
1032 * Check to make sure we don't switch to a non-highres in __clocksource_select()
1036 if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) { in __clocksource_select()
1038 if (cs->flags & CLOCK_SOURCE_UNSTABLE) { in __clocksource_select()
1039 …pr_warn("Override clocksource %s is unstable and not HRT compatible - cannot switch while in HRT/N… in __clocksource_select()
1040 cs->name); in __clocksource_select()
1047 pr_info("Override clocksource %s is not currently HRT compatible - deferring\n", in __clocksource_select()
1048 cs->name); in __clocksource_select()
1058 pr_info("Switched to clocksource %s\n", best->name); in __clocksource_select()
1064 * clocksource_select - Select the best clocksource available
1068 * Select the clocksource with the best rating, or the clocksource,
1082 * clocksource_done_booting - Called near the end of core bootup
1113 if (tmp->rating < cs->rating) in clocksource_enqueue()
1115 entry = &tmp->list; in clocksource_enqueue()
1117 list_add(&cs->list, entry); in clocksource_enqueue()
1121 * __clocksource_update_freq_scale - Used to update a clocksource with a new freq
1123 * @scale: Scale factor multiplied against freq to get clocksource hz
1124 * @freq: clocksource frequency (cycles per second) divided by scale
1126 * This should only be called from the clocksource->enable() method.
1132 void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq) in __clocksource_update_freq_scale() argument
1137 * Default clocksources are *special* and self-define their mult/shift. in __clocksource_update_freq_scale()
1138 * But, you're not special, so you should specify a freq value. in __clocksource_update_freq_scale()
1140 if (freq) { in __clocksource_update_freq_scale()
1143 * wrapping around. For clocksources which have a mask > 32-bit in __clocksource_update_freq_scale()
1147 * clocksource with mask >= 40-bit and f >= 4GHz. That maps to in __clocksource_update_freq_scale()
1150 sec = cs->mask; in __clocksource_update_freq_scale()
1151 do_div(sec, freq); in __clocksource_update_freq_scale()
1155 else if (sec > 600 && cs->mask > UINT_MAX) in __clocksource_update_freq_scale()
1158 clocks_calc_mult_shift(&cs->mult, &cs->shift, freq, in __clocksource_update_freq_scale()
1164 * both scale and freq are non-zero, calculate the clock period, but in __clocksource_update_freq_scale()
1166 * However, if either of scale or freq is zero, be very conservative in __clocksource_update_freq_scale()
1167 * and take the tens-of-milliseconds WATCHDOG_THRESHOLD value in __clocksource_update_freq_scale()
1177 if (scale && freq && !cs->uncertainty_margin) { in __clocksource_update_freq_scale()
1178 cs->uncertainty_margin = NSEC_PER_SEC / (scale * freq); in __clocksource_update_freq_scale()
1179 if (cs->uncertainty_margin < 2 * WATCHDOG_MAX_SKEW) in __clocksource_update_freq_scale()
1180 cs->uncertainty_margin = 2 * WATCHDOG_MAX_SKEW; in __clocksource_update_freq_scale()
1181 } else if (!cs->uncertainty_margin) { in __clocksource_update_freq_scale()
1182 cs->uncertainty_margin = WATCHDOG_THRESHOLD; in __clocksource_update_freq_scale()
1184 WARN_ON_ONCE(cs->uncertainty_margin < 2 * WATCHDOG_MAX_SKEW); in __clocksource_update_freq_scale()
1190 cs->maxadj = clocksource_max_adjustment(cs); in __clocksource_update_freq_scale()
1191 while (freq && ((cs->mult + cs->maxadj < cs->mult) in __clocksource_update_freq_scale()
1192 || (cs->mult - cs->maxadj > cs->mult))) { in __clocksource_update_freq_scale()
1193 cs->mult >>= 1; in __clocksource_update_freq_scale()
1194 cs->shift--; in __clocksource_update_freq_scale()
1195 cs->maxadj = clocksource_max_adjustment(cs); in __clocksource_update_freq_scale()
1199 * Only warn for *special* clocksources that self-define in __clocksource_update_freq_scale()
1200 * their mult/shift values and don't specify a freq. in __clocksource_update_freq_scale()
1202 WARN_ONCE(cs->mult + cs->maxadj < cs->mult, in __clocksource_update_freq_scale()
1204 cs->name); in __clocksource_update_freq_scale()
1209 cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns); in __clocksource_update_freq_scale()
1214 * __clocksource_register_scale - Used to install new clocksources
1216 * @scale: Scale factor multiplied against freq to get clocksource hz
1217 * @freq: clocksource frequency (cycles per second) divided by scale
1219 * Returns -EBUSY if registration fails, zero otherwise.
1224 int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq) in __clocksource_register_scale() argument
1230 if (WARN_ON_ONCE((unsigned int)cs->id >= CSID_MAX)) in __clocksource_register_scale()
1231 cs->id = CSID_GENERIC; in __clocksource_register_scale()
1232 if (cs->vdso_clock_mode < 0 || in __clocksource_register_scale()
1233 cs->vdso_clock_mode >= VDSO_CLOCKMODE_MAX) { in __clocksource_register_scale()
1235 cs->name, cs->vdso_clock_mode); in __clocksource_register_scale()
1236 cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE; in __clocksource_register_scale()
1240 __clocksource_update_freq_scale(cs, scale, freq); in __clocksource_register_scale()
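
For context, a minimal sketch of how a driver typically reaches this registration path: define a struct clocksource and call clocksource_register_hz(), which is a wrapper around __clocksource_register_scale(cs, 1, hz). The MMIO counter, its 24 MHz rate and every name below are hypothetical, not taken from this file.

#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/io.h>

static void __iomem *example_counter_base;      /* hypothetical free-running counter */

static u64 example_cs_read(struct clocksource *cs)
{
        return readl_relaxed(example_counter_base);
}

static struct clocksource example_cs = {
        .name   = "example-counter",
        .rating = 300,
        .read   = example_cs_read,
        .mask   = CLOCKSOURCE_MASK(32),
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init example_cs_init(void)
{
        /* In a real driver this runs from the timer init path after the
         * counter has been mapped and started. */
        return clocksource_register_hz(&example_cs, 24000000);
}
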
1260 list_del(&cs->list); in __clocksource_change_rating()
1261 cs->rating = rating; in __clocksource_change_rating()
1266 * clocksource_change_rating - Change the rating of a registered clocksource
1294 /* Select and try to install a replacement watchdog. */ in clocksource_unbind()
1297 return -EBUSY; in clocksource_unbind()
1301 /* Select and try to install a replacement clock source */ in clocksource_unbind()
1304 return -EBUSY; in clocksource_unbind()
1309 * Select and try to install a replacement suspend clocksource. in clocksource_unbind()
1318 list_del_init(&cs->list); in clocksource_unbind()
1325 * clocksource_unregister - remove a registered clocksource
1333 if (!list_empty(&cs->list)) in clocksource_unregister()
1342 * current_clocksource_show - sysfs interface for current clocksource
1356 count = sysfs_emit(buf, "%s\n", curr_clocksource->name); in current_clocksource_show()
1368 return -EINVAL; in sysfs_get_uname()
1371 if (buf[cnt-1] == '\n') in sysfs_get_uname()
1372 cnt--; in sysfs_get_uname()
1380 * current_clocksource_store - interface for manually overriding clocksource
1408 * unbind_clocksource_store - interface for manually unbinding clocksource
1428 ret = -ENODEV; in unbind_clocksource_store()
1431 if (strcmp(cs->name, name)) in unbind_clocksource_store()
1443 * available_clocksource_show - sysfs interface for listing clocksources
1460 * Don't show non-HRES clocksource if the tick code is in available_clocksource_show()
1464 (src->flags & CLOCK_SOURCE_VALID_FOR_HRES)) in available_clocksource_show()
1466 max((ssize_t)PAGE_SIZE - count, (ssize_t)0), in available_clocksource_show()
1467 "%s ", src->name); in available_clocksource_show()
1472 max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n"); in available_clocksource_show()
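
These show/store handlers back the standard sysfs attributes under /sys/devices/system/clocksource/clocksource0/. A small user-space sketch that reads the current selection through that interface (error handling kept minimal):

#include <stdio.h>

int main(void)
{
        char name[64];
        FILE *f = fopen("/sys/devices/system/clocksource/clocksource0/current_clocksource", "r");

        if (!f)
                return 1;
        if (fgets(name, sizeof(name), f))
                printf("current clocksource: %s", name);
        fclose(f);
        return 0;
}
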
1511 * boot_override_clocksource - boot clock override
1529 * boot_override_clock - Compatibility layer for deprecated boot option
1538 pr_warn("clock=pmtmr is deprecated - use clocksource=acpi_pm\n"); in boot_override_clock()
1541 pr_warn("clock= boot option is deprecated - use clocksource=xyz\n"); in boot_override_clock()