// SPDX-License-Identifier: GPL-2.0+
/*
 * This file contains the functions which manage clocksource drivers.
 *
 * Copyright (C) 2004, 2005 IBM, John Stultz ([email protected])
 */

#define pr_fmt(fmt) …

#include <linux/device.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
#include <linux/tick.h>
#include <linux/kthread.h>
#include <linux/prandom.h>
#include <linux/cpu.h>

#include "tick-internal.h"
#include "timekeeping_internal.h"

/*
 * Convert a (start, end) cycle pair read from @cs into nanoseconds.
 * NOTE(review): body elided in this view — presumably handles counter
 * wrap/overflow safely, per the "_safe" suffix; confirm against the
 * full source.
 */
static noinline u64 cycles_to_nsec_safe(struct clocksource *cs, u64 start, u64 end)
{
	…
}

/**
 * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
 * @mult:	pointer to mult variable
 * @shift:	pointer to shift variable
 * @from:	frequency to convert from
 * @to:		frequency to convert to
 * @maxsec:	guaranteed runtime conversion range in seconds
 *
 * The function evaluates the shift/mult pair for the scaled math
 * operations of clocksources and clockevents.
 *
 * @to and @from are frequency values in HZ. For clock sources @to is
 * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
 * event @to is the counter frequency and @from is NSEC_PER_SEC.
 *
 * The @maxsec conversion range argument controls the time frame in
 * seconds which must be covered by the runtime conversion with the
 * calculated mult and shift factors. This guarantees that no 64bit
 * overflow happens when the input value of the conversion is
 * multiplied with the calculated mult factor. Larger ranges may
 * reduce the conversion accuracy by choosing smaller mult and shift
 * factors.
 */
void clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
{
	…
}
EXPORT_SYMBOL_GPL(…);

/*[Clocksource internal variables]---------
 * curr_clocksource:
 *	currently selected clocksource.
 * suspend_clocksource:
 *	used to calculate the suspend time.
 * clocksource_list:
 *	linked list with the registered clocksources
 * clocksource_mutex:
 *	protects manipulations to curr_clocksource and the clocksource_list
 * override_name:
 *	Name of the user-specified clocksource.
 */
static struct clocksource *curr_clocksource;
static struct clocksource *suspend_clocksource;
static LIST_HEAD(clocksource_list);
static DEFINE_MUTEX(clocksource_mutex);
static char override_name[CS_NAME_LEN];
static int finished_booting;
static u64 suspend_start;

/*
 * Interval: 0.5sec.
 */
#define WATCHDOG_INTERVAL …
#define WATCHDOG_INTERVAL_MAX_NS …

/*
 * Threshold: 0.0312s, when doubled: 0.0625s.
 * Also a default for cs->uncertainty_margin when registering clocks.
 */
#define WATCHDOG_THRESHOLD …

/*
 * Maximum permissible delay between two readouts of the watchdog
 * clocksource surrounding a read of the clocksource being validated.
 * This delay could be due to SMIs, NMIs, or to VCPU preemptions. Used as
 * a lower bound for cs->uncertainty_margin values when registering clocks.
 *
 * The default of 500 parts per million is based on NTP's limits.
 * If a clocksource is good enough for NTP, it is good enough for us!
 */
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US
#define MAX_SKEW_USEC …
#else
#define MAX_SKEW_USEC …
#endif

#define WATCHDOG_MAX_SKEW …

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static void clocksource_watchdog_work(struct work_struct *work);
static void clocksource_select(void);

static LIST_HEAD(watchdog_list);
static struct clocksource *watchdog;
static struct timer_list watchdog_timer;
static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
static DEFINE_SPINLOCK(watchdog_lock);
static int watchdog_running;
static atomic_t watchdog_reset_pending;
static int64_t watchdog_max_interval;

static inline void clocksource_watchdog_lock(unsigned long *flags)
{
	…
}

static inline void clocksource_watchdog_unlock(unsigned long *flags)
{
	…
}

static int clocksource_watchdog_kthread(void *data);
static void __clocksource_change_rating(struct clocksource *cs, int rating);

static void clocksource_watchdog_work(struct work_struct *work)
{
	…
}

static void __clocksource_unstable(struct clocksource *cs)
{
	…
}

/**
 * clocksource_mark_unstable - mark clocksource unstable via watchdog
 * @cs:		clocksource to be marked unstable
 *
 * This function is called by the x86 TSC code to mark clocksources as unstable;
 * it defers demotion and re-selection to a kthread.
 */
void clocksource_mark_unstable(struct clocksource *cs)
{
	…
}

static int verify_n_cpus = …;
module_param(verify_n_cpus, int, 0644);

enum wd_read_status {
	…
};

static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow)
{
	…
}

static u64 csnow_mid;
static cpumask_t cpus_ahead;
static cpumask_t cpus_behind;
static cpumask_t cpus_chosen;

static void clocksource_verify_choose_cpus(void)
{
	…
}

static void clocksource_verify_one_cpu(void *csin)
{
	…
}

void clocksource_verify_percpu(struct clocksource *cs)
{
	…
}
EXPORT_SYMBOL_GPL(…);

static inline void clocksource_reset_watchdog(void)
{
	…
}

static void clocksource_watchdog(struct timer_list *unused)
{
	…
}

static inline void clocksource_start_watchdog(void)
{
	…
}

static inline void clocksource_stop_watchdog(void)
{
	…
}

static void clocksource_resume_watchdog(void)
{
	…
}

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	…
}

static void clocksource_select_watchdog(bool fallback)
{
	…
}

static void clocksource_dequeue_watchdog(struct clocksource *cs)
{
	…
}

static int __clocksource_watchdog_kthread(void)
{
	…
}

static int clocksource_watchdog_kthread(void *data)
{
	…
}

static bool clocksource_is_watchdog(struct clocksource *cs)
{
	…
}

#else /* CONFIG_CLOCKSOURCE_WATCHDOG */

/*
 * Stubs used when the clocksource watchdog is not configured: without a
 * watchdog to validate it, a continuous clocksource is immediately marked
 * valid for high-resolution use; all other watchdog hooks become no-ops.
 */
static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}

static void clocksource_select_watchdog(bool fallback) { }
static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }
static inline int __clocksource_watchdog_kthread(void) { return 0; }
static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
void clocksource_mark_unstable(struct clocksource *cs) { }
static inline void clocksource_watchdog_lock(unsigned long *flags) { }
static inline void clocksource_watchdog_unlock(unsigned long *flags) { }

#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */

static bool clocksource_is_suspend(struct clocksource *cs)
{
	…
}

static void __clocksource_suspend_select(struct clocksource *cs)
{
	…
}

/**
 * clocksource_suspend_select - Select the best clocksource for suspend timing
 * @fallback:	whether to select a fallback clocksource
 */
static void clocksource_suspend_select(bool fallback)
{
	…
}

/**
 * clocksource_start_suspend_timing - Start measuring the suspend timing
 * @cs:			current clocksource from timekeeping
 * @start_cycles:	current cycles from timekeeping
 *
 * This function will save the start cycle values of suspend timer to calculate
 * the suspend time when resuming system.
 *
 * This function is called late in the suspend process from timekeeping_suspend(),
 * that means processes are frozen, non-boot cpus and interrupts are disabled
 * now. It is therefore possible to start the suspend timer without taking the
 * clocksource mutex.
 */
void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles)
{
	…
}

/**
 * clocksource_stop_suspend_timing - Stop measuring the suspend timing
 * @cs:		current clocksource from timekeeping
 * @cycle_now:	current cycles from timekeeping
 *
 * This function will calculate the suspend time from suspend timer.
 *
 * Returns nanoseconds since suspend started, 0 if no usable suspend clocksource.
 *
 * This function is called early in the resume process from timekeeping_resume(),
 * that means there is only one cpu, no processes are running and the interrupts
 * are disabled. It is therefore possible to stop the suspend timer without
 * taking the clocksource mutex.
 */
u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
{
	…
}

/**
 * clocksource_suspend - suspend the clocksource(s)
 */
void clocksource_suspend(void)
{
	…
}

/**
 * clocksource_resume - resume the clocksource(s)
 */
void clocksource_resume(void)
{
	…
}

/**
 * clocksource_touch_watchdog - Update watchdog
 *
 * Update the watchdog after exception contexts such as kgdb so as not
 * to incorrectly trip the watchdog. This might fail when the kernel
 * was stopped in code which holds watchdog_lock.
 */
void clocksource_touch_watchdog(void)
{
	…
}

/**
 * clocksource_max_adjustment - Returns max adjustment amount
 * @cs:		Pointer to clocksource
 */
static u32 clocksource_max_adjustment(struct clocksource *cs)
{
	…
}

/**
 * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted
 * @mult:	cycle to nanosecond multiplier
 * @shift:	cycle to nanosecond divisor (power of two)
 * @maxadj:	maximum adjustment value to mult (~11%)
 * @mask:	bitmask for two's complement subtraction of non 64 bit counters
 * @max_cyc:	maximum cycle value before potential overflow (does not include
 *		any safety margin)
 *
 * NOTE: This function includes a safety margin of 50%, in other words, we
 * return half the number of nanoseconds the hardware counter can technically
 * cover. This is done so that we can potentially detect problems caused by
 * delayed timers or bad hardware, which might result in time intervals that
 * are larger than what the math used can handle without overflows.
 */
u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc)
{
	…
}

/**
 * clocksource_update_max_deferment - Updates the clocksource max_idle_ns & max_cycles
 * @cs:		Pointer to clocksource to be updated
 */
static inline void clocksource_update_max_deferment(struct clocksource *cs)
{
	…
}

static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur)
{
	…
}

static void __clocksource_select(bool skipcur)
{
	…
}

/**
 * clocksource_select - Select the best clocksource available
 *
 * Private function. Must hold clocksource_mutex when called.
 *
 * Select the clocksource with the best rating, or the clocksource,
 * which is selected by userspace override.
 */
static void clocksource_select(void)
{
	…
}

static void clocksource_select_fallback(void)
{
	…
}

/*
 * clocksource_done_booting - Called near the end of core bootup
 *
 * Hack to avoid lots of clocksource churn at boot time.
 * We use fs_initcall because we want this to start before
 * device_initcall but after subsys_initcall.
 */
static int __init clocksource_done_booting(void)
{
	…
}
fs_initcall(clocksource_done_booting);

/*
 * Enqueue the clocksource sorted by rating
 */
static void clocksource_enqueue(struct clocksource *cs)
{
	…
}

/**
 * __clocksource_update_freq_scale - Used to update clocksource with new freq
 * @cs:		clocksource to be registered
 * @scale:	Scale factor multiplied against freq to get clocksource hz
 * @freq:	clocksource frequency (cycles per second) divided by scale
 *
 * This should only be called from the clocksource->enable() method.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * __clocksource_update_freq_hz() or __clocksource_update_freq_khz() helper
 * functions.
 */
void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	…
}
EXPORT_SYMBOL_GPL(…);

/**
 * __clocksource_register_scale - Used to install new clocksources
 * @cs:		clocksource to be registered
 * @scale:	Scale factor multiplied against freq to get clocksource hz
 * @freq:	clocksource frequency (cycles per second) divided by scale
 *
 * Returns -EBUSY if registration fails, zero otherwise.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * clocksource_register_hz() or clocksource_register_khz helper functions.
 */
int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	…
}
EXPORT_SYMBOL_GPL(…);

static void __clocksource_change_rating(struct clocksource *cs, int rating)
{
	…
}

/**
 * clocksource_change_rating - Change the rating of a registered clocksource
 * @cs:		clocksource to be changed
 * @rating:	new rating
 */
void clocksource_change_rating(struct clocksource *cs, int rating)
{
	…
}
EXPORT_SYMBOL(…);

/*
 * Unbind clocksource @cs. Called with clocksource_mutex held
 */
static int clocksource_unbind(struct clocksource *cs)
{
	…
}

/**
 * clocksource_unregister - remove a registered clocksource
 * @cs:	clocksource to be unregistered
 */
int clocksource_unregister(struct clocksource *cs)
{
	…
}
EXPORT_SYMBOL(…);

#ifdef CONFIG_SYSFS
/**
 * current_clocksource_show - sysfs interface for current clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	char buffer to be filled with the current clocksource name
 *
 * Provides sysfs interface for listing current clocksource.
 */
static ssize_t current_clocksource_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	…
}

/*
 * Copy a clocksource name out of a sysfs buffer into @dst (capacity @cnt);
 * presumably shared by the store callbacks below — no kernel-doc in this
 * view, confirm contract against the full source.
 */
ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt)
{
	…
}

/**
 * current_clocksource_store - interface for manually overriding clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	name of override clocksource
 * @count:	length of buffer
 *
 * Takes input from sysfs interface for manually overriding the default
 * clocksource selection.
 */
static ssize_t current_clocksource_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	…
}
static DEVICE_ATTR_RW(current_clocksource);

/**
 * unbind_clocksource_store - interface for manually unbinding clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	unused
 * @count:	length of buffer
 *
 * Takes input from sysfs interface for manually unbinding a clocksource.
 */
static ssize_t unbind_clocksource_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	…
}
static DEVICE_ATTR_WO(unbind_clocksource);

/**
 * available_clocksource_show - sysfs interface for listing clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing registered clocksources
 */
static ssize_t available_clocksource_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	…
}
static DEVICE_ATTR_RO(available_clocksource);

static struct attribute *clocksource_attrs[] = …;
ATTRIBUTE_GROUPS(…);

static const struct bus_type clocksource_subsys = …;

static struct device device_clocksource = …;

static int __init init_clocksource_sysfs(void)
{
	…
}

device_initcall(init_clocksource_sysfs);
#endif /* CONFIG_SYSFS */

/**
 * boot_override_clocksource - boot clock override
 * @str:	override name
 *
 * Takes a clocksource= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clocksource(char* str)
{
	…
}
__setup(…);

/**
 * boot_override_clock - Compatibility layer for deprecated boot option
 * @str:	override name
 *
 * DEPRECATED! Takes a clock= boot argument and uses it
 * as the clocksource override name
 */
static int __init boot_override_clock(char* str)
{
	…
}
__setup(…);