/* linux/arch/x86/kernel/hpet.c */

// SPDX-License-Identifier: GPL-2.0-only
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/hpet.h>
#include <linux/cpu.h>
#include <linux/irq.h>

#include <asm/irq_remapping.h>
#include <asm/hpet.h>
#include <asm/time.h>
#include <asm/mwait.h>

#undef  pr_fmt
/* NOTE(review): empty expansion drops any log prefix — upstream presumably
 * prefixes "hpet: "; confirm before relying on log output. */
#define pr_fmt(fmt)

/* Channel operating modes — enumerators elided in this view (empty enum is
 * invalid C); restore from upstream. */
enum hpet_mode {};

/* Per-channel state — members elided in this view. */
struct hpet_channel {};

/* Per-HPET-block state — members elided in this view. */
struct hpet_base {};

/* NOTE(review): value elided — presumably the 32-bit counter mask. */
#define HPET_MASK

/* NOTE(review): values elided — these bound the minimum programmable delta. */
#define HPET_MIN_CYCLES
#define HPET_MIN_PROG_DELTA

/*
 * HPET address is set in acpi/boot.c, when an ACPI entry exists
 */
unsigned long				hpet_address;
u8					hpet_blockid; /* OS timer block num */
bool					hpet_msi_disable;

#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_GENERIC_MSI_IRQ)
/* Per-CPU pointer to a struct hpet_channel (MSI clockevent bookkeeping). */
static DEFINE_PER_CPU(struct hpet_channel *, cpu_hpet_channel);
/* irqdomain used to allocate the HPET MSI interrupts. */
static struct irq_domain		*hpet_domain;
#endif

/* Kernel virtual address of the mapped HPET MMIO region. */
static void __iomem			*hpet_virt_address;

static struct hpet_base			hpet_base;

/* Set once the legacy replacement interrupt routing has been enabled. */
static bool				hpet_legacy_int_enabled;
/* HPET counter frequency in Hz — presumably derived from the HW period. */
static unsigned long			hpet_freq;

bool					boot_hpet_disable;	/* presumably set from the command line — confirm */
bool					hpet_force_user;	/* presumably "hpet=force" — confirm */
static bool				hpet_verbose;		/* enables _hpet_print_config() output — confirm */

/*
 * NOTE(review): every function body below is elided ("{}") in this view.
 * The declarations match their upstream counterparts; the bodies must be
 * restored — do not ship these stubs.
 */
static inline
struct hpet_channel *clockevent_to_channel(struct clock_event_device *evt)
{}

/* Presumably an MMIO read of the HPET register at offset @a — body elided. */
inline unsigned int hpet_readl(unsigned int a)
{}

/* Presumably an MMIO write of @d to the HPET register at offset @a. */
static inline void hpet_writel(unsigned int d, unsigned int a)
{}

static inline void hpet_set_mapping(void)
{}

static inline void hpet_clear_mapping(void)
{}

/*
 * HPET command line enable / disable
 */
static int __init hpet_setup(char *str)
{}
/* NOTE(review): __setup() lost its arguments — invalid as written. */
__setup();

static int __init disable_hpet(char *str)
{}
/* NOTE(review): __setup() lost its arguments — invalid as written. */
__setup();

static inline int is_hpet_capable(void)
{}

/**
 * is_hpet_enabled - Check whether the legacy HPET timer interrupt is enabled
 */
int is_hpet_enabled(void)
{}
/* NOTE(review): EXPORT_SYMBOL_GPL() lost its symbol argument. */
EXPORT_SYMBOL_GPL();

static void _hpet_print_config(const char *function, int line)
{}

/* NOTE(review): macro expansion elided. */
#define hpet_print_config()

/*
 * When the HPET driver (/dev/hpet) is enabled, we need to reserve
 * timer 0 and timer 1 in case of RTC emulation.
 */
#ifdef CONFIG_HPET

/* Reserve timers 0/1 for /dev/hpet and RTC emulation (see comment above);
 * body elided in this view. */
static void __init hpet_reserve_platform_timers(void)
{}

static void __init hpet_select_device_channel(void)
{}

#else
/* CONFIG_HPET=n: nothing to reserve or select. */
static inline void hpet_reserve_platform_timers(void) { }
static inline void hpet_select_device_channel(void) {}
#endif

/* Common HPET functions */
/*
 * NOTE(review): all bodies below are elided ("{}") in this view and must be
 * restored from the upstream file before use.
 */
static void hpet_stop_counter(void)
{}

static void hpet_reset_counter(void)
{}

static void hpet_start_counter(void)
{}

static void hpet_restart_counter(void)
{}

static void hpet_resume_device(void)
{}

static void hpet_resume_counter(struct clocksource *cs)
{}

static void hpet_enable_legacy_int(void)
{}

/* clockevent state callbacks — presumably wired into hpet_init_clockevent(). */
static int hpet_clkevt_set_state_periodic(struct clock_event_device *evt)
{}

static int hpet_clkevt_set_state_oneshot(struct clock_event_device *evt)
{}

static int hpet_clkevt_set_state_shutdown(struct clock_event_device *evt)
{}

static int hpet_clkevt_legacy_resume(struct clock_event_device *evt)
{}

static int
hpet_clkevt_set_next_event(unsigned long delta, struct clock_event_device *evt)
{}

static void hpet_init_clockevent(struct hpet_channel *hc, unsigned int rating)
{}

static void __init hpet_legacy_clockevent_register(struct hpet_channel *hc)
{}

/*
 * HPET MSI Support
 */
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_GENERIC_MSI_IRQ)
/*
 * NOTE(review): every body and initializer in this section is elided in
 * this view; restore from the upstream file before use.
 */
static void hpet_msi_unmask(struct irq_data *data)
{}

static void hpet_msi_mask(struct irq_data *data)
{}

static void hpet_msi_write(struct hpet_channel *hc, struct msi_msg *msg)
{}

static void hpet_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
{}

/* NOTE(review): initializer elided — invalid as written. */
static struct irq_chip hpet_msi_controller __ro_after_init =;

static int hpet_msi_init(struct irq_domain *domain,
			 struct msi_domain_info *info, unsigned int virq,
			 irq_hw_number_t hwirq, msi_alloc_info_t *arg)
{}

static void hpet_msi_free(struct irq_domain *domain,
			  struct msi_domain_info *info, unsigned int virq)
{}

/* NOTE(review): initializers elided — invalid as written. */
static struct msi_domain_ops hpet_msi_domain_ops =;

static struct msi_domain_info hpet_msi_domain_info =;

static struct irq_domain *hpet_create_irq_domain(int hpet_id)
{}

static inline int hpet_dev_id(struct irq_domain *domain)
{}

static int hpet_assign_irq(struct irq_domain *domain, struct hpet_channel *hc,
			   int dev_num)
{}

static int hpet_clkevt_msi_resume(struct clock_event_device *evt)
{}

static irqreturn_t hpet_msi_interrupt_handler(int irq, void *data)
{}

static int hpet_setup_msi_irq(struct hpet_channel *hc)
{}

/* Invoked from the hotplug callback on @cpu */
static void init_one_hpet_msi_clockevent(struct hpet_channel *hc, int cpu)
{}

static struct hpet_channel *hpet_get_unused_clockevent(void)
{}

static int hpet_cpuhp_online(unsigned int cpu)
{}

static int hpet_cpuhp_dead(unsigned int cpu)
{}

static void __init hpet_select_clockevents(void)
{}

#else

static inline void hpet_select_clockevents(void) { }

/* NOTE(review): expansions elided — presumably these map to NULL so the
 * hotplug registration compiles out; confirm against upstream. */
#define hpet_cpuhp_online
#define hpet_cpuhp_dead

#endif

/*
 * Clock source related code
 */
#if defined(CONFIG_SMP) && defined(CONFIG_64BIT)
/*
 * Reading the HPET counter is a very slow operation. If a large number of
 * CPUs are trying to access the HPET counter simultaneously, it can cause
 * massive delays and slow down system performance dramatically. This may
 * happen when HPET is the default clock source instead of TSC. For a
 * really large system with hundreds of CPUs, the slowdown may be so
 * severe, that it can actually crash the system because of a NMI watchdog
 * soft lockup, for example.
 *
 * If multiple CPUs are trying to access the HPET counter at the same time,
 * we don't actually need to read the counter multiple times. Instead, the
 * other CPUs can use the counter value read by the first CPU in the group.
 *
 * This special feature is only enabled on x86-64 systems. It is unlikely
 * that 32-bit x86 systems will have enough CPUs to require this feature
 * with its associated locking overhead. We also need 64-bit atomic read.
 *
 * The lock and the HPET value are stored together and can be read in a
 * single atomic 64-bit read. It is explicitly assumed that arch_spinlock_t
 * is 32 bits in size.
 */
/* NOTE(review): garbled — presumably "union hpet_lock { ... };" upstream,
 * packing the lock and the cached counter value per the comment above. */
hpet_lock;

/* NOTE(review): initializer elided — invalid as written. */
static union hpet_lock hpet __cacheline_aligned =;

/* SMP/64-bit counter read with the shared-read optimization described
 * above; body elided in this view. */
static u64 read_hpet(struct clocksource *cs)
{}
#else
/*
 * For UP or 32-bit.
 */
static u64 read_hpet(struct clocksource *cs)
{
	return (u64)hpet_readl(HPET_COUNTER);
}
#endif

/* NOTE(review): initializer elided (name/rating/read/mask etc.) — invalid
 * as written; restore from upstream. */
static struct clocksource clocksource_hpet =;

/*
 * AMD SB700 based systems with spread spectrum enabled use a SMM based
 * HPET emulation to provide proper frequency setting.
 *
 * On such systems the SMM code is initialized with the first HPET register
 * access and takes some time to complete. During this time the config
 * register reads 0xffffffff. We check for max 1000 loops whether the
 * config register reads a non-0xffffffff value to make sure that the
 * HPET is up and running before we proceed any further.
 *
 * A counting loop is safe, as the HPET access takes thousands of CPU cycles.
 *
 * On non-SB700 based machines this check is only done once and has no
 * side effects.
 */
/* Poll the config register until it reads non-0xffffffff (SMM emulation
 * warm-up on AMD SB700 — see comment above); body elided in this view. */
static bool __init hpet_cfg_working(void)
{}

/* Presumably verifies the main counter actually counts; body elided. */
static bool __init hpet_counting(void)
{}

/* Presumably probes MWAIT enumeration for PC10 support; body elided. */
static bool __init mwait_pc10_supported(void)
{}

/*
 * Check whether the system supports PC10. If so force disable HPET as that
 * stops counting in PC10. This check is overbroad as it does not take any
 * of the following into account:
 *
 *	- ACPI tables
 *	- Enablement of intel_idle
 *	- Command line arguments which limit intel_idle C-state support
 *
 * That's perfectly fine. HPET is a piece of hardware designed by committee
 * and the only reasons why it is still in use on modern systems is the
 * fact that it is impossible to reliably query TSC and CPU frequency via
 * CPUID or firmware.
 *
 * If HPET is functional it is useful for calibrating TSC, but this can be
 * done via PMTIMER as well which seems to be the last remaining timer on
 * X86/INTEL platforms that has not been completely wreckaged by feature
 * creep.
 *
 * In theory HPET support should be removed altogether, but there are older
 * systems out there which depend on it because TSC and APIC timer are
 * dysfunctional in deeper C-states.
 *
 * It's only 20 years now that hardware people have been asked to provide
 * reliable and discoverable facilities which can be used for timekeeping
 * and per CPU timer interrupts.
 *
 * The probability that this problem is going to be solved in the
 * foreseeable future is close to zero, so the kernel has to be cluttered
 * with heuristics to keep up with the ever growing amount of hardware and
 * firmware trainwrecks. Hopefully some day hardware people will understand
 * that the approach of "This can be fixed in software" is not sustainable.
 * Hope dies last...
 */
/* PC10 heuristic described in the comment above; body elided in this view. */
static bool __init hpet_is_pc10_damaged(void)
{}

/**
 * hpet_enable - Try to setup the HPET timer. Returns 1 on success.
 */
/* NOTE(review): body elided — restore from upstream before use. */
int __init hpet_enable(void)
{}

/*
 * The late initialization runs after the PCI quirks have been invoked
 * which might have detected a system on which the HPET can be enforced.
 *
 * Also, the MSI machinery is not working yet when the HPET is initialized
 * early.
 *
 * If the HPET is enabled, then:
 *
 *  1) Reserve one channel for /dev/hpet if CONFIG_HPET=y
 *  2) Reserve up to num_possible_cpus() channels as per CPU clockevents
 *  3) Setup /dev/hpet if CONFIG_HPET=y
 *  4) Register hotplug callbacks when clockevents are available
 */
/* Late init as described in the comment above; body elided in this view. */
static __init int hpet_late_init(void)
{}
fs_initcall(hpet_late_init);

/* Presumably shuts the HPET down (e.g. for kexec/reboot); body elided. */
void hpet_disable(void)
{}

#ifdef CONFIG_HPET_EMULATE_RTC

/*
 * HPET in LegacyReplacement mode eats up the RTC interrupt line. When HPET
 * is enabled, we support RTC interrupt functionality in software.
 *
 * RTC has 3 kinds of interrupts:
 *
 *  1) Update Interrupt - generate an interrupt, every second, when the
 *     RTC clock is updated
 *  2) Alarm Interrupt - generate an interrupt at a specific time of day
 *  3) Periodic Interrupt - generate periodic interrupt, with frequencies
 *     2Hz-8192Hz (2Hz-64Hz for non-root user) (all frequencies in powers of 2)
 *
 * (1) and (2) above are implemented using polling at a frequency of 64 Hz:
 * DEFAULT_RTC_INT_FREQ.
 *
 * The exact frequency is a tradeoff between accuracy and interrupt overhead.
 *
 * For (3), we use interrupts at 64 Hz, or the user specified periodic frequency,
 * if it's higher.
 */
#include <linux/mc146818rtc.h>
#include <linux/rtc.h>

/* NOTE(review): values elided from the three macros below — the comment
 * above says the polling frequency is 64 Hz; confirm against upstream. */
#define DEFAULT_RTC_INT_FREQ
#define DEFAULT_RTC_SHIFT
#define RTC_NUM_INTS

/* RTC-emulation state shared by the hpet_rtc_* functions below. */
static unsigned long hpet_rtc_flags;
static int hpet_prev_update_sec;
static struct rtc_time hpet_alarm_time;
static unsigned long hpet_pie_count;
static u32 hpet_t1_cmp;
static u32 hpet_default_delta;
static u32 hpet_pie_delta;
static unsigned long hpet_pie_limit;

/* Handler installed by the RTC driver via hpet_register_irq_handler(). */
static rtc_irq_handler irq_handler;

/*
 * Check that the HPET counter c1 is ahead of c2
 */
/*
 * Check that the HPET counter c1 is ahead of c2
 */
/*
 * NOTE(review): all bodies below are elided ("{}") and every
 * EXPORT_SYMBOL_GPL() has lost its symbol argument — invalid as written;
 * restore from the upstream file before use.
 */
static inline int hpet_cnt_ahead(u32 c1, u32 c2)
{}

/*
 * Registers a IRQ handler.
 */
int hpet_register_irq_handler(rtc_irq_handler handler)
{}
EXPORT_SYMBOL_GPL();

/*
 * Deregisters the IRQ handler registered with hpet_register_irq_handler()
 * and does cleanup.
 */
void hpet_unregister_irq_handler(rtc_irq_handler handler)
{}
EXPORT_SYMBOL_GPL();

/*
 * Channel 1 for RTC emulation. We use one shot mode, as periodic mode
 * is not supported by all HPET implementations for channel 1.
 *
 * hpet_rtc_timer_init() is called when the rtc is initialized.
 */
int hpet_rtc_timer_init(void)
{}
EXPORT_SYMBOL_GPL();

static void hpet_disable_rtc_channel(void)
{}

/*
 * The functions below are called from rtc driver.
 * Return 0 if HPET is not being used.
 * Otherwise do the necessary changes and return 1.
 */
int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
{}
EXPORT_SYMBOL_GPL();

int hpet_set_rtc_irq_bit(unsigned long bit_mask)
{}
EXPORT_SYMBOL_GPL();

int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec)
{}
EXPORT_SYMBOL_GPL();

int hpet_set_periodic_freq(unsigned long freq)
{}
EXPORT_SYMBOL_GPL();

int hpet_rtc_dropped_irq(void)
{}
EXPORT_SYMBOL_GPL();

/* Re-arm channel 1 for the next RTC tick; body elided in this view. */
static void hpet_rtc_timer_reinit(void)
{}

/* RTC interrupt service routine for the emulated modes listed above. */
irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
{}
EXPORT_SYMBOL_GPL();
#endif