#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/device.h>
#include "tick-internal.h"
static LIST_HEAD(clockevent_devices);
static LIST_HEAD(clockevents_released);
static DEFINE_RAW_SPINLOCK(clockevents_lock);
static DEFINE_MUTEX(clockevents_mutex);
struct ce_unbind { … };
static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
bool ismax)
{ … }
u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
{ … }
EXPORT_SYMBOL_GPL(…);
static int __clockevents_switch_state(struct clock_event_device *dev,
enum clock_event_state state)
{ … }
void clockevents_switch_state(struct clock_event_device *dev,
enum clock_event_state state)
{ … }
void clockevents_shutdown(struct clock_event_device *dev)
{ … }
int clockevents_tick_resume(struct clock_event_device *dev)
{ … }
#ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST
#define MIN_DELTA_LIMIT …
static int clockevents_increase_min_delta(struct clock_event_device *dev)
{ … }
static int clockevents_program_min_delta(struct clock_event_device *dev)
{ … }
#else
/*
 * Program the device to fire as soon as it can: repeatedly push the
 * target out by one more min_delta_ns until the device accepts it.
 *
 * Returns 0 on success (or if the device was shut down meanwhile),
 * -ETIME when programming still fails after 10 attempts.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
	unsigned long long cycles;
	int64_t ns_ahead = 0;
	int attempt;

	for (attempt = 0; attempt < 10; attempt++) {
		/* Each retry aims one additional min_delta_ns into the future. */
		ns_ahead += dev->min_delta_ns;
		dev->next_event = ktime_add_ns(ktime_get(), ns_ahead);

		/* Device got shut down under us — nothing left to program. */
		if (clockevent_state_shutdown(dev))
			return 0;

		dev->retries++;
		/* Convert nanoseconds to device ticks via the mult/shift pair. */
		cycles = ((unsigned long long) ns_ahead * dev->mult) >> dev->shift;
		if (!dev->set_next_event((unsigned long) cycles, dev))
			return 0;
	}

	return -ETIME;
}
#endif
int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
bool force)
{ … }
static void clockevents_notify_released(void)
{ … }
static int clockevents_replace(struct clock_event_device *ced)
{ … }
static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu)
{ … }
static void __clockevents_unbind(void *arg)
{ … }
static int clockevents_unbind(struct clock_event_device *ced, int cpu)
{ … }
int clockevents_unbind_device(struct clock_event_device *ced, int cpu)
{ … }
EXPORT_SYMBOL_GPL(…);
void clockevents_register_device(struct clock_event_device *dev)
{ … }
EXPORT_SYMBOL_GPL(…);
static void clockevents_config(struct clock_event_device *dev, u32 freq)
{ … }
void clockevents_config_and_register(struct clock_event_device *dev,
u32 freq, unsigned long min_delta,
unsigned long max_delta)
{ … }
EXPORT_SYMBOL_GPL(…);
int __clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{ … }
int clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{ … }
void clockevents_handle_noop(struct clock_event_device *dev)
{ … }
void clockevents_exchange_device(struct clock_event_device *old,
struct clock_event_device *new)
{ … }
void clockevents_suspend(void)
{ … }
void clockevents_resume(void)
{ … }
#ifdef CONFIG_HOTPLUG_CPU
# ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_offline_cpu(unsigned int cpu)
{ … }
# endif
void tick_cleanup_dead_cpu(int cpu)
{ … }
#endif
#ifdef CONFIG_SYSFS
static const struct bus_type clockevents_subsys = …;
static DEFINE_PER_CPU(struct device, tick_percpu_dev);
static struct tick_device *tick_get_tick_dev(struct device *dev);
static ssize_t current_device_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{ … }
static DEVICE_ATTR_RO(current_device);
static ssize_t unbind_device_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{ … }
static DEVICE_ATTR_WO(unbind_device);
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static struct device tick_bc_dev = …;
static struct tick_device *tick_get_tick_dev(struct device *dev)
{ … }
static __init int tick_broadcast_init_sysfs(void)
{ … }
#else
static struct tick_device *tick_get_tick_dev(struct device *dev)
{
return &per_cpu(tick_cpu_device, dev->id);
}
static inline int tick_broadcast_init_sysfs(void) { return 0; }
#endif
static int __init tick_init_sysfs(void)
{ … }
static int __init clockevents_init_sysfs(void)
{ … }
device_initcall(clockevents_init_sysfs);
#endif