/* linux/kernel/time/posix-timers.c */

// SPDX-License-Identifier: GPL-2.0+
/*
 * 2002-10-15  Posix Clocks & timers
 *                           by George Anzinger [email protected]
 *			     Copyright (C) 2002 2003 by MontaVista Software.
 *
 * 2004-06-01  Fix CLOCK_REALTIME clock/timer TIMER_ABSTIME bug.
 *			     Copyright (C) 2004 Boris Hu
 *
 * These are all the functions necessary to implement POSIX clocks & timers
 */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/sched/task.h>

#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/hash.h>
#include <linux/posix-clock.h>
#include <linux/posix-timers.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/export.h>
#include <linux/hashtable.h>
#include <linux/compat.h>
#include <linux/nospec.h>
#include <linux/time_namespace.h>

#include "timekeeping.h"
#include "posix-timers.h"

/* Slab cache for k_itimer allocations; presumably set up in init_posix_timers(). */
static struct kmem_cache *posix_timers_cache;

/*
 * Timers are managed in a hash table for lockless lookup. The hash key is
 * constructed from current::signal and the timer ID and the timer is
 * matched against current::signal and the timer ID when walking the hash
 * bucket list.
 *
 * This allows checkpoint/restore to reconstruct the exact timer IDs for
 * a process.
 */
static DEFINE_HASHTABLE(posix_timers_hashtable, 9);
/* Presumably serializes insertion/removal in the hash table (lookup is lockless). */
static DEFINE_SPINLOCK(hash_lock);

/* Forward declarations; the definitions live at the bottom of the file. */
static const struct k_clock * const posix_clocks[];
static const struct k_clock *clockid_to_kclock(const clockid_t id);
static const struct k_clock clock_realtime, clock_monotonic;

/* SIGEV_THREAD_ID cannot share a bit with the other SIGEV values. */
#if SIGEV_THREAD_ID != (SIGEV_THREAD_ID & \
			~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD))
#error "SIGEV_THREAD_ID must not share bit with other SIGEV values!"
#endif

static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags);

/*
 * Look up and lock the timer identified by @tid.
 *
 * Expands to __lock_timer() (declared above), passing @flags by name so
 * the saved IRQ state can be written back through the pointer parameter.
 * The previous expansion was empty, which silently discarded both
 * arguments and turned every lock_timer() call site into a no-op.
 */
#define lock_timer(tid, flags)	(__lock_timer((tid), &(flags)))

/*
 * Hash a (signal_struct, timer id) pair into a posix_timers_hashtable
 * bucket index (see the comment above the table definition).
 * NOTE(review): all bodies in this chunk are elided ({}); the comments
 * below describe intent inferred from names and nearby file comments.
 */
static int hash(struct signal_struct *sig, unsigned int nr)
{}

/*
 * Walk one hash bucket list and return the timer matching both @sig and
 * @id, or presumably NULL when no match exists.
 */
static struct k_itimer *__posix_timers_find(struct hlist_head *head,
					    struct signal_struct *sig,
					    timer_t id)
{}

/* Look up a timer of the current process (current::signal) by @id. */
static struct k_itimer *posix_timer_by_id(timer_t id)
{}

/* Insert @timer into the hash table; presumably allocates its timer ID. */
static int posix_timer_add(struct k_itimer *timer)
{}

/* Counterpart to lock_timer(): drop the timer lock and restore IRQ @flags. */
static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
{}

/*
 * Per-clock accessor stubs, wired into the k_clock tables at the bottom
 * of the file. Bodies are elided ({}) in this chunk.
 */

/* Read CLOCK_REALTIME into *tp. */
static int posix_get_realtime_timespec(clockid_t which_clock, struct timespec64 *tp)
{}

/* Read CLOCK_REALTIME as a ktime_t. */
static ktime_t posix_get_realtime_ktime(clockid_t which_clock)
{}

/* clock_settime() backend for CLOCK_REALTIME. */
static int posix_clock_realtime_set(const clockid_t which_clock,
				    const struct timespec64 *tp)
{}

/* clock_adjtime() backend for CLOCK_REALTIME. */
static int posix_clock_realtime_adj(const clockid_t which_clock,
				    struct __kernel_timex *t)
{}

/* Read CLOCK_MONOTONIC into *tp. */
static int posix_get_monotonic_timespec(clockid_t which_clock, struct timespec64 *tp)
{}

/* Read CLOCK_MONOTONIC as a ktime_t. */
static ktime_t posix_get_monotonic_ktime(clockid_t which_clock)
{}

/* Read CLOCK_MONOTONIC_RAW into *tp. */
static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec64 *tp)
{}

/* Read CLOCK_REALTIME_COARSE into *tp. */
static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec64 *tp)
{}

/* Read CLOCK_MONOTONIC_COARSE into *tp. */
static int posix_get_monotonic_coarse(clockid_t which_clock,
						struct timespec64 *tp)
{}

/* Resolution of the coarse clocks (nanoseconds per tick, per the
 * sys_clock_getres() comment further down). */
static int posix_get_coarse_res(const clockid_t which_clock, struct timespec64 *tp)
{}

/* Read CLOCK_BOOTTIME into *tp. */
static int posix_get_boottime_timespec(const clockid_t which_clock, struct timespec64 *tp)
{}

/* Read CLOCK_BOOTTIME as a ktime_t. */
static ktime_t posix_get_boottime_ktime(const clockid_t which_clock)
{}

/* Read CLOCK_TAI into *tp. */
static int posix_get_tai_timespec(clockid_t which_clock, struct timespec64 *tp)
{}

/* Read CLOCK_TAI as a ktime_t. */
static ktime_t posix_get_tai_ktime(clockid_t which_clock)
{}

/* Resolution of the hrtimer-backed clocks. */
static int posix_get_hrtimer_res(clockid_t which_clock, struct timespec64 *tp)
{}

/*
 * Boot-time initialization; presumably creates posix_timers_cache.
 * (Body elided in this chunk.)
 */
static __init int init_posix_timers(void)
{}
__initcall(init_posix_timers);

/*
 * The siginfo si_overrun field and the return value of timer_getoverrun(2)
 * are of type int. Clamp the overrun value to INT_MAX
 */
/* Helper clamping the overrun value to int range as described above.
 * (Body elided in this chunk.) */
static inline int timer_overrun_to_int(struct k_itimer *timr, int baseval)
{}

/* Rearm the hrtimer of an interval timer after expiry. (Body elided.) */
static void common_hrtimer_rearm(struct k_itimer *timr)
{}

/*
 * This function is called from the signal delivery code if
 * info->si_sys_private is not zero, which indicates that the timer has to
 * be rearmed. Restart the timer and update info::si_overrun.
 */
void posixtimer_rearm(struct kernel_siginfo *info)
{}

/* Queue the expiry signal for @timr; @si_private is presumably stored in
 * info->si_sys_private for posixtimer_rearm() above. (Body elided.) */
int posix_timer_event(struct k_itimer *timr, int si_private)
{}

/*
 * This function gets called when a POSIX.1b interval timer expires from
 * the HRTIMER interrupt (soft interrupt on RT kernels).
 *
 * Handles CLOCK_REALTIME, CLOCK_MONOTONIC, CLOCK_BOOTTIME and CLOCK_TAI
 * based timers.
 */
static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
{}

/* Validate a sigevent and resolve its target pid; presumably returns NULL
 * when the sigevent is invalid. (Body elided.) */
static struct pid *good_sigevent(sigevent_t * event)
{}

/* Allocate a k_itimer, presumably from posix_timers_cache. (Body elided.) */
static struct k_itimer * alloc_posix_timer(void)
{}

/* RCU callback freeing a k_itimer after a grace period. */
static void k_itimer_rcu_free(struct rcu_head *head)
{}

/* Release @tmr; presumably defers the actual free via k_itimer_rcu_free(). */
static void posix_timer_free(struct k_itimer *tmr)
{}

/* Remove @tmr from the hash table, then free it. */
static void posix_timer_unhash_and_free(struct k_itimer *tmr)
{}

/* Presumably the k_clock::timer_create hook for the hrtimer-backed clocks. */
static int common_timer_create(struct k_itimer *new_timer)
{}

/* Create a POSIX.1b interval timer. */
static int do_timer_create(clockid_t which_clock, struct sigevent *event,
			   timer_t __user *created_timer_id)
{}

/* timer_create(2): native entry point; presumably copies the sigevent from
 * user space and delegates to do_timer_create(). */
SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
		struct sigevent __user *, timer_event_spec,
		timer_t __user *, created_timer_id)
{}

#ifdef CONFIG_COMPAT
/* timer_create(2): compat entry point converting struct compat_sigevent. */
COMPAT_SYSCALL_DEFINE3(timer_create, clockid_t, which_clock,
		       struct compat_sigevent __user *, timer_event_spec,
		       timer_t __user *, created_timer_id)
{}
#endif

/*
 * Look up a timer by @timer_id and lock it. IRQ state is presumably saved
 * through *flags (note the pointer parameter, unlike unlock_timer()).
 * (Body elided in this chunk.)
 */
static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
{}

/* Remaining time of the hrtimer of @timr relative to @now. (Body elided.) */
static ktime_t common_hrtimer_remaining(struct k_itimer *timr, ktime_t now)
{}

/* Forward the timer's expiry past @now; presumably returns the number of
 * elapsed intervals (overruns). (Body elided.) */
static s64 common_hrtimer_forward(struct k_itimer *timr, ktime_t now)
{}

/*
 * Get the time remaining on a POSIX.1b interval timer.
 *
 * Two issues to handle here:
 *
 *  1) The timer has a requeue pending. The return value must appear as
 *     if the timer has been requeued right now.
 *
 *  2) The timer is a SIGEV_NONE timer. These timers are never enqueued
 *     into the hrtimer queue and therefore never expired. Emulate expiry
 *     here taking #1 into account.
 */
void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting)
{}

/* Lock the timer by @timer_id, read its current setting into *@setting,
 * unlock. NOTE(review): inferred from naming -- body elided. */
static int do_timer_gettime(timer_t timer_id,  struct itimerspec64 *setting)
{}

/* Get the time remaining on a POSIX.1b interval timer. */
SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
		struct __kernel_itimerspec __user *, setting)
{}

#ifdef CONFIG_COMPAT_32BIT_TIME

/* timer_gettime(2) for 32-bit time_t user space. */
SYSCALL_DEFINE2(timer_gettime32, timer_t, timer_id,
		struct old_itimerspec32 __user *, setting)
{}

#endif

/**
 * sys_timer_getoverrun - Get the number of overruns of a POSIX.1b interval timer
 * @timer_id:	The timer ID which identifies the timer
 *
 * The "overrun count" of a timer is one plus the number of expiration
 * intervals which have elapsed between the first expiry, which queues the
 * signal and the actual signal delivery. On signal delivery the "overrun
 * count" is calculated and cached, so it can be returned directly here.
 *
 * As this is relative to the last queued signal the returned overrun count
 * is meaningless outside of the signal delivery path and even there it
 * does not accurately reflect the current state when user space evaluates
 * it.
 *
 * Returns:
 *	-EINVAL		@timer_id is invalid
 *	1..INT_MAX	The number of overruns related to the last delivered signal
 */
SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
{}

/* Arm the hrtimer of @timr to fire at @expires; @absolute selects absolute
 * vs. relative mode, @sigev_none presumably suppresses actual enqueueing
 * (SIGEV_NONE timers are never queued, per common_timer_get()'s comment). */
static void common_hrtimer_arm(struct k_itimer *timr, ktime_t expires,
			       bool absolute, bool sigev_none)
{}

/* Try to cancel the hrtimer of @timr without waiting. (Body elided.) */
static int common_hrtimer_try_to_cancel(struct k_itimer *timr)
{}

/* Wait until the expiry callback of @timer has finished. (Body elided.) */
static void common_timer_wait_running(struct k_itimer *timer)
{}

/*
 * On PREEMPT_RT this prevents priority inversion and a potential livelock
 * against the ksoftirqd thread in case that ksoftirqd gets preempted while
 * executing a hrtimer callback.
 *
 * See the comments in hrtimer_cancel_wait_running(). For PREEMPT_RT=n this
 * just results in a cpu_relax().
 *
 * For POSIX CPU timers with CONFIG_POSIX_CPU_TIMERS_TASK_WORK=n this is
 * just a cpu_relax(). With CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y this
 * prevents spinning on an eventually scheduled out task and a livelock
 * when the task which tries to delete or disarm the timer has preempted
 * the task which runs the expiry in task work context.
 */
static struct k_itimer *timer_wait_running(struct k_itimer *timer,
					   unsigned long *flags)
{}

/* Set a POSIX.1b interval timer. */
int common_timer_set(struct k_itimer *timr, int flags,
		     struct itimerspec64 *new_setting,
		     struct itimerspec64 *old_setting)
{}

/* Lock the timer by @timer_id and apply *@new_spec64; the previous setting
 * is presumably returned through @old_spec64 when non-NULL. (Body elided.) */
static int do_timer_settime(timer_t timer_id, int tmr_flags,
			    struct itimerspec64 *new_spec64,
			    struct itimerspec64 *old_spec64)
{}

/* Set a POSIX.1b interval timer */
SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
		const struct __kernel_itimerspec __user *, new_setting,
		struct __kernel_itimerspec __user *, old_setting)
{}

#ifdef CONFIG_COMPAT_32BIT_TIME
/* timer_settime(2) for 32-bit time_t user space. */
SYSCALL_DEFINE4(timer_settime32, timer_t, timer_id, int, flags,
		struct old_itimerspec32 __user *, new,
		struct old_itimerspec32 __user *, old)
{}
#endif

/* Presumably the k_clock::timer_del hook for the hrtimer-backed clocks. */
int common_timer_del(struct k_itimer *timer)
{}

/* Dispatch deletion to the timer's k_clock backend. (Body elided.) */
static inline int timer_delete_hook(struct k_itimer *timer)
{}

/* Delete a POSIX.1b interval timer. */
SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
{}

/*
 * Delete a timer if it is armed, remove it from the hash and schedule it
 * for RCU freeing.
 */
static void itimer_delete(struct k_itimer *timer)
{}

/*
 * Invoked from do_exit() when the last thread of a thread group exits.
 * At that point no other task can access the timers of the dying
 * task anymore.
 */
void exit_itimers(struct task_struct *tsk)
{}

/* clock_settime(2): presumably dispatches via clockid_to_kclock(). */
SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
		const struct __kernel_timespec __user *, tp)
{}

/* clock_gettime(2). */
SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
		struct __kernel_timespec __user *, tp)
{}

/* Kernel-internal clock_adjtime() helper; non-static, presumably shared
 * with the compat path below. (Body elided.) */
int do_clock_adjtime(const clockid_t which_clock, struct __kernel_timex * ktx)
{}

/* clock_adjtime(2). */
SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
		struct __kernel_timex __user *, utx)
{}

/**
 * sys_clock_getres - Get the resolution of a clock
 * @which_clock:	The clock to get the resolution for
 * @tp:			Pointer to a user space timespec64 for storage
 *
 * POSIX defines:
 *
 * "The clock_getres() function shall return the resolution of any
 * clock. Clock resolutions are implementation-defined and cannot be set by
 * a process. If the argument res is not NULL, the resolution of the
 * specified clock shall be stored in the location pointed to by res. If
 * res is NULL, the clock resolution is not returned. If the time argument
 * of clock_settime() is not a multiple of res, then the value is truncated
 * to a multiple of res."
 *
 * Due to the various hardware constraints the real resolution can vary
 * wildly and even change during runtime when the underlying devices are
 * replaced. The kernel also can use hardware devices with different
 * resolutions for reading the time and for arming timers.
 *
 * The kernel therefore deviates from the POSIX spec in various aspects:
 *
 * 1) The resolution returned to user space
 *
 *    For CLOCK_REALTIME, CLOCK_MONOTONIC, CLOCK_BOOTTIME, CLOCK_TAI,
 *    CLOCK_REALTIME_ALARM, CLOCK_BOOTTIME_ALARM and CLOCK_MONOTONIC_RAW
 *    the kernel differentiates only two cases:
 *
 *    I)  Low resolution mode:
 *
 *	  When high resolution timers are disabled at compile or runtime
 *	  the resolution returned is nanoseconds per tick, which represents
 *	  the precision at which timers expire.
 *
 *    II) High resolution mode:
 *
 *	  When high resolution timers are enabled the resolution returned
 *	  is always one nanosecond independent of the actual resolution of
 *	  the underlying hardware devices.
 *
 *	  For CLOCK_*_ALARM the actual resolution depends on system
 *	  state. When system is running the resolution is the same as the
 *	  resolution of the other clocks. During suspend the actual
 *	  resolution is the resolution of the underlying RTC device which
 *	  might be way less precise than the clockevent device used during
 *	  running state.
 *
 *   For CLOCK_REALTIME_COARSE and CLOCK_MONOTONIC_COARSE the resolution
 *   returned is always nanoseconds per tick.
 *
 *   For CLOCK_PROCESS_CPUTIME and CLOCK_THREAD_CPUTIME the resolution
 *   returned is always one nanosecond under the assumption that the
 *   underlying scheduler clock has a better resolution than nanoseconds
 *   per tick.
 *
 *   For dynamic POSIX clocks (PTP devices) the resolution returned is
 *   always one nanosecond.
 *
 * 2) Effect on sys_clock_settime()
 *
 *    The kernel does not truncate the time which is handed in to
 *    sys_clock_settime(). The kernel internal timekeeping is always using
 *    nanoseconds precision independent of the clocksource device which is
 *    used to read the time from. The resolution of that device only
 *    affects the precision of the time returned by sys_clock_gettime().
 *
 * Returns:
 *	0		Success. @tp contains the resolution
 *	-EINVAL		@which_clock is not a valid clock ID
 *	-EFAULT		Copying the resolution to @tp faulted
 *	-ENODEV		Dynamic POSIX clock is not backed by a device
 *	-EOPNOTSUPP	Dynamic POSIX clock does not support getres()
 */
SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
		struct __kernel_timespec __user *, tp)
{}

#ifdef CONFIG_COMPAT_32BIT_TIME

/* 32-bit time_t variants of the clock syscalls above. */
SYSCALL_DEFINE2(clock_settime32, clockid_t, which_clock,
		struct old_timespec32 __user *, tp)
{}

SYSCALL_DEFINE2(clock_gettime32, clockid_t, which_clock,
		struct old_timespec32 __user *, tp)
{}

SYSCALL_DEFINE2(clock_adjtime32, clockid_t, which_clock,
		struct old_timex32 __user *, utp)
{}

SYSCALL_DEFINE2(clock_getres_time32, clockid_t, which_clock,
		struct old_timespec32 __user *, tp)
{}

#endif

/*
 * sys_clock_nanosleep() for CLOCK_REALTIME and CLOCK_TAI
 */
static int common_nsleep(const clockid_t which_clock, int flags,
			 const struct timespec64 *rqtp)
{}

/*
 * sys_clock_nanosleep() for CLOCK_MONOTONIC and CLOCK_BOOTTIME
 *
 * Absolute nanosleeps for these clocks are time-namespace adjusted.
 */
static int common_nsleep_timens(const clockid_t which_clock, int flags,
				const struct timespec64 *rqtp)
{}

/* clock_nanosleep(2): presumably dispatches to the clock's nsleep hook. */
SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
		const struct __kernel_timespec __user *, rqtp,
		struct __kernel_timespec __user *, rmtp)
{}

#ifdef CONFIG_COMPAT_32BIT_TIME

/* clock_nanosleep(2) for 32-bit time_t user space. */
SYSCALL_DEFINE4(clock_nanosleep_time32, clockid_t, which_clock, int, flags,
		struct old_timespec32 __user *, rqtp,
		struct old_timespec32 __user *, rmtp)
{}

#endif

/*
 * k_clock method tables. NOTE(review): the initializers are elided ("=;")
 * in this chunk, which is not valid C -- the full file presumably wires
 * up the accessor functions defined above via designated initializers.
 */
static const struct k_clock clock_realtime =;

static const struct k_clock clock_monotonic =;

static const struct k_clock clock_monotonic_raw =;

static const struct k_clock clock_realtime_coarse =;

static const struct k_clock clock_monotonic_coarse =;

static const struct k_clock clock_tai =;

static const struct k_clock clock_boottime =;

/* Table mapping clockid_t values to their k_clock backends. */
static const struct k_clock * const posix_clocks[] =;

/* Resolve @id to its k_clock, or presumably NULL for invalid IDs.
 * (Body elided; likely bounds-checks against posix_clocks[].) */
static const struct k_clock *clockid_to_kclock(const clockid_t id)
{}