linux/kernel/time/posix-cpu-timers.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched/signal.h>
#include <linux/sched/cputime.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <linux/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/compat.h>
#include <linux/sched/deadline.h>
#include <linux/task_work.h>

#include "posix-timers.h"

static void posix_cpu_timer_rearm(struct k_itimer *timer);

void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit)
{}

/*
 * Called after updating RLIMIT_CPU to run the CPU timers and update the
 * tsk->signal->posix_cputimers.bases[clock].nextevt expiration cache if
 * necessary. Needs siglock protection since other code may update the
 * expiration cache as well.
 *
 * Returns 0 on success, -ESRCH on failure.  Can fail if the task is exiting
 * and we cannot lock_task_sighand().  Cannot fail if the task is current.
 */
int update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{}
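
/*
 * Illustrative sketch (hypothetical name, not necessarily the mainline body)
 * of the shape the comment above implies: convert the limit to nanoseconds,
 * take the sighand lock, refresh the process-wide PROF expiry cache via
 * set_process_cpu_timer() (declared later in this file), and report -ESRCH
 * when the task is already exiting.
 */
static int update_rlimit_cpu_sketch(struct task_struct *task, unsigned long rlim_new)
{
	u64 nsecs = (u64)rlim_new * NSEC_PER_SEC;
	unsigned long irq_fl;

	/* Fails only when the task is exiting; never for current. */
	if (!lock_task_sighand(task, &irq_fl))
		return -ESRCH;

	set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL);
	unlock_task_sighand(task, &irq_fl);
	return 0;
}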

/*
 * Functions for validating access to tasks.
 */
static struct pid *pid_for_clock(const clockid_t clock, bool gettime)
{}

static inline int validate_clock_permissions(const clockid_t clock)
{}

static inline enum pid_type clock_pid_type(const clockid_t clock)
{}

static inline struct task_struct *cpu_timer_task_rcu(struct k_itimer *timer)
{}

/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static u64 bump_cpu_timer(struct k_itimer *timer, u64 now)
{}
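
/*
 * A minimal sketch of the arithmetic described above, assuming the expiry is
 * kept in nanoseconds (cpu_timer_getexpires()) and the period in
 * timer->it_interval.  The div64_u64() form is only illustrative; the real
 * helper may compute the overrun differently.
 */
static u64 bump_cpu_timer_sketch(struct k_itimer *timer, u64 now)
{
	u64 expires = cpu_timer_getexpires(&timer->it.cpu);
	u64 incr = timer->it_interval;
	u64 periods;

	/* One-shot timer or not yet expired: nothing to bump. */
	if (!incr || now < expires)
		return expires;

	/* Whole periods missed, plus one to move the expiry past @now. */
	periods = div64_u64(now - expires, incr) + 1;
	timer->it_overrun += periods;

	return expires + periods * incr;
}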

/* Check whether all cache entries contain U64_MAX, i.e. eternal expiry time */
static inline bool expiry_cache_is_inactive(const struct posix_cputimers *pct)
{}

static int
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec64 *tp)
{}

static int
posix_cpu_clock_set(const clockid_t clock, const struct timespec64 *tp)
{}

/*
 * Sample a per-thread clock for the given task. clkid is validated.
 */
static u64 cpu_clock_sample(const clockid_t clkid, struct task_struct *p)
{}
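
/*
 * A sketch of per-thread sampling for the three CPUCLOCK_* flavours, using
 * the task_cputime()/task_sched_runtime() accessors.  Illustrative only,
 * with a hypothetical name.
 */
static u64 cpu_clock_sample_sketch(const clockid_t clkid, struct task_struct *p)
{
	u64 utime, stime;

	/* CPUCLOCK_SCHED is the raw scheduler runtime in nanoseconds. */
	if (clkid == CPUCLOCK_SCHED)
		return task_sched_runtime(p);

	task_cputime(p, &utime, &stime);

	switch (clkid) {
	case CPUCLOCK_PROF:	/* user + system time */
		return utime + stime;
	case CPUCLOCK_VIRT:	/* user time only */
		return utime;
	default:
		WARN_ON_ONCE(1);
	}
	return 0;
}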

static inline void store_samples(u64 *samples, u64 stime, u64 utime, u64 rtime)
{}

static void task_sample_cputime(struct task_struct *p, u64 *samples)
{}

static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
				       u64 *samples)
{}

/*
 * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
 * to avoid race conditions with concurrent updates to cputime.
 */
static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
{}
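
/*
 * A minimal sketch of the lock-free "only move forward" update described
 * above, built on atomic64_cmpxchg().  Illustrative only; the mainline helper
 * may differ in detail.
 */
static inline void __update_gt_cputime_sketch(atomic64_t *cputime, u64 sum_cputime)
{
	u64 curr = atomic64_read(cputime);

	/* Only ever advance the stored value, never move it backwards. */
	while (sum_cputime > curr) {
		u64 old = atomic64_cmpxchg(cputime, curr, sum_cputime);

		if (old == curr)
			break;	/* we won the race */
		curr = old;	/* someone else updated it, re-check */
	}
}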

static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic,
			      struct task_cputime *sum)
{}

/**
 * thread_group_sample_cputime - Sample cputime for a given task
 * @tsk:	Task for which cputime needs to be sampled
 * @samples:	Storage for time samples
 *
 * Called from sys_getitimer() to calculate the expiry time of an active
 * timer. That means group cputime accounting is already active. Called
 * with task sighand lock held.
 *
 * Updates @samples with an up-to-date sample of the thread group cputimes.
 */
void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples)
{}

/**
 * thread_group_start_cputime - Start cputime and return a sample
 * @tsk:	Task for which cputime needs to be started
 * @samples:	Storage for time samples
 *
 * The thread group cputime accounting is avoided when there are no posix
 * CPU timers armed. Before starting a timer it's required to check whether
 * the time accounting is active. If not, a full update of the atomic
 * accounting store needs to be done and the accounting enabled.
 *
 * Updates @samples with an up-to-date sample of the thread group cputimes.
 */
static void thread_group_start_cputime(struct task_struct *tsk, u64 *samples)
{}
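
/*
 * Illustrative sketch of the start-up path described above.  The fields used
 * (tsk->signal->cputimer.cputime_atomic, posix_cputimers.timers_active)
 * follow the naming used elsewhere in this file, but the exact sequence here
 * is an illustration, not the mainline body.
 */
static void thread_group_start_cputime_sketch(struct task_struct *tsk, u64 *samples)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	struct posix_cputimers *pct = &tsk->signal->posix_cputimers;

	/* timers_active is read locklessly, hence READ_ONCE/WRITE_ONCE. */
	if (!READ_ONCE(pct->timers_active)) {
		struct task_cputime sum;

		/*
		 * Accounting was off: do one full (expensive) walk of the
		 * thread group, seed the atomic store with it and only then
		 * mark the accounting active.
		 */
		thread_group_cputime(tsk, &sum);
		update_gt_cputime(&cputimer->cputime_atomic, &sum);
		WRITE_ONCE(pct->timers_active, true);
	}
	proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
}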

static void __thread_group_cputime(struct task_struct *tsk, u64 *samples)
{}

/*
 * Sample a process (thread group) clock for the given task clkid. If the
 * group's cputime accounting is already enabled, read the atomic
 * store. Otherwise a full update is required.  clkid is already validated.
 */
static u64 cpu_clock_sample_group(const clockid_t clkid, struct task_struct *p,
				  bool start)
{}

static int posix_cpu_clock_get(const clockid_t clock, struct timespec64 *tp)
{}

/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 * new timer already all-zeros initialized.
 */
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{}

static struct posix_cputimer_base *timer_base(struct k_itimer *timer,
					      struct task_struct *tsk)
{}

/*
 * Force recalculating the base earliest expiration on the next tick.
 * This will also re-evaluate the need to keep around the process wide
 * cputime counter and tick dependency and eventually shut these down
 * if necessary.
 */
static void trigger_base_recalc_expires(struct k_itimer *timer,
					struct task_struct *tsk)
{}

/*
 * Dequeue the timer and reset the base if it was its earliest expiration.
 * It makes sure the next tick recalculates the base next expiration so we
 * don't keep the costly process wide cputime counter around for a random
 * amount of time, along with the tick dependency.
 *
 * If another timer gets queued between this and the next tick, its
 * expiration will update the base next event if necessary on the next
 * tick.
 */
static void disarm_timer(struct k_itimer *timer, struct task_struct *p)
{}


/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_del(struct k_itimer *timer)
{}
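
/*
 * The TIMER_RETRY contract from the comment above, sketched from the
 * caller's side with a hypothetical helper.  The real retry loop lives in
 * the generic posix-timer core; this only illustrates the lock/retry dance.
 */
static int delete_cpu_timer_sketch(struct k_itimer *timer)
{
	int ret;

	for (;;) {
		spin_lock_irq(&timer->it_lock);
		ret = posix_cpu_timer_del(timer);
		spin_unlock_irq(&timer->it_lock);

		if (ret != TIMER_RETRY)
			return ret;

		/* The timer is in the middle of firing: back off and retry. */
		cpu_relax();
	}
}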

static void cleanup_timerqueue(struct timerqueue_head *head)
{}

/*
 * Clean out CPU timers which are still armed when a thread exits. The
 * timers are only removed from the list. No other updates are done. The
 * corresponding posix timers are still accessible, but cannot be rearmed.
 *
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct posix_cputimers *pct)
{}

/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{}

/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the sighand lock held.
 */
static void arm_timer(struct k_itimer *timer, struct task_struct *p)
{}
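
/*
 * A sketch of the ordered insert described above: each clock base keeps a
 * timerqueue sorted by expiry, and only when the new timer becomes the
 * earliest entry does the cached next-expiry value need lowering.  The
 * helper below is purely illustrative.
 */
static void arm_timer_sketch(struct posix_cputimer_base *base,
			     struct timerqueue_node *node, u64 expires)
{
	node->expires = expires;

	/*
	 * timerqueue_add() keeps the queue sorted by expiry and returns
	 * true when the new node became the earliest entry, i.e. when the
	 * cached next-event time may have to be lowered.
	 */
	if (timerqueue_add(&base->tqhead, node) && expires < base->nextevt)
		base->nextevt = expires;
}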

/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{}

/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
			       struct itimerspec64 *new, struct itimerspec64 *old)
{}

static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
{}

/* Upper bound on timers moved to the firing list in one pass */
#define MAX_COLLECTED	20

static u64 collect_timerqueue(struct timerqueue_head *head,
			      struct list_head *firing, u64 now)
{}
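
/*
 * A sketch of the bounded collection loop implied above: walk the sorted
 * queue from its head, move expired entries towards @firing (elided here),
 * stop after MAX_COLLECTED entries so a single tick never expires an
 * unbounded number of timers, and return the expiry of the first remaining
 * timer (U64_MAX once the queue is drained) as the new cache value.
 */
static u64 collect_timerqueue_sketch(struct timerqueue_head *head,
				     struct list_head *firing, u64 now)
{
	struct timerqueue_node *next;
	int collected = 0;

	while ((next = timerqueue_getnext(head))) {
		u64 expires = next->expires;

		/* Queue is sorted: the first future timer ends the walk. */
		if (collected == MAX_COLLECTED || now < expires)
			return expires;

		timerqueue_del(head, next);
		/*
		 * The real code links the containing cpu_timer onto @firing
		 * and marks it firing here; elided in this sketch.
		 */
		collected++;
	}

	return U64_MAX;
}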

static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
				    struct list_head *firing)
{}

static inline void check_dl_overrun(struct task_struct *tsk)
{}

static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
{}

/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the task's posix_cputimers timer queues onto the firing list.  Here we
 * update the expiry cache (bases[].nextevt) to reflect the remaining thread
 * CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
				struct list_head *firing)
{}

static inline void stop_process_timers(struct signal_struct *sig)
{}

static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
			     u64 *expires, u64 cur_time, int signo)
{}

/*
 * Check for any process-wide (thread group) CPU timers that have fired and
 * move them off the signal struct's timer queues onto the firing list.
 * Per-thread timers have already been taken off by check_thread_timers().
 */
static void check_process_timers(struct task_struct *tsk,
				 struct list_head *firing)
{}

/*
 * This is called from the signal code (via posixtimer_rearm)
 * when the last timer signal was delivered and we have to reload the timer.
 */
static void posix_cpu_timer_rearm(struct k_itimer *timer)
{}

/**
 * task_cputimers_expired - Check whether posix CPU timers are expired
 *
 * @samples:	Array of current samples for the CPUCLOCK clocks
 * @pct:	Pointer to a posix_cputimers container
 *
 * Returns true if any member of @samples is greater than the corresponding
 * member of @pct->bases[CLK].nextevt. False otherwise.
 */
static inline bool
task_cputimers_expired(const u64 *samples, struct posix_cputimers *pct)
{}
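
/*
 * A sketch of the comparison described in the kerneldoc above, assuming
 * CPUCLOCK_MAX clock bases and treating a sample that has reached the cached
 * expiry as expired; U64_MAX entries (nothing armed) effectively never
 * trigger.
 */
static bool task_cputimers_expired_sketch(const u64 *samples,
					  struct posix_cputimers *pct)
{
	int i;

	for (i = 0; i < CPUCLOCK_MAX; i++) {
		/* U64_MAX in nextevt means "no timer armed for this clock". */
		if (samples[i] >= pct->bases[i].nextevt)
			return true;
	}
	return false;
}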

/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:	The task (thread) being checked.
 *
 * Check the task and thread group timers.  If both are zero (there are no
 * timers set) return false.  Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times.  Return
 * true if a timer has expired, else return false.
 */
static inline bool fastpath_timer_check(struct task_struct *tsk)
{}

static void handle_posix_cpu_timers(struct task_struct *tsk);

#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
static void posix_cpu_timers_work(struct callback_head *work)
{}

/*
 * Invoked from the posix-timer core when a cancel operation failed because
 * the timer is marked firing. The caller holds rcu_read_lock(), which
 * protects the timer and the task which is expiring it from being freed.
 */
static void posix_cpu_timer_wait_running(struct k_itimer *timr)
{}

static void posix_cpu_timer_wait_running_nsleep(struct k_itimer *timr)
{}

/*
 * Clear existing posix CPU timers task work.
 */
void clear_posix_cputimers_work(struct task_struct *p)
{}

/*
 * Initialize posix CPU timers task work in init task. Out of line to
 * keep the callback static and to avoid header recursion hell.
 */
void __init posix_cputimers_init_work(void)
{}

/*
 * Note: All operations on tsk->posix_cputimer_work.scheduled happen either
 * in hard interrupt context or in task context with interrupts
 * disabled. Aside from that, the writer/reader interaction is always in the
 * context of the current task, which means it is strictly per CPU.
 */
static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk)
{}

static inline void __run_posix_cpu_timers(struct task_struct *tsk)
{}

static inline bool posix_cpu_timers_enable_work(struct task_struct *tsk,
						unsigned long start)
{}
#else /* CONFIG_POSIX_CPU_TIMERS_TASK_WORK */
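/*
 * Without task work support the expiry runs right here from the tick,
 * bracketed by the lockdep posix-timer annotations.
 */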
static inline void __run_posix_cpu_timers(struct task_struct *tsk)
{
	lockdep_posixtimer_enter();
	handle_posix_cpu_timers(tsk);
	lockdep_posixtimer_exit();
}

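/*
 * In this configuration the timer is expired from the timer interrupt on
 * some CPU, so a racing cancel only has to burn cycles until that interrupt
 * handler is done with the timer.
 */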
static void posix_cpu_timer_wait_running(struct k_itimer *timr)
{
	cpu_relax();
}

static void posix_cpu_timer_wait_running_nsleep(struct k_itimer *timr)
{
	spin_unlock_irq(&timr->it_lock);
	cpu_relax();
	spin_lock_irq(&timr->it_lock);
}

static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk)
{
	return false;
}

static inline bool posix_cpu_timers_enable_work(struct task_struct *tsk,
						unsigned long start)
{
	return true;
}
#endif /* CONFIG_POSIX_CPU_TIMERS_TASK_WORK */

static void handle_posix_cpu_timers(struct task_struct *tsk)
{}

/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(void)
{}
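
/*
 * A sketch of the flow the comment above describes: nothing to do if expiry
 * work is already queued, bail out cheaply when the fast path finds no
 * expired timer, otherwise hand off to __run_posix_cpu_timers() (inline
 * expiry or task work, depending on CONFIG_POSIX_CPU_TIMERS_TASK_WORK).
 */
static void run_posix_cpu_timers_sketch(void)
{
	struct task_struct *tsk = current;

	lockdep_assert_irqs_disabled();

	/*
	 * If the expiry work is already queued for this task there is no
	 * point in evaluating the fast path again.
	 */
	if (posix_cpu_timers_work_scheduled(tsk))
		return;

	/* No armed timer has reached its cached expiry: nothing to do. */
	if (!fastpath_timer_check(tsk))
		return;

	__run_posix_cpu_timers(tsk);
}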

/*
 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
 * The tsk->sighand->siglock must be held by the caller.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clkid,
			   u64 *newval, u64 *oldval)
{}

static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
			    const struct timespec64 *rqtp)
{}

static long posix_cpu_nsleep_restart(struct restart_block *restart_block);

static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
			    const struct timespec64 *rqtp)
{}

static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{}

#define PROCESS_CLOCK
#define THREAD_CLOCK

static int process_cpu_clock_getres(const clockid_t which_clock,
				    struct timespec64 *tp)
{}
static int process_cpu_clock_get(const clockid_t which_clock,
				 struct timespec64 *tp)
{}
static int process_cpu_timer_create(struct k_itimer *timer)
{}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
			      const struct timespec64 *rqtp)
{}
static int thread_cpu_clock_getres(const clockid_t which_clock,
				   struct timespec64 *tp)
{}
static int thread_cpu_clock_get(const clockid_t which_clock,
				struct timespec64 *tp)
{}
static int thread_cpu_timer_create(struct k_itimer *timer)
{}

const struct k_clock clock_posix_cpu = {};

const struct k_clock clock_process = {};

const struct k_clock clock_thread = {};