linux/kernel/sched/idle.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic entry points for the idle threads and
 * implementation of the idle task scheduling class.
 *
 * (NOTE: these are not related to SCHED_IDLE batch scheduled
 *        tasks which are handled in sched/fair.c)
 */

/* Linker adds these: start and end of __cpuidle functions */
extern char __cpuidle_text_start[], __cpuidle_text_end[];
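/* cpu_in_idle() below checks a saved program counter against this range. */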

/**
 * sched_idle_set_state - Record idle state for the current CPU.
 * @idle_state: State to record.
 */
void sched_idle_set_state(struct cpuidle_state *idle_state)
{
	idle_set_state(this_rq(), idle_state);
}

static int __read_mostly cpu_idle_force_poll;

void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}
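
/*
 * Illustrative pairing (not from this file): a caller that must keep the
 * CPU out of deep idle states brackets the critical window with
 *
 *	cpu_idle_poll_ctrl(true);
 *	... timing-sensitive work ...
 *	cpu_idle_poll_ctrl(false);
 *
 * cpu_idle_force_poll is a counter, so such windows may nest.
 */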

#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;

	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;

	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif
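
/*
 * Example: booting with "nohlt" on the kernel command line forces the
 * polling idle loop, and "hlt" restores the default, e.g. an
 * illustrative bootloader entry:
 *
 *	linux /boot/vmlinuz root=/dev/sda2 ro nohlt
 */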

static noinline int __cpuidle cpu_idle_poll(void)
{
	instrumentation_begin();
	trace_cpu_idle(0, smp_processor_id());
	stop_critical_timings();
	ct_cpuidle_enter();

	raw_local_irq_enable();
	while (!tif_need_resched() &&
	       (cpu_idle_force_poll || tick_check_broadcast_expired()))
		cpu_relax();
	raw_local_irq_disable();

	ct_cpuidle_exit();
	start_critical_timings();
	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
	instrumentation_end();

	return 1;
}

/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) {}
void __weak arch_cpu_idle_enter(void) {}
void __weak arch_cpu_idle_exit(void) {}
void __weak __noreturn arch_cpu_idle_dead(void) { while (1); }
void __weak arch_cpu_idle(void)
{
	cpu_idle_force_poll = 1;
}
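
/*
 * A real architecture override of arch_cpu_idle() executes the CPU's
 * low-power wait instruction (e.g. HLT on x86 or WFI on arm64); the
 * weak fallback above just forces the polling loop instead.
 */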

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST_IDLE
DEFINE_STATIC_KEY_FALSE(arch_needs_tick_broadcast);

static inline void cond_tick_broadcast_enter(void)
{
	if (static_branch_unlikely(&arch_needs_tick_broadcast))
		tick_broadcast_enter();
}

static inline void cond_tick_broadcast_exit(void)
{
	if (static_branch_unlikely(&arch_needs_tick_broadcast))
		tick_broadcast_exit();
}
#else
static inline void cond_tick_broadcast_enter(void) { }
static inline void cond_tick_broadcast_exit(void) { }
#endif
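
/*
 * Architectures whose CPU-local timer stops in deep idle states enable
 * arch_needs_tick_broadcast so the idle loop hands the tick over to the
 * broadcast device around the idle period via the helpers above.
 */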

/**
 * default_idle_call - Default CPU idle routine.
 *
 * To use when the cpuidle framework cannot be used.
 */
void __cpuidle default_idle_call(void)
{
	instrumentation_begin();
	if (!current_clr_polling_and_test()) {
		trace_cpu_idle(1, smp_processor_id());
		stop_critical_timings();

		ct_cpuidle_enter();
		arch_cpu_idle();
		ct_cpuidle_exit();

		start_critical_timings();
		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
	}
	local_irq_enable();
	instrumentation_end();
}

static int call_cpuidle_s2idle(struct cpuidle_driver *drv,
			       struct cpuidle_device *dev)
{
	if (current_clr_polling_and_test())
		return -EBUSY;

	return cpuidle_enter_s2idle(drv, dev);
}

static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
			int next_state)
{
	/*
	 * The idle task must be scheduled; it is pointless to go to idle,
	 * just update no idle residency and return.
	 */
	if (current_clr_polling_and_test()) {
		dev->last_residency_ns = 0;
		local_irq_enable();
		return -EBUSY;
	}

	/*
	 * Enter the idle state previously returned by the governor decision.
	 * This function will block until an interrupt occurs and will take
	 * care of re-enabling the local interrupts.
	 */
	return cpuidle_enter(drv, dev, next_state);
}

/**
 * cpuidle_idle_call - the main idle function
 *
 * NOTE: no locks or semaphores should be used here
 *
 * On architectures that support TIF_POLLING_NRFLAG, is called with polling
 * set, and it returns with polling set.  If it ever stops polling, it
 * must clear the polling bit.
 */
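/*
 * Abridged flow: if cpuidle is unavailable, stop the tick and fall back
 * to default_idle_call(); under suspend-to-idle, enter via
 * call_cpuidle_s2idle(); otherwise ask the governor for a state with
 * cpuidle_select(), enter it through call_cpuidle(), and let the
 * governor learn from the outcome via cpuidle_reflect().
 */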
static void cpuidle_idle_call(void)
{}

/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
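/*
 * Abridged flow: set the polling bit and enter nohz idle, then loop
 * until need_resched(): an offline CPU exits through
 * arch_cpu_idle_dead(), polling mode uses cpu_idle_poll(), everything
 * else goes through cpuidle_idle_call(). On exit, fold the resched
 * state, leave nohz idle and switch away via schedule_idle().
 */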
static void do_idle(void)
{}

bool cpu_in_idle(unsigned long pc)
{
	return pc >= (unsigned long)__cpuidle_text_start &&
		pc < (unsigned long)__cpuidle_text_end;
}
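
/*
 * Used e.g. by the NMI backtrace code to recognise CPUs that were idling
 * in a __cpuidle function when the backtrace NMI hit them.
 */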

struct idle_timer {
	struct hrtimer timer;
	int done;
};

static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
{
	struct idle_timer *it = container_of(timer, struct idle_timer, timer);

	WRITE_ONCE(it->done, 1);
	set_tsk_need_resched(current);

	return HRTIMER_NORESTART;
}

void play_idle_precise(u64 duration_ns, u64 latency_ns)
{}
EXPORT_SYMBOL_GPL(play_idle_precise);
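
/*
 * Illustrative call (cf. the powercap idle-injection users of this
 * export): a per-CPU SCHED_FIFO kthread injecting 25ms of idle time
 * with no latency bound:
 *
 *	play_idle_precise(25 * NSEC_PER_MSEC, U64_MAX);
 *
 * The caller is expected to be a kthread affine to a single CPU, and
 * duration_ns must be non-zero.
 */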

void cpu_startup_entry(enum cpuhp_state state)
{
	current->flags |= PF_IDLE;
	arch_cpu_idle_prepare();
	cpuhp_online_idle(state);
	while (1)
		do_idle();
}

/*
 * idle-task scheduling class.
 */

#ifdef CONFIG_SMP
static int
select_task_rq_idle(struct task_struct *p, int cpu, int flags)
{
	return task_cpu(p); /* IDLE tasks as never migrated */
}

static int
balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	return WARN_ON_ONCE(1); /* never called for the idle class */
}
#endif

/*
 * Idle tasks are unconditionally rescheduled:
 */
static void wakeup_preempt_idle(struct rq *rq, struct task_struct *p, int flags)
{
	resched_curr(rq);
}

static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{}

static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
{
	update_idle_core(rq);
	schedstat_inc(rq->sched_goidle);
}

#ifdef CONFIG_SMP
static struct task_struct *pick_task_idle(struct rq *rq)
{
	return rq->idle;
}
#endif

struct task_struct *pick_next_task_idle(struct rq *rq)
{
	struct task_struct *next = rq->idle;

	set_next_task_idle(rq, next, true);

	return next;
}

/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
	raw_spin_rq_unlock_irq(rq);
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
	dump_stack();
	raw_spin_rq_lock_irq(rq);
}

/*
 * scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * goes along full dynticks. Therefore no local assumption can be made
 * and everything must be accessed through the @rq and @curr passed in
 * parameters.
 */
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{}

static void switched_to_idle(struct rq *rq, struct task_struct *p)
{
	BUG();
}

static void
prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG();
}

static void update_curr_idle(struct rq *rq)
{}

/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 */
DEFINE_SCHED_CLASS(idle) = {

	/* no enqueue/yield_task for idle tasks */

	/* dequeue is not valid, we print a debug message there: */
	.dequeue_task		= dequeue_task_idle,

	.wakeup_preempt		= wakeup_preempt_idle,

	.pick_next_task		= pick_next_task_idle,
	.put_prev_task		= put_prev_task_idle,
	.set_next_task		= set_next_task_idle,

#ifdef CONFIG_SMP
	.balance		= balance_idle,
	.pick_task		= pick_task_idle,
	.select_task_rq		= select_task_rq_idle,
	.set_cpus_allowed	= set_cpus_allowed_common,
#endif

	.task_tick		= task_tick_idle,

	.prio_changed		= prio_changed_idle,
	.switched_to		= switched_to_idle,
	.update_curr		= update_curr_idle,
};
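
/*
 * The idle class sits at the bottom of the scheduling class hierarchy;
 * the core pick loop falls through to it only when no other class has a
 * runnable task, which is why pick_next_task_idle() can unconditionally
 * return rq->idle.
 */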