#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#include <asm/cputime.h>
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
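/*
 * There are no locks covering percpu hardirq/softirq time.
 * They are only modified on the corresponding CPU with interrupts
 * disabled, so writes are safe. Another CPU reading this CPU's IRQ
 * time can race with irq/vtime accounting here, and a slice of IRQ
 * time may be charged to the wrong task; that is accepted in place of
 * taking locks on every IRQ in account_system_time().
 */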
DEFINE_PER_CPU(struct irqtime, cpu_irqtime);
static int sched_clock_irqtime;
void enable_sched_clock_irqtime(void)
{ … }
void disable_sched_clock_irqtime(void)
{ … }
static void irqtime_account_delta(struct irqtime *irqtime, u64 delta,
                                  enum cpu_usage_stat idx)
{ … }
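/*
 * Called on hardirq and softirq entry/exit: charges the sched_clock()
 * delta since the previous event on this CPU to CPUTIME_IRQ or
 * CPUTIME_SOFTIRQ. Softirq time spent in ksoftirqd is deliberately left
 * to the scheduler so it keeps being accounted to that thread.
 */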
void irqtime_account_irq(struct task_struct *curr, unsigned int offset)
{ … }
static u64 irqtime_tick_accounted(u64 maxtime)
{ … }
#else
#define sched_clock_irqtime …
static u64 irqtime_tick_accounted(u64 dummy)
{
        return 0;
}
#endif
static inline void task_group_account_field(struct task_struct *p, int index,
                                            u64 tmp)
{ … }
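/*
 * Account user CPU time to a process.
 * @p: the process that the CPU time gets accounted to
 * @cputime: the CPU time spent in user space since the last update
 */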
void account_user_time(struct task_struct *p, u64 cputime)
{ … }
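/*
 * Account guest CPU time to a process.
 * @p: the process that the CPU time gets accounted to
 * @cputime: the CPU time spent in virtual machine since the last update
 */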
void account_guest_time(struct task_struct *p, u64 cputime)
{ … }
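/*
 * Account system CPU time to a process and desired cpustat field.
 * @p: the process that the CPU time gets accounted to
 * @cputime: the CPU time spent in kernel space since the last update
 * @index: the cpustat field that has to be updated
 */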
void account_system_index_time(struct task_struct *p,
                               u64 cputime, enum cpu_usage_stat index)
{ … }
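/*
 * Account system CPU time to a process.
 * @p: the process that the CPU time gets accounted to
 * @hardirq_offset: the offset to subtract from hardirq_count()
 * @cputime: the CPU time spent in kernel space since the last update
 */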
void account_system_time(struct task_struct *p, int hardirq_offset, u64 cputime)
{ … }
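/*
 * Account for involuntary wait time.
 * @cputime: the CPU time spent in involuntary wait
 */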
void account_steal_time(u64 cputime)
{ … }
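/*
 * Account for idle time.
 * @cputime: the CPU time spent in idle wait
 */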
void account_idle_time(u64 cputime)
{ … }
#ifdef CONFIG_SCHED_CORE
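/*
 * Account forceidle time due to core scheduling.
 *
 * REQUIRES: schedstat is enabled.
 */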
void __account_forceidle_time(struct task_struct *p, u64 delta)
{ … }
#endif
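/*
 * When paravirt steal accounting is available, deduct the time stolen
 * by the host from the current tick, capped at @maxtime.
 */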
static __always_inline u64 steal_account_process_time(u64 maxtime)
{ … }
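/*
 * Account how much elapsed time was spent in steal, IRQ, or softirq time.
 */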
static inline u64 account_other_time(u64 max)
{ … }
#ifdef CONFIG_64BIT
static inline u64 read_sum_exec_runtime(struct task_struct *t)
{ … }
#else
static u64 read_sum_exec_runtime(struct task_struct *t)
{
        u64 ns;
        struct rq_flags rf;
        struct rq *rq;

        /* 64-bit loads of sum_exec_runtime can tear on 32-bit: take the rq lock */
        rq = task_rq_lock(t, &rf);
        ns = t->se.sum_exec_runtime;
        task_rq_unlock(rq, t, &rf);

        return ns;
}
#endif
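/*
 * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
 * tasks (sum on group iteration) belonging to @tsk's group.
 */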
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{ … }
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
                                         int ticks)
{ … }
static void irqtime_account_idle_ticks(int ticks)
{ … }
#else
static inline void irqtime_account_idle_ticks(int ticks) { }
static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
                                                int nr_ticks) { }
#endif
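/*
 * Use precise platform statistics if available:
 */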
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
void vtime_account_irq(struct task_struct *tsk, unsigned int offset)
{
        unsigned int pc = irq_count() - offset;

        if (pc & HARDIRQ_OFFSET) {
                vtime_account_hardirq(tsk);
        } else if (pc & SOFTIRQ_OFFSET) {
                vtime_account_softirq(tsk);
        } else if (!IS_ENABLED(CONFIG_HAVE_VIRT_CPU_ACCOUNTING_IDLE) &&
                   is_idle_task(tsk)) {
                vtime_account_idle(tsk);
        } else {
                vtime_account_kernel(tsk);
        }
}
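/*
 * With native vtime accounting, utime/stime are already precise; no
 * tick-based adjustment against prev_cputime is needed.
 */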
void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
                    u64 *ut, u64 *st)
{
        *ut = curr->utime;
        *st = curr->stime;
}
void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
        *ut = p->utime;
        *st = p->stime;
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);
void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
        struct task_cputime cputime;

        thread_group_cputime(p, &cputime);
        *ut = cputime.utime;
        *st = cputime.stime;
}
#else
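/*
 * Account a single tick of CPU time.
 * @p: the process that the CPU time gets accounted to
 * @user_tick: indicates if the tick is a user or a system tick
 */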
void account_process_tick(struct task_struct *p, int user_tick)
{ … }
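/*
 * Account multiple ticks of idle time.
 * @ticks: number of stolen ticks
 */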
void account_idle_ticks(unsigned long ticks)
{ … }
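/*
 * Adjust tick based cputime random precision against scheduler runtime
 * accounting.
 */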
void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
                    u64 *ut, u64 *st)
{ … }
void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{ … }
EXPORT_SYMBOL_GPL(…);
void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{ … }
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static u64 vtime_delta(struct vtime *vtime)
{
        unsigned long long clock;

        clock = sched_clock();
        if (clock < vtime->starttime)
                return 0;

        return clock - vtime->starttime;
}
static u64 get_vtime_delta(struct vtime *vtime)
{
        u64 delta = vtime_delta(vtime);
        u64 other;

        /*
         * Vtime never has lost ticks; cap account_other_time() so
         * rounding errors cannot make the remaining delta negative.
         */
        other = account_other_time(delta);
        WARN_ON_ONCE(vtime->state == VTIME_INACTIVE);
        vtime->starttime += delta;

        return delta - other;
}
static void vtime_account_system(struct task_struct *tsk,
                                 struct vtime *vtime)
{
        vtime->stime += get_vtime_delta(vtime);
        if (vtime->stime >= TICK_NSEC) {
                account_system_time(tsk, irq_count(), vtime->stime);
                vtime->stime = 0;
        }
}
static void vtime_account_guest(struct task_struct *tsk,
                                struct vtime *vtime)
{
        vtime->gtime += get_vtime_delta(vtime);
        if (vtime->gtime >= TICK_NSEC) {
                account_guest_time(tsk, vtime->gtime);
                vtime->gtime = 0;
        }
}
static void __vtime_account_kernel(struct task_struct *tsk,
                                   struct vtime *vtime)
{
        /* We might have scheduled out from guest path */
        if (vtime->state == VTIME_GUEST)
                vtime_account_guest(tsk, vtime);
        else
                vtime_account_system(tsk, vtime);
}
void vtime_account_kernel(struct task_struct *tsk)
{
        struct vtime *vtime = &tsk->vtime;

        if (!vtime_delta(vtime))
                return;

        write_seqcount_begin(&vtime->seqcount);
        __vtime_account_kernel(tsk, vtime);
        write_seqcount_end(&vtime->seqcount);
}
void vtime_user_enter(struct task_struct *tsk)
{
        struct vtime *vtime = &tsk->vtime;

        write_seqcount_begin(&vtime->seqcount);
        vtime_account_system(tsk, vtime);
        vtime->state = VTIME_USER;
        write_seqcount_end(&vtime->seqcount);
}
void vtime_user_exit(struct task_struct *tsk)
{
        struct vtime *vtime = &tsk->vtime;

        write_seqcount_begin(&vtime->seqcount);
        vtime->utime += get_vtime_delta(vtime);
        if (vtime->utime >= TICK_NSEC) {
                account_user_time(tsk, vtime->utime);
                vtime->utime = 0;
        }
        vtime->state = VTIME_SYS;
        write_seqcount_end(&vtime->seqcount);
}
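/*
 * PF_VCPU is flipped inside the same seqcount write section as the
 * pending system time flush, so that task_gtime() observes a
 * consistent state and can safely catch up with the tickless delta.
 */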
void vtime_guest_enter(struct task_struct *tsk)
{
        struct vtime *vtime = &tsk->vtime;

        write_seqcount_begin(&vtime->seqcount);
        vtime_account_system(tsk, vtime);
        tsk->flags |= PF_VCPU;
        vtime->state = VTIME_GUEST;
        write_seqcount_end(&vtime->seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_enter);
void vtime_guest_exit(struct task_struct *tsk)
{
        struct vtime *vtime = &tsk->vtime;

        write_seqcount_begin(&vtime->seqcount);
        vtime_account_guest(tsk, vtime);
        tsk->flags &= ~PF_VCPU;
        vtime->state = VTIME_SYS;
        write_seqcount_end(&vtime->seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_exit);
void vtime_account_idle(struct task_struct *tsk)
{
        account_idle_time(get_vtime_delta(&tsk->vtime));
}
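/*
 * Flush the outgoing task's pending vtime at context switch and
 * restart accounting for the incoming task in the matching state.
 */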
void vtime_task_switch_generic(struct task_struct *prev)
{
        struct vtime *vtime = &prev->vtime;

        write_seqcount_begin(&vtime->seqcount);
        if (vtime->state == VTIME_IDLE)
                vtime_account_idle(prev);
        else
                __vtime_account_kernel(prev, vtime);
        vtime->state = VTIME_INACTIVE;
        vtime->cpu = -1;
        write_seqcount_end(&vtime->seqcount);

        vtime = &current->vtime;

        write_seqcount_begin(&vtime->seqcount);
        if (is_idle_task(current))
                vtime->state = VTIME_IDLE;
        else if (current->flags & PF_VCPU)
                vtime->state = VTIME_GUEST;
        else
                vtime->state = VTIME_SYS;
        vtime->starttime = sched_clock();
        vtime->cpu = smp_processor_id();
        write_seqcount_end(&vtime->seqcount);
}
void vtime_init_idle(struct task_struct *t, int cpu)
{
        struct vtime *vtime = &t->vtime;
        unsigned long flags;

        local_irq_save(flags);
        write_seqcount_begin(&vtime->seqcount);
        vtime->state = VTIME_IDLE;
        vtime->starttime = sched_clock();
        vtime->cpu = cpu;
        write_seqcount_end(&vtime->seqcount);
        local_irq_restore(flags);
}
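/*
 * Fetch the task's guest cputime, including the not-yet-flushed
 * tickless delta if it is currently running in a guest.
 */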
u64 task_gtime(struct task_struct *t)
{
        struct vtime *vtime = &t->vtime;
        unsigned int seq;
        u64 gtime;

        if (!vtime_accounting_enabled())
                return t->gtime;

        do {
                seq = read_seqcount_begin(&vtime->seqcount);

                gtime = t->gtime;
                if (vtime->state == VTIME_GUEST)
                        gtime += vtime->gtime + vtime_delta(vtime);

        } while (read_seqcount_retry(&vtime->seqcount, seq));

        return gtime;
}
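/*
 * Fetch cputime raw values from fields of task_struct and add up the
 * pending nohz execution time since the last accounting snapshot.
 */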
bool task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
{
        struct vtime *vtime = &t->vtime;
        unsigned int seq;
        u64 delta;
        int ret;

        if (!vtime_accounting_enabled()) {
                *utime = t->utime;
                *stime = t->stime;
                return false;
        }

        do {
                ret = false;
                seq = read_seqcount_begin(&vtime->seqcount);

                *utime = t->utime;
                *stime = t->stime;

                /* Task is sleeping or idle, nothing to add */
                if (vtime->state < VTIME_SYS)
                        continue;

                ret = true;
                delta = vtime_delta(vtime);

                /*
                 * Task runs either in user (including guest) or kernel
                 * space, add pending nohz time to the right place.
                 */
                if (vtime->state == VTIME_SYS)
                        *stime += vtime->stime + delta;
                else
                        *utime += vtime->utime + delta;
        } while (read_seqcount_retry(&vtime->seqcount, seq));

        return ret;
}
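/*
 * Fetch the target task's vtime state, or -EAGAIN if we raced with a
 * context switch and must retry against the updated rq->curr.
 */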
static int vtime_state_fetch(struct vtime *vtime, int cpu)
{
        int state = READ_ONCE(vtime->state);

        /* We raced against a context switch, fetch the kcpustat task again */
        if (vtime->cpu != cpu && vtime->cpu != -1)
                return -EAGAIN;

        /* Wait for a safe state: the prev task's cpustat may not be flushed yet */
        if (state == VTIME_INACTIVE)
                return -EAGAIN;

        return state;
}
static u64 kcpustat_user_vtime(struct vtime *vtime)
{
        if (vtime->state == VTIME_USER)
                return vtime->utime + vtime_delta(vtime);
        else if (vtime->state == VTIME_GUEST)
                return vtime->gtime + vtime_delta(vtime);

        return 0;
}
static int kcpustat_field_vtime(u64 *cpustat,
                                struct task_struct *tsk,
                                enum cpu_usage_stat usage,
                                int cpu, u64 *val)
{
        struct vtime *vtime = &tsk->vtime;
        unsigned int seq;

        do {
                int state;

                seq = read_seqcount_begin(&vtime->seqcount);

                state = vtime_state_fetch(vtime, cpu);
                if (state < 0)
                        return state;

                *val = cpustat[usage];

                /*
                 * Nice VS unnice cputime accounting may be inaccurate if
                 * the nice value has changed since the last vtime update.
                 * A proper fix would involve interrupting the target on
                 * nice updates, which is a no go on nohz_full.
                 */
                switch (usage) {
                case CPUTIME_SYSTEM:
                        if (state == VTIME_SYS)
                                *val += vtime->stime + vtime_delta(vtime);
                        break;
                case CPUTIME_USER:
                        if (task_nice(tsk) <= 0)
                                *val += kcpustat_user_vtime(vtime);
                        break;
                case CPUTIME_NICE:
                        if (task_nice(tsk) > 0)
                                *val += kcpustat_user_vtime(vtime);
                        break;
                case CPUTIME_GUEST:
                        if (state == VTIME_GUEST && task_nice(tsk) <= 0)
                                *val += vtime->gtime + vtime_delta(vtime);
                        break;
                case CPUTIME_GUEST_NICE:
                        if (state == VTIME_GUEST && task_nice(tsk) > 0)
                                *val += vtime->gtime + vtime_delta(vtime);
                        break;
                default:
                        break;
                }
        } while (read_seqcount_retry(&vtime->seqcount, seq));

        return 0;
}
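/*
 * Return one field of the target CPU's kcpustat, adding the pending
 * tickless delta of the task currently running there.
 */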
u64 kcpustat_field(struct kernel_cpustat *kcpustat,
                   enum cpu_usage_stat usage, int cpu)
{
        u64 *cpustat = kcpustat->cpustat;
        u64 val = cpustat[usage];
        struct rq *rq;
        int err;

        if (!vtime_accounting_enabled_cpu(cpu))
                return val;

        rq = cpu_rq(cpu);

        for (;;) {
                struct task_struct *curr;

                rcu_read_lock();
                curr = rcu_dereference(rq->curr);
                if (WARN_ON_ONCE(!curr)) {
                        rcu_read_unlock();
                        return cpustat[usage];
                }

                err = kcpustat_field_vtime(cpustat, curr, usage, cpu, &val);
                rcu_read_unlock();

                if (!err)
                        return val;

                cpu_relax();
        }
}
EXPORT_SYMBOL_GPL(kcpustat_field);
static int kcpustat_cpu_fetch_vtime(struct kernel_cpustat *dst,
                                    const struct kernel_cpustat *src,
                                    struct task_struct *tsk, int cpu)
{
        struct vtime *vtime = &tsk->vtime;
        unsigned int seq;

        do {
                u64 *cpustat;
                u64 delta;
                int state;

                seq = read_seqcount_begin(&vtime->seqcount);

                state = vtime_state_fetch(vtime, cpu);
                if (state < 0)
                        return state;

                *dst = *src;
                cpustat = dst->cpustat;

                /* Task is sleeping, dead or idle, nothing to add */
                if (state < VTIME_SYS)
                        continue;

                delta = vtime_delta(vtime);

                /*
                 * Task runs either in user (including guest) or kernel
                 * space, add pending nohz time to the right place.
                 */
                if (state == VTIME_SYS) {
                        cpustat[CPUTIME_SYSTEM] += vtime->stime + delta;
                } else if (state == VTIME_USER) {
                        if (task_nice(tsk) > 0)
                                cpustat[CPUTIME_NICE] += vtime->utime + delta;
                        else
                                cpustat[CPUTIME_USER] += vtime->utime + delta;
                } else {
                        WARN_ON_ONCE(state != VTIME_GUEST);
                        if (task_nice(tsk) > 0) {
                                cpustat[CPUTIME_GUEST_NICE] += vtime->gtime + delta;
                                cpustat[CPUTIME_NICE] += vtime->gtime + delta;
                        } else {
                                cpustat[CPUTIME_GUEST] += vtime->gtime + delta;
                                cpustat[CPUTIME_USER] += vtime->gtime + delta;
                        }
                }
        } while (read_seqcount_retry(&vtime->seqcount, seq));

        return 0;
}
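/*
 * Fetch a snapshot of the target CPU's kcpustat, including the pending
 * tickless delta of the task currently running there.
 */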
void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu)
{
        const struct kernel_cpustat *src = &kcpustat_cpu(cpu);
        struct rq *rq;
        int err;

        if (!vtime_accounting_enabled_cpu(cpu)) {
                *dst = *src;
                return;
        }

        rq = cpu_rq(cpu);

        for (;;) {
                struct task_struct *curr;

                rcu_read_lock();
                curr = rcu_dereference(rq->curr);
                if (WARN_ON_ONCE(!curr)) {
                        rcu_read_unlock();
                        *dst = *src;
                        return;
                }

                err = kcpustat_cpu_fetch_vtime(dst, src, curr, cpu);
                rcu_read_unlock();

                if (!err)
                        return;

                cpu_relax();
        }
}
EXPORT_SYMBOL_GPL(kcpustat_cpu_fetch);
#endif