#ifndef _LINUX_KERNEL_VTIME_H
#define _LINUX_KERNEL_VTIME_H
#include <linux/context_tracking_state.h>
#include <linux/sched.h>
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void vtime_account_kernel(struct task_struct *tsk);
extern void vtime_account_idle(struct task_struct *tsk);
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void vtime_user_enter(struct task_struct *tsk);
extern void vtime_user_exit(struct task_struct *tsk);
extern void vtime_guest_enter(struct task_struct *tsk);
extern void vtime_guest_exit(struct task_struct *tsk);
extern void vtime_init_idle(struct task_struct *tsk, int cpu);
#else
/*
 * !CONFIG_VIRT_CPU_ACCOUNTING_GEN: generic vtime is compiled out, so the
 * user/guest transition hooks and the idle-init hook degrade to these
 * stubs.  Bodies are elided in this view — presumably empty no-ops;
 * NOTE(review): confirm against the full header before relying on that.
 */
static inline void vtime_user_enter(struct task_struct *tsk) { … }
static inline void vtime_user_exit(struct task_struct *tsk) { … }
static inline void vtime_guest_enter(struct task_struct *tsk) { … }
static inline void vtime_guest_exit(struct task_struct *tsk) { … }
static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { … }
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
extern void vtime_account_irq(struct task_struct *tsk, unsigned int offset);
extern void vtime_account_softirq(struct task_struct *tsk);
extern void vtime_account_hardirq(struct task_struct *tsk);
extern void vtime_flush(struct task_struct *tsk);
#else
/*
 * Fallback stubs when the #ifdef branch above is not configured: the IRQ
 * accounting entry points and vtime_flush() become no-ops for callers.
 * Bodies are elided in this view — presumably empty; NOTE(review):
 * confirm against the full header.
 */
static inline void vtime_account_irq(struct task_struct *tsk, unsigned int offset) { … }
static inline void vtime_account_softirq(struct task_struct *tsk) { … }
static inline void vtime_account_hardirq(struct task_struct *tsk) { … }
static inline void vtime_flush(struct task_struct *tsk) { … }
#endif
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE)
static inline bool vtime_accounting_enabled_this_cpu(void) { return true; }
extern void vtime_task_switch(struct task_struct *prev);
/*
 * Native vtime: entry to guest mode.  Kernel time accrued so far is
 * accounted first, then PF_VCPU is set on current.
 * NOTE(review): the statement order looks intentional (flush kernel time
 * before the task is flagged as running guest code) — do not reorder
 * without checking the accounting paths that test PF_VCPU.
 */
static __always_inline void vtime_account_guest_enter(void)
{
vtime_account_kernel(current);
current->flags |= PF_VCPU;
}
/*
 * Native vtime: exit from guest mode.  Time accrued while PF_VCPU was
 * set is accounted via vtime_account_kernel() before the flag is
 * cleared — mirror image of vtime_account_guest_enter() above.
 * NOTE(review): statement order is significant; accounting runs while
 * PF_VCPU is still set.
 */
static __always_inline void vtime_account_guest_exit(void)
{
vtime_account_kernel(current);
current->flags &= ~PF_VCPU;
}
#elif defined(CONFIG_VIRT_CPU_ACCOUNTING_GEN)
/*
 * Generic vtime accounting delegates its global on/off state to the
 * context-tracking subsystem.
 */
static inline bool vtime_accounting_enabled(void)
{
	bool on = context_tracking_enabled();

	return on;
}
/*
 * Per-CPU variant: true iff context tracking is enabled on @cpu.
 */
static inline bool vtime_accounting_enabled_cpu(int cpu)
{
	bool on = context_tracking_enabled_cpu(cpu);

	return on;
}
/*
 * Local-CPU variant: true iff context tracking is enabled on the
 * current CPU.
 */
static inline bool vtime_accounting_enabled_this_cpu(void)
{
	bool on = context_tracking_enabled_this_cpu();

	return on;
}
extern void vtime_task_switch_generic(struct task_struct *prev);
/*
 * Hook called on context switch: forwards to the generic implementation,
 * but only when vtime accounting is enabled on this CPU.
 */
static inline void vtime_task_switch(struct task_struct *prev)
{
	if (!vtime_accounting_enabled_this_cpu())
		return;

	vtime_task_switch_generic(prev);
}
/*
 * Generic vtime: entry to guest mode.  When accounting is active on this
 * CPU the full vtime_guest_enter() path runs; otherwise only the PF_VCPU
 * flag is set on current so tick-based accounting can attribute time to
 * the guest.
 */
static __always_inline void vtime_account_guest_enter(void)
{
	if (!vtime_accounting_enabled_this_cpu())
		current->flags |= PF_VCPU;
	else
		vtime_guest_enter(current);
}
/*
 * Generic vtime: exit from guest mode.  Mirror of
 * vtime_account_guest_enter(): either run the full vtime_guest_exit()
 * path or just clear PF_VCPU on current.
 */
static __always_inline void vtime_account_guest_exit(void)
{
	if (!vtime_accounting_enabled_this_cpu())
		current->flags &= ~PF_VCPU;
	else
		vtime_guest_exit(current);
}
#else
/*
 * Neither NATIVE nor GEN vtime accounting is configured: these fallbacks
 * keep the callers compiling.  Bodies are elided in this view —
 * presumably "return false" / no-op / PF_VCPU-only variants of the
 * definitions above; NOTE(review): confirm against the full header.
 */
static inline bool vtime_accounting_enabled_this_cpu(void) { … }
static inline void vtime_task_switch(struct task_struct *prev) { … }
static __always_inline void vtime_account_guest_enter(void)
{ … }
static __always_inline void vtime_account_guest_exit(void)
{ … }
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
extern void irqtime_account_irq(struct task_struct *tsk, unsigned int offset);
#else
static inline void irqtime_account_irq(struct task_struct *tsk, unsigned int offset) { }
#endif
/*
 * Combined accounting entry/exit helpers for softirq and hardirq
 * contexts, built unconditionally (the vtime_*/irqtime_* calls they
 * presumably make compile down to stubs when the corresponding configs
 * are off).  Bodies are elided in this view — NOTE(review): confirm the
 * exact vtime_account_irq()/irqtime_account_irq() offsets against the
 * full header.
 */
static inline void account_softirq_enter(struct task_struct *tsk)
{ … }
static inline void account_softirq_exit(struct task_struct *tsk)
{ … }
static inline void account_hardirq_enter(struct task_struct *tsk)
{ … }
static inline void account_hardirq_exit(struct task_struct *tsk)
{ … }
#endif