// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree and other debugging details
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 */

/*
 * This allows printing both to /sys/kernel/debug/sched/debug and
 * to the console
 */
#define SEQ_printf(m, x...)
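
/*
 * A minimal sketch of what SEQ_printf() presumably expands to, based on
 * the comment above: route to the seq_file when one is given, else to
 * the console. The real (elided) body may differ; names suffixed with
 * "_sketch" here and below are illustrative, not upstream.
 */
#define SEQ_printf_sketch(m, x...)		\
do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		pr_cont(x);			\
} while (0)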

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{}

static unsigned long nsec_low(unsigned long long nsec)
{}

#define SPLIT_NS(x)
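
/*
 * A sketch of the nsec helpers and SPLIT_NS(): split a nanosecond value
 * into whole milliseconds and the sub-millisecond remainder so callers
 * can print "%Ld.%06ld". Illustrative only; the elided bodies above are
 * authoritative.
 */
static long long nsec_high_sketch(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);
	return nsec;
}

static unsigned long nsec_low_sketch(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS_SKETCH(x) nsec_high_sketch(x), nsec_low_sketch(x)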

#define SCHED_FEAT

static const char * const sched_feat_names[] =;

#undef SCHED_FEAT
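
/*
 * Sketch of the X-macro pattern at work here: features.h invokes
 * SCHED_FEAT(name, enabled) once per feature, so stringifying the first
 * argument builds the name table. The "_sketch" array is illustrative.
 */
#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names_sketch[] = {
#include "features.h"
};

#undef SCHED_FEAT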

static int sched_feat_show(struct seq_file *m, void *v)
{}

#ifdef CONFIG_JUMP_LABEL

#define jump_label_key__true
#define jump_label_key__false

#define SCHED_FEAT

struct static_key sched_feat_keys[__SCHED_FEAT_NR] =;

#undef SCHED_FEAT
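
/*
 * Sketch of the jump-label variant of the same X-macro trick: paste each
 * feature's compile-time default into a static_key initializer. The
 * "jl_key__*" helpers and "_sketch" array are illustrative stand-ins.
 */
#define jl_key__true	STATIC_KEY_INIT_TRUE
#define jl_key__false	STATIC_KEY_INIT_FALSE
#define SCHED_FEAT(name, enabled)	\
	jl_key__##enabled ,

struct static_key sched_feat_keys_sketch[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT
#undef jl_key__true
#undef jl_key__false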

static void sched_feat_disable(int i)
{}

static void sched_feat_enable(int i)
{}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* CONFIG_JUMP_LABEL */

static int sched_feat_set(char *cmp)
{}
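
/*
 * Sketch of the parsing sched_feat_set() likely performs: an optional
 * "NO_" prefix selects disable vs. enable, and the remainder is matched
 * against sched_feat_names[]. A hypothetical helper, not the elided
 * upstream body.
 */
static int sched_feat_set_sketch(char *cmp)
{
	int neg = 0, i;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
	if (i < 0)
		return i;

	if (neg) {
		sysctl_sched_features &= ~(1UL << i);
		sched_feat_disable(i);
	} else {
		sysctl_sched_features |= (1UL << i);
		sched_feat_enable(i);
	}

	return 0;
}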

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{}

static int sched_feat_open(struct inode *inode, struct file *filp)
{}

static const struct file_operations sched_feat_fops =;
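
/*
 * A typical file_operations table for a seq_file-backed debugfs entry;
 * the elided initializer above presumably wires the handlers up along
 * these lines (a sketch, not the verified upstream table):
 */
static const struct file_operations sched_feat_fops_sketch = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};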

#ifdef CONFIG_SMP

static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{}

static int sched_scaling_show(struct seq_file *m, void *v)
{}

static int sched_scaling_open(struct inode *inode, struct file *filp)
{}

static const struct file_operations sched_scaling_fops =;

#endif /* CONFIG_SMP */

#ifdef CONFIG_PREEMPT_DYNAMIC

static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{}

static int sched_dynamic_show(struct seq_file *m, void *v)
{}

static int sched_dynamic_open(struct inode *inode, struct file *filp)
{}

static const struct file_operations sched_dynamic_fops =;

#endif /* CONFIG_PREEMPT_DYNAMIC */

__read_mostly bool sched_debug_verbose;

#ifdef CONFIG_SMP
static struct dentry           *sd_dentry;

static ssize_t sched_verbose_write(struct file *filp, const char __user *ubuf,
				  size_t cnt, loff_t *ppos)
{}
#else
#define sched_verbose_write
#endif

static const struct file_operations sched_verbose_fops =;

static const struct seq_operations sched_debug_sops;

static int sched_debug_open(struct inode *inode, struct file *filp)
{}

static const struct file_operations sched_debug_fops =;

static struct dentry *debugfs_sched;

static __init int sched_init_debug(void)
{}
late_initcall(sched_init_debug);
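
/*
 * Sketch of what sched_init_debug() plausibly does: create the
 * /sys/kernel/debug/sched directory and populate it with the files
 * whose fops are defined above. File names, modes and the exact
 * debugfs helpers used are assumptions for illustration.
 */
static __init int sched_init_debug_sketch(void)
{
	debugfs_sched = debugfs_create_dir("sched", NULL);

	debugfs_create_file("features", 0644, debugfs_sched, NULL,
			    &sched_feat_fops);
	debugfs_create_file("verbose", 0644, debugfs_sched,
			    &sched_debug_verbose, &sched_verbose_fops);
	debugfs_create_file("debug", 0444, debugfs_sched, NULL,
			    &sched_debug_fops);

	return 0;
}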

#ifdef CONFIG_SMP

static cpumask_var_t		sd_sysctl_cpus;

static int sd_flags_show(struct seq_file *m, void *v)
{}

static int sd_flags_open(struct inode *inode, struct file *file)
{}

static const struct file_operations sd_flags_fops =;

static void register_sd(struct sched_domain *sd, struct dentry *parent)
{}

void update_sched_domain_debugfs(void)
{}

void dirty_sched_domain_sysctl(int cpu)
{}

#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{}
#endif

#ifdef CONFIG_CGROUP_SCHED
static DEFINE_SPINLOCK(sched_debug_lock);
static char group_path[PATH_MAX];

static void task_group_path(struct task_group *tg, char *path, int plen)
{}

/*
 * Only one SEQ_printf_task_group_path() caller at a time can use the
 * full-length group_path[] buffer for the cgroup path. Other
 * simultaneous callers must fall back to a shorter stack buffer; a
 * "..." suffix is appended whenever the output fills that buffer
 * completely, to flag possible path-name truncation.
 */
#define SEQ_printf_task_group_path(m, tg, fmt...)
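
/*
 * Sketch of the mechanism the comment above describes: try to take
 * sched_debug_lock and use the full-length group_path[]; on contention,
 * fall back to a small stack buffer, where "..." overwrites the final
 * bytes so it only shows when the formatted path filled the whole
 * buffer. Buffer size and layout are assumptions for illustration.
 */
#define SEQ_printf_task_group_path_sketch(m, tg, fmt...)		\
{									\
	if (spin_trylock(&sched_debug_lock)) {				\
		task_group_path(tg, group_path, sizeof(group_path));	\
		SEQ_printf_sketch(m, fmt, group_path);			\
		spin_unlock(&sched_debug_lock);				\
	} else {							\
		char buf[128];						\
		char *bufend = buf + sizeof(buf) - 3;			\
		task_group_path(tg, buf, bufend - buf);			\
		strcpy(bufend - 1, "...");				\
		SEQ_printf_sketch(m, fmt, buf);				\
	}								\
}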
#endif

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{}

static void print_cpu(struct seq_file *m, int cpu)
{}

static const char *sched_tunable_scaling_names[] =;
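
/*
 * The scaling-mode names presumably mirror the sysctl_sched_tunable_scaling
 * enum (SCHED_TUNABLESCALING_NONE/_LOG/_LINEAR); a sketch of the elided
 * table:
 */
static const char *sched_tunable_scaling_names_sketch[] = {
	"none",
	"logarithmic",
	"linear",
};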

static void sched_debug_header(struct seq_file *m)
{}

static int sched_debug_show(struct seq_file *m, void *v)
{}

void sysrq_sched_debug_show(void)
{}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is CPU 0.
 * In a hotplugged system some CPUs, including CPU 0, may be missing, so
 * we have to use cpumask_* to iterate over the CPUs.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{}
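
/*
 * Sketch of the offset mapping described above: offset 0 yields the
 * header token, every later offset advances through cpu_online_mask,
 * and "CPU n" is encoded as the pointer value n + 2. An illustrative
 * reconstruction, not the elided upstream body.
 */
static void *sched_debug_start_sketch(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;	/* position 1: the header */

	n--;				/* 0-based CPU slot */
	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)		/* position n + 2: CPU n */
		return (void *)(unsigned long)(n + 2);

	return NULL;
}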

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{}

static void sched_debug_stop(struct seq_file *file, void *data)
{}

static const struct seq_operations sched_debug_sops =;

#define __PS(S, F)
#define __P(F)
#define P(F)
#define PM(F, M)
#define __PSN(S, F)
#define __PN(F)
#define PN(F)
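
/*
 * Sketch of what this macro family presumably does: __PS() emits one
 * "name : value" line, P()/PM() pull fields out of task_struct *p, and
 * the *N variants push nanosecond values through SPLIT_NS(). Field
 * widths are assumptions; the "_SKETCH" names are illustrative.
 */
#define __PS_SKETCH(S, F)	SEQ_printf_sketch(m, "%-45s:%21Ld\n", S, (long long)(F))
#define __P_SKETCH(F)		__PS_SKETCH(#F, F)
#define P_SKETCH(F)		__PS_SKETCH(#F, p->F)
#define PM_SKETCH(F, M)		__PS_SKETCH(#F, p->F & (M))
#define __PSN_SKETCH(S, F)	SEQ_printf_sketch(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS_SKETCH((long long)(F)))
#define __PN_SKETCH(F)		__PSN_SKETCH(#F, F)
#define PN_SKETCH(F)		__PSN_SKETCH(#F, p->F)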
#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{}
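
/*
 * Sketch of the output shape: one line per NUMA node with task and
 * group private/shared fault counts. The exact format string is an
 * assumption.
 */
static void print_numa_stats_sketch(struct seq_file *m, int node,
		unsigned long tsf, unsigned long tpf,
		unsigned long gsf, unsigned long gpf)
{
	SEQ_printf_sketch(m, "numa_faults node=%d ", node);
	SEQ_printf_sketch(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
	SEQ_printf_sketch(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}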
#endif
static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{}

void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
						  struct seq_file *m)
{}

void proc_sched_set_task(struct task_struct *p)
{}

void resched_latency_warn(int cpu, u64 latency)
{}
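
/*
 * Sketch of a plausible resched_latency_warn(): a heavily ratelimited
 * WARN when need_resched has been set far longer than expected. The
 * one-hour interval, the message text and the ticks_without_resched
 * field are assumptions for illustration.
 */
static void resched_latency_warn_sketch(int cpu, u64 latency)
{
	static DEFINE_RATELIMIT_STATE(latency_check_ratelimit, 60 * 60 * HZ, 1);

	WARN(__ratelimit(&latency_check_ratelimit),
	     "sched: CPU %d need_resched set for > %llu ns (%d ticks) without schedule\n",
	     cpu, latency, cpu_rq(cpu)->ticks_without_resched);
}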