/* linux/kernel/trace/trace_sched_wakeup.c */

// SPDX-License-Identifier: GPL-2.0
/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <[email protected]>
 * Copyright (C) 2008 Ingo Molnar <[email protected]>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <trace/events/sched.h>
#include "trace.h"

static struct trace_array	*wakeup_trace;
static int __read_mostly	tracer_enabled;

static struct task_struct	*wakeup_task;
static int			wakeup_cpu;
static int			wakeup_current_cpu;
static unsigned			wakeup_prio =;
static bool			wakeup_rt;
static bool			wakeup_dl;
static bool			tracing_dl;

static arch_spinlock_t wakeup_lock =;

static void wakeup_reset(struct trace_array *tr);
static void __wakeup_reset(struct trace_array *tr);
static int start_func_tracer(struct trace_array *tr, int graph);
static void stop_func_tracer(struct trace_array *tr, int graph);

/* Snapshot of trace flags saved while this tracer owns the instance. */
static int save_flags;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * is_graph() - is the per-instance "display-graph" trace option set?
 * (Both macro bodies were garbled away in this extract; restored per
 * upstream kernel/trace/trace_sched_wakeup.c.  Without the graph tracer
 * compiled in, the option can never be set, hence constant false.)
 */
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
# define is_graph(tr) false
#endif

#ifdef CONFIG_FUNCTION_TRACER

static bool function_enabled;

/*
 * Prologue for the wakeup function tracers.
 *
 * Returns 1 if it is OK to continue, and preemption
 *            is disabled and data->disabled is incremented.
 *         0 if the trace is to be ignored, and preemption
 *            is not disabled and data->disabled is
 *            kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
/*
 * Per the contract documented above: returns 1 when tracing should
 * proceed (preemption disabled, data->disabled bumped), 0 otherwise.
 * NOTE(review): body elided in this extract — see upstream
 * kernel/trace/trace_sched_wakeup.c for the implementation.
 */
static int
func_prolog_preempt_disable(struct trace_array *tr,
			    struct trace_array_cpu **data,
			    unsigned int *trace_ctx)
{}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/*
 * React to the "display-graph" option being toggled (presumably by
 * restarting the function/graph tracer in the new mode — body elided
 * in this extract; confirm against upstream).
 */
static int wakeup_display_graph(struct trace_array *tr, int set)
{}

/*
 * Function-graph entry callback for the wakeup tracer.
 * NOTE(review): body elided in this extract.
 */
static int wakeup_graph_entry(struct ftrace_graph_ent *trace,
			      struct fgraph_ops *gops)
{}

/*
 * Function-graph return callback for the wakeup tracer.
 * NOTE(review): body elided in this extract.
 */
static void wakeup_graph_return(struct ftrace_graph_ret *trace,
				struct fgraph_ops *gops)
{}

static struct fgraph_ops fgraph_wakeup_ops =;

/*
 * Iterator open/close hooks (graph-tracer build).
 * NOTE(review): bodies elided in this extract — upstream sets up /
 * tears down graph-trace iterator state here; confirm against source.
 */
static void wakeup_trace_open(struct trace_iterator *iter)
{}

static void wakeup_trace_close(struct trace_iterator *iter)
{}

/*
 * NOTE(review): macro value elided in this extract — upstream defines
 * this as an OR of TRACE_GRAPH_PRINT_* output flags; restore from the
 * kernel source before building.
 */
#define GRAPH_TRACER_FLAGS

/*
 * Output-formatting hooks; with display-graph set these defer to the
 * graph printer, otherwise to the default trace output (bodies elided
 * in this extract — confirm against upstream).
 */
static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{}

static void wakeup_print_header(struct seq_file *s)
{}
#endif /* else CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
/*
 * Per-function ftrace callback for the wakeup tracer (see the comment
 * above: a dedicated callback keeps overhead down).
 * NOTE(review): body elided in this extract.
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
		   struct ftrace_ops *op, struct ftrace_regs *fregs)
{}

/*
 * Register the function or function-graph callbacks for this instance.
 * NOTE(review): body elided in this extract.
 */
static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{}

/* Unregister whichever callbacks register_wakeup_function() installed. */
static void unregister_wakeup_function(struct trace_array *tr, int graph)
{}

/*
 * Handle the TRACE_ITER_FUNCTION flag flipping while this tracer is
 * active.  NOTE(review): body elided in this extract.
 */
static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{}
#else /* CONFIG_FUNCTION_TRACER */
/*
 * !CONFIG_FUNCTION_TRACER: function tracing is not available, so these
 * are no-op stubs that report success.
 */
static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_wakeup_function(struct trace_array *tr, int graph) { }
static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* else CONFIG_FUNCTION_TRACER */

#ifndef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * !CONFIG_FUNCTION_GRAPH_TRACER: no graph output is possible, so defer
 * every line to the generic trace printer.
 */
static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

/* No graph iterator state to manage — open/close are no-ops. */
static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }

/* Fall back to the standard latency-trace header. */
static void wakeup_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#endif /* !CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Record one function-trace entry, dispatching to the graph or flat
 * function path depending on the display-graph option.
 * NOTE(review): body elided in this extract.
 */
static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned int trace_ctx)
{}

/*
 * Tracer flag_changed hook — validate/apply trace-option changes while
 * this tracer owns the instance.  NOTE(review): body elided.
 */
static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
{}

/*
 * Start/stop the function (graph != 0: function-graph) tracer backing
 * this tracer instance.  NOTE(review): bodies elided in this extract.
 */
static int start_func_tracer(struct trace_array *tr, int graph)
{}

static void stop_func_tracer(struct trace_array *tr, int graph)
{}

/*
 * Should this new latency be reported/recorded?
 */
/*
 * Decide whether the measured wakeup latency (delta) beats the current
 * recorded maximum and should be reported.
 * NOTE(review): body elided in this extract.
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{}

/*
 * Tracepoint probe: the traced task migrated — presumably updates
 * wakeup_current_cpu so the switch probe still matches (body elided;
 * confirm against upstream).
 */
static void
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
{}

/*
 * Emit a sched-switch event into the trace buffer.
 * NOTE(review): body elided in this extract.
 */
static void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned int trace_ctx)
{}

/*
 * Emit a sched-wakeup event into the trace buffer.
 * NOTE(review): body elided in this extract.
 */
static void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned int trace_ctx)
{}

/*
 * Tracepoint probe on sched_switch: when the traced task is scheduled
 * in, close the latency measurement started at wakeup time.
 * notrace: runs inside the scheduler, must not recurse into ftrace.
 * NOTE(review): body elided in this extract.
 */
static void notrace
probe_wakeup_sched_switch(void *ignore, bool preempt,
			  struct task_struct *prev, struct task_struct *next,
			  unsigned int prev_state)
{}

/*
 * Reset tracer state; __wakeup_reset presumably assumes wakeup_lock is
 * held while wakeup_reset takes it (bodies elided — confirm upstream).
 */
static void __wakeup_reset(struct trace_array *tr)
{}

static void wakeup_reset(struct trace_array *tr)
{}

/*
 * Tracepoint probe on sched_wakeup: if @p outranks the task currently
 * being traced (RT/deadline policy dependent), start timing its wakeup
 * latency.  NOTE(review): body elided in this extract.
 */
static void
probe_wakeup(void *ignore, struct task_struct *p)
{}

/*
 * Register the wakeup/migrate/switch tracepoint probes and enable
 * tracing.  NOTE(review): body elided in this extract.
 */
static void start_wakeup_tracer(struct trace_array *tr)
{}

/* Disable tracing and unregister the tracepoint probes (body elided). */
static void stop_wakeup_tracer(struct trace_array *tr)
{}

/* Only one wakeup-family tracer may be active at a time. */
static bool wakeup_busy;

/*
 * Common init for the wakeup/wakeup_rt/wakeup_dl variants; callers set
 * wakeup_rt/wakeup_dl before delegating here.
 * NOTE(review): bodies elided in this extract.
 */
static int __wakeup_tracer_init(struct trace_array *tr)
{}

static int wakeup_tracer_init(struct trace_array *tr)
{}

static int wakeup_rt_tracer_init(struct trace_array *tr)
{}

static int wakeup_dl_tracer_init(struct trace_array *tr)
{}

/* Tracer teardown / start / stop callbacks (bodies elided). */
static void wakeup_tracer_reset(struct trace_array *tr)
{}

static void wakeup_tracer_start(struct trace_array *tr)
{}

static void wakeup_tracer_stop(struct trace_array *tr)
{}

/*
 * The three tracer definitions registered with the tracing core:
 * "wakeup" (any task), "wakeup_rt" (RT tasks), "wakeup_dl" (deadline
 * tasks).  All three initializers were garbled to "=;" in this
 * extract; restored per upstream kernel/trace/trace_sched_wakeup.c —
 * verify the field set against the kernel version being built.
 */
static struct tracer wakeup_tracer __read_mostly =
{
	.name		= "wakeup",
	.init		= wakeup_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

static struct tracer wakeup_rt_tracer __read_mostly =
{
	.name		= "wakeup_rt",
	.init		= wakeup_rt_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

static struct tracer wakeup_dl_tracer __read_mostly =
{
	.name		= "wakeup_dl",
	.init		= wakeup_dl_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

/*
 * Boot-time registration of the three wakeup tracers with the tracing
 * core (via core_initcall below).
 * NOTE(review): body elided in this extract — upstream registers
 * wakeup_tracer, wakeup_rt_tracer and wakeup_dl_tracer in turn.
 */
__init static int init_wakeup_tracer(void)
{}
core_initcall(init_wakeup_tracer);