// SPDX-License-Identifier: GPL-2.0
/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <[email protected]>
 * Copyright (C) 2008 Ingo Molnar <[email protected]>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>

#include "trace.h"

#include <trace/events/preemptirq.h>

#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
static struct trace_array		*irqsoff_trace __read_mostly;
static int				tracer_enabled __read_mostly;

/* Set while this CPU is inside a critical section currently being timed */
static DEFINE_PER_CPU(int, tracing_cpu);

/* Serializes the recording of a new max latency across CPUs */
static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

/* Which events the current tracer reacts to: irqs-off, preempt-off or both */
static int trace_type __read_mostly;

/* trace_flags saved at init time so they can be restored on reset */
static int save_flags;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(int pc)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && pc);
}
#else
# define preempt_trace(pc) (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
	return -EINVAL;
}
# define is_graph(tr) false
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare,
 * and whatever happens together will also happen on its own, so this
 * doesn't decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp	unsigned long max_sequence;
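
/*
 * A sketch of how the sequence count is used (see check_critical_timing()
 * below, not a separate code path): a measurement is only promoted to a
 * new max if no other CPU recorded a max while it was in flight, and
 * recording one bumps max_sequence to invalidate every other in-flight
 * measurement:
 *
 *	if (data->critical_sequence != max_sequence)
 *		goto out_unlock;	// someone else recorded a max
 *	...
 *	max_sequence++;			// invalidate in-flight measurements
 */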

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *            incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *            is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
			   struct trace_array_cpu **data,
			   unsigned long *flags)
{
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return 0;

	local_save_flags(*flags);
	/*
	 * Slight chance to get a false positive on tracing_cpu,
	 * although I'd rather have a false positive than a false
	 * negative, which is just blocking traces.
	 */
	if (!irqs_disabled_flags(*flags) && !preempt_count())
		return 0;

	*data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);

	if (likely(disabled == 1))
		return 1;

	atomic_dec(&(*data)->disabled);

	return 0;
}
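
/*
 * Typical caller pattern (a sketch of what irqsoff_tracer_call() below
 * does; the atomic_dec() must balance the increment taken on success):
 *
 *	if (!func_prolog_dec(tr, &data, &flags))
 *		return;
 *	...record the event...
 *	atomic_dec(&data->disabled);
 */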

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	unsigned int trace_ctx;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	trace_ctx = tracing_gen_ctx_flags(flags);

	trace_function(tr, ip, parent_ip, trace_ctx);

	atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
	int cpu;

	if (!(is_graph(tr) ^ set))
		return 0;

	stop_irqsoff_tracer(irqsoff_trace, !set);

	for_each_possible_cpu(cpu)
		per_cpu(tracing_cpu, cpu) = 0;

	tr->max_latency = 0;
	tracing_reset_online_cpus(&irqsoff_trace->array_buffer);

	return start_irqsoff_tracer(irqsoff_trace, set);
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace,
			       struct fgraph_ops *gops)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	unsigned int trace_ctx;
	int ret;

	if (ftrace_graph_ignore_func(gops, trace))
		return 0;
	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	if (!func_prolog_dec(tr, &data, &flags))
		return 0;

	trace_ctx = tracing_gen_ctx_flags(flags);
	ret = __trace_graph_entry(tr, trace, trace_ctx);
	atomic_dec(&data->disabled);

	return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace,
				 struct fgraph_ops *gops)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	unsigned int trace_ctx;

	ftrace_graph_addr_finish(gops, trace);

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	trace_ctx = tracing_gen_ctx_flags(flags);
	__trace_graph_return(tr, trace, trace_ctx);
	atomic_dec(&data->disabled);
}

static struct fgraph_ops fgraph_ops = {
	.entryfunc		= &irqsoff_graph_entry,
	.retfunc		= &irqsoff_graph_return,
};

static void irqsoff_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
	else
		iter->private = NULL;
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_REL_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
	struct trace_array *tr = irqsoff_trace;

	if (is_graph(tr))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned int trace_ctx)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, trace_ctx);
	else
		trace_function(tr, ip, parent_ip, trace_ctx);
}

#else
#define __trace_function trace_function

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}

static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	u64 T0, T1, delta;
	unsigned long flags;
	unsigned int trace_ctx;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	trace_ctx = tracing_gen_ctx();

	if (!report_latency(tr, delta))
		goto out;

	raw_spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(tr, delta))
		goto out_unlock;

	__trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
	/* Skip 5 functions to get to the irq/preempt enable function */
	__trace_stack(tr, trace_ctx, 5);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tr->max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	__trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
}

static nokprobe_inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = per_cpu_ptr(tr->array_buffer.data, cpu);

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	__trace_function(tr, ip, parent_ip, tracing_gen_ctx());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}

static nokprobe_inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	data = per_cpu_ptr(tr->array_buffer.data, cpu);

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	trace_ctx = tracing_gen_ctx();
	__trace_function(tr, ip, parent_ip, trace_ctx);
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}

/* start and stop critical timings, used to suspend tracing (in idle) */
void start_critical_timings(void)
{
	if (preempt_trace(preempt_count()) || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);
NOKPROBE_SYMBOL(start_critical_timings);

void stop_critical_timings(void)
{
	if (preempt_trace(preempt_count()) || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);
NOKPROBE_SYMBOL(stop_critical_timings);
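
/*
 * Sketch of the expected call pattern from the idle path (simplified,
 * not the verbatim cpuidle code): the idle loop suspends the critical
 * timings so that time spent sleeping is not reported as an
 * irqs-off/preempt-off latency:
 *
 *	stop_critical_timings();
 *	...enter the idle state with irqs disabled...
 *	start_critical_timings();
 */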

#ifdef CONFIG_FUNCTION_TRACER
static bool function_enabled;

static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&fgraph_ops);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph(&fgraph_ops);
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_irqsoff_function(tr, is_graph(tr), 1);
	else
		unregister_irqsoff_function(tr, is_graph(tr));
	return 1;
}
#else
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (irqsoff_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return irqsoff_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}

static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_irqsoff_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_irqsoff_function(tr, graph);
}

/* Only one of the irqsoff/preemptoff/preemptirqsoff tracers may run at a time */
static bool irqsoff_busy;

static int __irqsoff_tracer_init(struct trace_array *tr)
{
	if (irqsoff_busy)
		return -EBUSY;

	save_flags = tr->trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
	/* without pause, we will produce garbage if another latency occurs */
	set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, 1);

	tr->max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();

	ftrace_init_array_ops(tr, irqsoff_tracer_call);

	/* Only toplevel instance supports graph tracing */
	if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
				      is_graph(tr))))
		printk(KERN_ERR "failed to start irqsoff tracer\n");

	irqsoff_busy = true;
	return 0;
}

static void __irqsoff_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
	int pause_flag = save_flags & TRACE_ITER_PAUSE_ON_TRACE;

	stop_irqsoff_tracer(tr, is_graph(tr));

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, pause_flag);
	ftrace_reset_array_ops(tr);

	irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
/*
 * We are only interested in hardirq on/off events:
 */
void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace(preempt_count()) && irq_trace())
		stop_critical_timing(a0, a1);
}
NOKPROBE_SYMBOL(tracer_hardirqs_on);

void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace(preempt_count()) && irq_trace())
		start_critical_timing(a0, a1);
}
NOKPROBE_SYMBOL(tracer_hardirqs_off);
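
/*
 * Note on the preempt_trace()/irq_trace() interplay above: when the
 * combined preemptirqsoff tracer selects both event types, an irqs-on
 * event does not stop the measurement while preemption is still
 * disabled; the critical section is only closed once both irqs and
 * preemption have been re-enabled.
 */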

static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	return __irqsoff_tracer_init(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}

static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
void tracer_preempt_on(unsigned long a0, unsigned long a1)
{
	if (preempt_trace(preempt_count()) && !irq_trace())
		stop_critical_timing(a0, a1);
}

void tracer_preempt_off(unsigned long a0, unsigned long a1)
{
	if (preempt_trace(preempt_count()) && !irq_trace())
		start_critical_timing(a0, a1);
}

static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static void preemptoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= preemptoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static void preemptirqsoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= preemptirqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
#endif

__init static int init_irqsoff_tracer(void)
{
#ifdef CONFIG_IRQSOFF_TRACER
	register_tracer(&irqsoff_tracer);
#endif
#ifdef CONFIG_PREEMPT_TRACER
	register_tracer(&preemptoff_tracer);
#endif
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
	register_tracer(&preemptirqsoff_tracer);
#endif

	return 0;
}
core_initcall(init_irqsoff_tracer);
#endif /* CONFIG_IRQSOFF_TRACER || CONFIG_PREEMPT_TRACER */