linux/kernel/trace/trace.c

// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <[email protected]>
 * Copyright (C) 2008 Ingo Molnar <[email protected]>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <[email protected]>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/utsname.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/panic_notifier.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>
#include <linux/fsnotify.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>

#include <asm/setup.h> /* COMMAND_LINE_SIZE */

#include "trace.h"
#include "trace_output.h"

#ifdef CONFIG_FTRACE_STARTUP_TEST
/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring buffer, such as trace_printk(), could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If boot-time tracing (including tracers/events set up via the kernel
 * cmdline) is running, we do not want to run the selftests.
 */
bool __read_mostly tracing_selftest_disabled;

void __init disable_tracing_selftest(const char *reason)
{}
#else
#define tracing_selftest_running
#define tracing_selftest_disabled
#endif

/* Pipe tracepoints to printk */
static struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static bool tracepoint_printk_stop_on_boot __initdata;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] =;

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurs.
 */
DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled =;

cpumask_var_t __read_mostly	tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it by either specifying
 * "ftrace_dump_on_oops" on the kernel command line or setting
 * /proc/sys/kernel/ftrace_dump_on_oops.
 * Set it to 1 to dump the buffers of all CPUs.
 * Set it to 2 to dump only the buffer of the CPU that triggered the oops.
 * Set it to an instance name to dump that specific trace instance.
 * Dumping multiple instances is also supported; instance names are
 * separated by commas.
 */
/* Set to string format zero to disable by default */
char ftrace_dump_on_oops[MAX_TRACER_SIZE] =;
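
/*
 * Example settings (illustrative; "foo" and "bar" stand in for real
 * instance names):
 *
 *   ftrace_dump_on_oops=1         dump the buffers of all CPUs
 *   ftrace_dump_on_oops=2         dump only the oopsing CPU's buffer
 *   ftrace_dump_on_oops=foo,bar   dump the "foo" and "bar" instances
 *
 * or at run time:  echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 */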

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {};

union trace_eval_map_item;

struct trace_eval_map_tail {};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {};

static union trace_eval_map_item *trace_eval_maps;
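
/*
 * Illustrative walker (a sketch, not an in-tree helper): step through
 * the saved arrays using the head/tail layout described above. The
 * field names (head.length, tail.next) follow that description, and a
 * real caller would hold trace_eval_mutex for the whole walk.
 */
static __maybe_unused void example_walk_eval_maps(void)
{
	union trace_eval_map_item *ptr = trace_eval_maps;

	while (ptr) {
		unsigned long len = ptr->head.length;

		ptr++;				/* step past the head item */
		while (len--)
			ptr++;			/* each of these is a saved map */
		ptr = ptr->tail.next;		/* tail links the next array */
	}
}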
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

int tracing_set_tracer(struct trace_array *tr, const char *buf);
static void ftrace_trace_userstack(struct trace_array *tr,
				   struct trace_buffer *buffer,
				   unsigned int trace_ctx);

static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;
static bool snapshot_at_boot;

static char boot_instance_info[COMMAND_LINE_SIZE] __initdata;
static int boot_instance_index;

static char boot_snapshot_info[COMMAND_LINE_SIZE] __initdata;
static int boot_snapshot_index;

static int __init set_cmdline_ftrace(char *str)
{}
__setup();

int ftrace_dump_on_oops_enabled(void)
{}

static int __init set_ftrace_dump_on_oops(char *str)
{}
__setup();

static int __init stop_trace_on_warning(char *str)
{}
__setup();

static int __init boot_alloc_snapshot(char *str)
{}
__setup();

static int __init boot_snapshot(char *str)
{}
__setup();

static int __init boot_instance(char *str)
{}
__setup();

static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{}
__setup();

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{}
__setup();

static int __init set_tracepoint_printk(char *str)
{}
__setup();

static int __init set_tracepoint_printk_stop(char *str)
{}
__setup();

unsigned long long ns2usecs(u64 nsec)
{}

static void
trace_process_export(struct trace_export *export,
	       struct ring_buffer_event *event, int flag)
{}

static DEFINE_MUTEX(ftrace_export_lock);

static struct trace_export __rcu *ftrace_exports_list __read_mostly;

static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);

static inline void ftrace_exports_enable(struct trace_export *export)
{}

static inline void ftrace_exports_disable(struct trace_export *export)
{}

static void ftrace_exports(struct ring_buffer_event *event, int flag)
{}

static inline void
add_trace_export(struct trace_export **list, struct trace_export *export)
{}

static inline int
rm_trace_export(struct trace_export **list, struct trace_export *export)
{}

static inline void
add_ftrace_export(struct trace_export **list, struct trace_export *export)
{}

static inline int
rm_ftrace_export(struct trace_export **list, struct trace_export *export)
{}

int register_ftrace_export(struct trace_export *export)
{}
EXPORT_SYMBOL_GPL();

int unregister_ftrace_export(struct trace_export *export)
{}
EXPORT_SYMBOL_GPL();

/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace =;

void trace_set_ring_buffer_expanded(struct trace_array *tr)
{}

LIST_HEAD();

int trace_array_get(struct trace_array *this_tr)
{}

static void __trace_array_put(struct trace_array *this_tr)
{}

/**
 * trace_array_put - Decrement the reference counter for this trace array.
 * @this_tr : pointer to the trace array
 *
 * NOTE: Use this when we no longer need the trace array returned by
 * trace_array_get_by_name(). This ensures the trace array can be later
 * destroyed.
 *
 */
void trace_array_put(struct trace_array *this_tr)
{}
EXPORT_SYMBOL_GPL();

int tracing_check_open_get_tr(struct trace_array *tr)
{}

int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct trace_buffer *buffer,
			      struct ring_buffer_event *event)
{}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @filtered_no_pids: The list of pids not to be traced
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids,
		       struct trace_pid_list *filtered_no_pids,
		       struct task_struct *task)
{}

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * When adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork, and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{}
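
/*
 * Illustrative sketch of the semantics above (not the in-tree body):
 * on fork, @self is the parent and the child is added only if the
 * parent is already filtered; on exit, @self is NULL and the task is
 * removed. Assumes the trace_pid_list_set()/trace_pid_list_clear()
 * helpers declared in trace.h.
 */
static __maybe_unused void
example_pid_fork_exit(struct trace_pid_list *pid_list,
		      struct task_struct *self, struct task_struct *task)
{
	if (!pid_list)
		return;

	if (self) {
		/* fork: only track children of parents that are listed */
		if (trace_find_filtered_pid(pid_list, self->pid))
			trace_pid_list_set(pid_list, task->pid);
		return;
	}

	/* exit: drop the task's pid from the list */
	trace_pid_list_clear(pid_list, task->pid);
}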

/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (the actual pid + 1, so that pid zero can be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{}
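
/*
 * Illustrative decode of the pid+1 cookie used by the iterators above
 * (a sketch mirroring what trace_pid_show() is expected to do):
 * subtracting one recovers the real pid, so pid zero can be displayed
 * while NULL still terminates the iteration.
 */
static __maybe_unused int example_pid_seq_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}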

/* 128 should be much more than enough */
#define PID_BUF_SIZE

int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{}

static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
{}

u64 ftrace_now(int cpu)
{}

/**
 * tracing_is_enabled - Show if global_trace has been enabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled", which can be used in fast paths such as
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number (16384 entries):
 * if a dump on oops happens, not having to wait for all that
 * output is much appreciated. Anyway, this is configurable at
 * both boot time and run time.
 */
#define TRACE_BUF_SIZE_DEFAULT

static unsigned long		trace_buf_size =;

/* trace_types holds a link list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX();

/*
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is only low-level
 * protection. The validity of the events (which are returned by
 * ring_buffer_peek() etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the event producer.
 *   B) the page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different cpu
 * ring buffers concurrently.
 *
 * They do not distinguish read-only from read-consume access;
 * multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{}

static inline void trace_access_unlock(int cpu)
{}

static inline void trace_access_lock_init(void)
{}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
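
/*
 * Illustrative use of the primitives above (a sketch, not an in-tree
 * caller): a consuming reader serializes on the cpu it drains, so the
 * producer and other readers stay coherent.
 */
static __maybe_unused void
example_consume_one_cpu(struct trace_buffer *buffer, int cpu)
{
	trace_access_lock(cpu);
	ring_buffer_consume(buffer, cpu, NULL, NULL);
	trace_access_unlock(cpu);
}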

#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct trace_buffer *buffer,
				 unsigned int trace_ctx,
				 int skip, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned int trace_ctx,
				      int skip, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
					unsigned int trace_ctx,
					int skip, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned long trace_ctx,
				      int skip, struct pt_regs *regs)
{
}

#endif

static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
		  int type, unsigned int trace_ctx)
{}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct trace_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned int trace_ctx)
{}

void tracer_tracing_on(struct trace_array *tr)
{}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{}
EXPORT_SYMBOL_GPL();

static __always_inline void
__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
{}

int __trace_array_puts(struct trace_array *tr, unsigned long ip,
		       const char *str, int size)
{}
EXPORT_SYMBOL_GPL();

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{}
EXPORT_SYMBOL_GPL();

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{}
EXPORT_SYMBOL_GPL();

#ifdef CONFIG_TRACER_SNAPSHOT
static void tracing_snapshot_instance_cond(struct trace_array *tr,
					   void *cond_data)
{}

void tracing_snapshot_instance(struct trace_array *tr)
{}

/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot either with
 * tracing_snapshot_alloc(), or manually with:
 * echo 1 > /sys/kernel/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, this will stop tracing,
 * basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{}
EXPORT_SYMBOL_GPL();

/**
 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
 * @tr:		The tracing instance to snapshot
 * @cond_data:	The data to be tested conditionally, and possibly saved
 *
 * This is the same as tracing_snapshot() except that the snapshot is
 * conditional - the snapshot will only happen if the
 * cond_snapshot.update() implementation receiving the cond_data
 * returns true, which means that the trace array's cond_snapshot
 * update() operation used the cond_data to determine whether the
 * snapshot should be taken, and if it was, presumably saved it along
 * with the snapshot.
 */
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{}
EXPORT_SYMBOL_GPL();

/**
 * tracing_cond_snapshot_data - get the user data associated with a snapshot
 * @tr:		The tracing instance
 *
 * When the user enables a conditional snapshot using
 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
 * with the snapshot.  This accessor is used to retrieve it.
 *
 * Should not be called from cond_snapshot.update(), since it takes
 * the tr->max_lock lock, which the code calling
 * cond_snapshot.update() has already taken.
 *
 * Returns the cond_data associated with the trace array's snapshot.
 */
void *tracing_cond_snapshot_data(struct trace_array *tr)
{}
EXPORT_SYMBOL_GPL();

static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
					struct array_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct array_buffer *buf, unsigned long val);

int tracing_alloc_snapshot_instance(struct trace_array *tr)
{}

static void free_snapshot(struct trace_array *tr)
{}

static int tracing_arm_snapshot_locked(struct trace_array *tr)
{}

int tracing_arm_snapshot(struct trace_array *tr)
{}

void tracing_disarm_snapshot(struct trace_array *tr)
{}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{}
EXPORT_SYMBOL_GPL();

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{}
EXPORT_SYMBOL_GPL();
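
/*
 * Illustrative pairing (a sketch): allocate the snapshot buffer once
 * from sleepable context, then take snapshots from the hot path.
 */
static __maybe_unused void example_snapshot_on_trigger(bool trigger)
{
	if (tracing_alloc_snapshot())	/* may sleep; do this at setup time */
		return;

	if (trigger)
		tracing_snapshot();	/* cheap buffer swap on the hot path */
}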

/**
 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
 * @tr:		The tracing instance
 * @cond_data:	User data to associate with the snapshot
 * @update:	Implementation of the cond_snapshot update function
 *
 * Check whether the conditional snapshot for the given instance has
 * already been enabled, or if the current tracer is already using a
 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
 * save the cond_data and update function inside.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
				 cond_update_fn_t update)
{}
EXPORT_SYMBOL_GPL();

/**
 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
 * @tr:		The tracing instance
 *
 * Check whether the conditional snapshot for the given instance is
 * enabled; if so, free the cond_snapshot associated with it,
 * otherwise return -EINVAL.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_disable(struct trace_array *tr)
{}
EXPORT_SYMBOL_GPL();
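
/*
 * Illustrative cond_snapshot update callback (hypothetical policy,
 * using the cond_update_fn_t signature from trace.h): @cond_data is
 * the value handed to tracing_snapshot_cond() at trigger time, and
 * returning true lets the snapshot proceed. A caller would register
 * it with tracing_snapshot_cond_enable(tr, NULL, example_cond_update).
 */
static __maybe_unused bool example_cond_update(struct trace_array *tr,
					       void *cond_data)
{
	unsigned long latency = (unsigned long)cond_data;

	return latency > 1000;	/* made-up threshold, in arbitrary units */
}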
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	return NULL;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
{
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
	return false;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#define free_snapshot
#define tracing_arm_snapshot_locked
#endif /* CONFIG_TRACER_SNAPSHOT */

void tracer_tracing_off(struct trace_array *tr)
{}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{}
EXPORT_SYMBOL_GPL();
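
/*
 * Illustrative debugging pattern (a sketch): freeze the ring buffer as
 * soon as an anomaly is seen, so the events leading up to it are kept
 * for post-mortem reading through the tracefs "trace" file.
 */
static __maybe_unused void example_stop_on_anomaly(int status)
{
	if (status < 0)
		tracing_off();
}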

void disable_trace_on_warning(void)
{}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
bool tracer_tracing_is_on(struct trace_array *tr)
{}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{}
EXPORT_SYMBOL_GPL();

static int __init set_buf_size(char *str)
{}
__setup();

static int __init set_tracing_thresh(char *str)
{}
__setup();

unsigned long nsecs_to_usecs(unsigned long nsecs)
{}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the evals (enum) were defined.
 */
#undef C
#define C(a, b)

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] =;

static struct {} trace_clocks[] =;

bool trace_clock_in_ns(struct trace_array *tr)
{}

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{}
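
/*
 * Illustrative read loop (a sketch): pull one whitespace-separated
 * token from a user buffer with the parser helpers above. The buffer
 * size of 64 is arbitrary.
 */
static __maybe_unused ssize_t
example_parse_token(const char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read;

	if (trace_parser_get_init(&parser, 64))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);
	if (read >= 0 && trace_parser_loaded(&parser)) {
		/* parser.buffer now holds one NUL-terminated token */
	}

	trace_parser_put(&parser);
	return read;
}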

/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{}

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
static const struct file_operations tracing_max_lat_fops;

#ifdef LATENCY_FS_NOTIFY

static struct workqueue_struct *fsnotify_wq;

static void latency_fsnotify_workfn(struct work_struct *work)
{}

static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
{}

static void trace_create_maxlat_file(struct trace_array *tr,
				     struct dentry *d_tracer)
{}

__init static int latency_fsnotify_init(void)
{}

late_initcall_sync(latency_fsnotify_init);

void latency_fsnotify(struct trace_array *tr)
{}

#else /* !LATENCY_FS_NOTIFY */

#define trace_create_maxlat_file

#endif

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 * @cond_data: User data associated with a conditional snapshot
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
	      void *cond_data)
{}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{}

#endif /* CONFIG_TRACER_MAX_TRACE */

struct pipe_wait {};

static bool wait_pipe_cond(void *data)
{}

static int wait_on_pipe(struct trace_iterator *iter, int full)
{}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static bool selftests_can_run;

struct trace_selftests {};

static LIST_HEAD(postponed_selftests);

static int save_selftest(struct tracer *type)
{}

static int run_tracer_selftest(struct tracer *type)
{}

static int do_run_tracer_selftest(struct tracer *type)
{}

static __init int init_trace_selftests(void)
{}
core_initcall(init_trace_selftests);
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
static inline int do_run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

static void add_tracer_options(struct trace_array *tr, struct tracer *t);

static void __init apply_trace_boot_options(void);

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int __init register_tracer(struct tracer *type)
{}

static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
{}

void tracing_reset_online_cpus(struct array_buffer *buf)
{}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus_unlocked(void)
{}

void tracing_reset_all_online_cpus(void)
{}

int is_tracing_stopped(void)
{}

static void tracing_start_tr(struct trace_array *tr)
{}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{}

static void tracing_stop_tr(struct trace_array *tr)
{}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{}

/*
 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
 * simplifies those functions and keeps them in sync.
 */
enum print_line_t trace_handle_return(struct trace_seq *s)
{}
EXPORT_SYMBOL_GPL();
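
/*
 * Illustrative tail of an event print handler (a sketch): write into
 * the seq buffer, then let trace_handle_return() map an overflow to
 * TRACE_TYPE_PARTIAL_LINE and success to TRACE_TYPE_HANDLED.
 */
static __maybe_unused enum print_line_t
example_print_handler(struct trace_iterator *iter)
{
	trace_seq_printf(&iter->seq, "example event\n");

	return trace_handle_return(&iter->seq);
}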

static unsigned short migration_disable_value(void)
{}

unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
{}

struct ring_buffer_event *
trace_buffer_lock_reserve(struct trace_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned int trace_ctx)
{}

DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
DEFINE_PER_CPU(int, trace_buffered_event_cnt);
static int trace_buffered_event_ref;

/**
 * trace_buffered_event_enable - enable buffering events
 *
 * When events are being filtered, it is quicker to write the event
 * data into a temporary buffer if there is a likely chance that it
 * will not be committed. Discarding an event from the ring buffer
 * is not as fast as committing one, and is much slower than copying
 * the buffered data into a commit.
 *
 * When an event is to be filtered, allocate per cpu buffers to
 * write the event data into, and if the event is filtered and discarded
 * it is simply dropped, otherwise, the entire data is to be committed
 * in one shot.
 */
void trace_buffered_event_enable(void)
{}

static void enable_trace_buffered_event(void *data)
{}

static void disable_trace_buffered_event(void *data)
{}

/**
 * trace_buffered_event_disable - disable buffering events
 *
 * When a filter is removed, it is faster to not use the buffered
 * events, and to commit directly into the ring buffer. Free up
 * the temp buffers when there are no more users. This requires
 * special synchronization with current events.
 */
void trace_buffered_event_disable(void)
{}
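
/*
 * Illustrative ref-counted pairing (a sketch): each installed filter
 * enables buffering once and disables it again on removal, so the
 * per-cpu temp buffers are freed only when the last user is gone.
 */
static __maybe_unused void example_filter_buffering(bool install)
{
	if (install)
		trace_buffered_event_enable();
	else
		trace_buffered_event_disable();
}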

static struct trace_buffer *temp_buffer;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
			  struct trace_event_file *trace_file,
			  int type, unsigned long len,
			  unsigned int trace_ctx)
{}
EXPORT_SYMBOL_GPL();

static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock);
static DEFINE_MUTEX(tracepoint_printk_mutex);

static void output_printk(struct trace_event_buffer *fbuffer)
{}

int tracepoint_printk_sysctl(const struct ctl_table *table, int write,
			     void *buffer, size_t *lenp,
			     loff_t *ppos)
{}

void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
{}
EXPORT_SYMBOL_GPL();

/*
 * Skip 3:
 *
 *   trace_buffer_unlock_commit_regs()
 *   trace_event_buffer_commit()
 *   trace_event_raw_event_xxx()
 */
#define STACK_SKIP

void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct trace_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned int trace_ctx,
				     struct pt_regs *regs)
{}

/*
 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
 */
void
trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
				   struct ring_buffer_event *event)
{}

void
trace_function(struct trace_array *tr, unsigned long ip,
	       unsigned long parent_ip, unsigned int trace_ctx)
{}

#ifdef CONFIG_STACKTRACE

/* Allow 4 levels of nesting: normal, softirq, irq, NMI */
#define FTRACE_KSTACK_NESTING

#define FTRACE_KSTACK_ENTRIES

struct ftrace_stack {};

struct ftrace_stacks {};

static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);

static void __ftrace_trace_stack(struct trace_buffer *buffer,
				 unsigned int trace_ctx,
				 int skip, struct pt_regs *regs)
{}

static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned int trace_ctx,
				      int skip, struct pt_regs *regs)
{}

void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
		   int skip)
{}

/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 * @skip: Number of functions to skip (helper handlers)
 */
void trace_dump_stack(int skip)
{}
EXPORT_SYMBOL_GPL();

#ifdef CONFIG_USER_STACKTRACE_SUPPORT
static DEFINE_PER_CPU(int, user_stack_count);

static void
ftrace_trace_userstack(struct trace_array *tr,
		       struct trace_buffer *buffer, unsigned int trace_ctx)
{}
#else /* CONFIG_USER_STACKTRACE_SUPPORT */
static void ftrace_trace_userstack(struct trace_array *tr,
				   struct trace_buffer *buffer,
				   unsigned int trace_ctx)
{
}
#endif /* !CONFIG_USER_STACKTRACE_SUPPORT */

#endif /* CONFIG_STACKTRACE */

static inline void
func_repeats_set_delta_ts(struct func_repeats_entry *entry,
			  unsigned long long delta)
{}

void trace_last_func_repeats(struct trace_array *tr,
			     struct trace_func_repeats *last_info,
			     unsigned int trace_ctx)
{}

/* created for use with alloc_percpu */
struct trace_buffer_struct {};

static struct trace_buffer_struct __percpu *trace_percpu_buffer;

/*
 * This allows for lockless recording.  If we're nested too deeply, then
 * this returns NULL.
 */
static char *get_trace_buf(void)
{}

static void put_trace_buf(void)
{}

static int alloc_percpu_trace_buffer(void)
{}

static int buffers_allocated;

void trace_printk_init_buffers(void)
{}
EXPORT_SYMBOL_GPL();

void trace_printk_start_comm(void)
{}

static void trace_printk_start_stop_comm(int enabled)
{}

/**
 * trace_vbprintk - write binary msg to tracing buffer
 * @ip:    The address of the caller
 * @fmt:   The string format to write to the buffer
 * @args:  Arguments for @fmt
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{}
EXPORT_SYMBOL_GPL();

__printf(3, 0)
static int
__trace_array_vprintk(struct trace_buffer *buffer,
		      unsigned long ip, const char *fmt, va_list args)
{}

__printf(3, 0)
int trace_array_vprintk(struct trace_array *tr,
			unsigned long ip, const char *fmt, va_list args)
{}

/**
 * trace_array_printk - Print a message to a specific instance
 * @tr: The instance trace_array descriptor
 * @ip: The instruction pointer that this is called from.
 * @fmt: The format to print (printf format)
 *
 * If a subsystem sets up its own instance, they have the right to
 * printk strings into their tracing instance buffer using this
 * function. Note, this function will not write into the top level
 * buffer (use trace_printk() for that), as writing into the top level
 * buffer should only have events that can be individually disabled.
 * trace_printk() is only used for debugging a kernel, and should never
 * be incorporated in normal use.
 *
 * trace_array_printk() can be used, as it will not add noise to the
 * top level tracing buffer.
 *
 * Note, trace_array_init_printk() must be called on @tr before this
 * can be used.
 */
__printf(3, 0)
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...)
{}
EXPORT_SYMBOL_GPL();

/**
 * trace_array_init_printk - Initialize buffers for trace_array_printk()
 * @tr: The trace array to initialize the buffers for
 *
 * As trace_array_printk() only writes into instances, they are OK to
 * have in the kernel (unlike trace_printk()). This needs to be called
 * before trace_array_printk() can be used on a trace_array.
 */
int trace_array_init_printk(struct trace_array *tr)
{}
EXPORT_SYMBOL_GPL();
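
/*
 * Illustrative usage (a sketch): a subsystem writes into its own
 * instance and never the top-level buffer. The instance @tr would
 * come from trace_array_get_by_name().
 */
static __maybe_unused void example_instance_printk(struct trace_array *tr)
{
	if (trace_array_init_printk(tr))
		return;

	trace_array_printk(tr, _THIS_IP_, "widget state: %d\n", 42);
}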

__printf(3, 4)
int trace_array_printk_buf(struct trace_buffer *buffer,
			   unsigned long ip, const char *fmt, ...)
{}

__printf(2, 0)
int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{}
EXPORT_SYMBOL_GPL();

static void trace_iterator_increment(struct trace_iterator *iter)
{}

static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
		unsigned long *lost_events)
{}

static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
		  unsigned long *missing_events, u64 *ent_ts)
{}

#define STATIC_FMT_BUF_SIZE
static char static_fmt_buf[STATIC_FMT_BUF_SIZE];

char *trace_iter_expand_format(struct trace_iterator *iter)
{}

/* Returns true if the string is safe to dereference from an event */
static bool trace_safe_str(struct trace_iterator *iter, const char *str,
			   bool star, int len)
{}

static DEFINE_STATIC_KEY_FALSE(trace_no_verify);

static int test_can_verify_check(const char *fmt, ...)
{}

static void test_can_verify(void)
{}

/**
 * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
 * @iter: The iterator that holds the seq buffer and the event being printed
 * @fmt: The format used to print the event
 * @ap: The va_list holding the data to print from @fmt.
 *
 * This writes the data into the @iter->seq buffer using the data from
 * @fmt and @ap. If the format has a %s, then the source of the string
 * is examined to make sure it is safe to print, otherwise it will
 * warn and print "[UNSAFE MEMORY]" in place of the dereferenced string
 * pointer.
 */
void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
			 va_list ap)
{}

const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
{}

#define STATIC_TEMP_BUF_SIZE
static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned();

/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts)
{}

/* Find the next real entry, and increment the iterator to the next entry */
void *trace_find_next_entry_inc(struct trace_iterator *iter)
{}

static void trace_consume(struct trace_iterator *iter)
{}

static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{}

void tracing_iter_reset(struct trace_iterator *iter, int cpu)
{}

/*
 * The current tracer is copied to avoid a global locking
 * all around.
 */
static void *s_start(struct seq_file *m, loff_t *pos)
{}

static void s_stop(struct seq_file *m, void *p)
{}

static void
get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
		      unsigned long *entries, int cpu)
{}

static void
get_total_entries(struct array_buffer *buf,
		  unsigned long *total, unsigned long *entries)
{}

unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
{}

unsigned long trace_total_entries(struct trace_array *tr)
{}

static void print_lat_help_header(struct seq_file *m)
{}

static void print_event_info(struct array_buffer *buf, struct seq_file *m)
{}

static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
				   unsigned int flags)
{}

static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
				       unsigned int flags)
{}

void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{}

static void test_cpu_buff_start(struct trace_iterator *iter)
{}

static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{}

static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{}

static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{}

static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{}

int trace_empty(struct trace_iterator *iter)
{}

/*  Called with trace_event_read_lock() held. */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{}

void trace_latency_header(struct seq_file *m)
{}

void trace_default_header(struct seq_file *m)
{}

static void test_ftrace_alive(struct seq_file *m)
{}

#ifdef CONFIG_TRACER_MAX_TRACE
static void show_snapshot_main_help(struct seq_file *m)
{}

static void show_snapshot_percpu_help(struct seq_file *m)
{}

static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
{}
#else
/* Should never be called */
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
#endif

static int s_show(struct seq_file *m, void *v)
{}

/*
 * Should be used after trace_array_get(), trace_types_lock
 * ensures that i_cdev was already initialized.
 */
static inline int tracing_get_cpu(struct inode *inode)
{}

static const struct seq_operations tracer_seq_ops =;

/*
 * Note, as iter itself can be allocated and freed in different
 * ways, this function is only used to free its content, and not
 * the iterator itself. The only requirement on all the allocations
 * is that they must zero all fields (kzalloc), as freeing works with
 * either allocated content or NULL.
 */
static void free_trace_iter_content(struct trace_iterator *iter)
{}

static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{}

int tracing_open_generic(struct inode *inode, struct file *filp)
{}

bool tracing_is_disabled(void)
{}

/*
 * Open and update trace_array ref count.
 * Must have the current trace_array passed to it.
 */
int tracing_open_generic_tr(struct inode *inode, struct file *filp)
{}

/*
 * The private pointer of the inode is the trace_event_file.
 * Update the tr ref count associated to it.
 */
int tracing_open_file_tr(struct inode *inode, struct file *filp)
{}

int tracing_release_file_tr(struct inode *inode, struct file *filp)
{}

int tracing_single_release_file_tr(struct inode *inode, struct file *filp)
{}

static int tracing_mark_open(struct inode *inode, struct file *filp)
{}

static int tracing_release(struct inode *inode, struct file *file)
{}

int tracing_release_generic_tr(struct inode *inode, struct file *file)
{}

static int tracing_single_release_tr(struct inode *inode, struct file *file)
{}

static int tracing_open(struct inode *inode, struct file *file)
{}

/*
 * Some tracers are not suitable for instance buffers.
 * A tracer is always available for the global array (toplevel)
 * or if it explicitly states that it is.
 */
static bool
trace_ok_for_array(struct tracer *t, struct trace_array *tr)
{}

/* Find the next tracer that this trace array may use */
static struct tracer *
get_tracer_for_array(struct trace_array *tr, struct tracer *t)
{}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{}

static void *t_start(struct seq_file *m, loff_t *pos)
{}

static void t_stop(struct seq_file *m, void *p)
{}

static int t_show(struct seq_file *m, void *v)
{}

static const struct seq_operations show_traces_seq_ops =;

static int show_traces_open(struct inode *inode, struct file *file)
{}

static int show_traces_release(struct inode *inode, struct file *file)
{}

static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{}

loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
{}

static const struct file_operations tracing_fops =;

static const struct file_operations show_traces_fops =;

static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{}

int tracing_set_cpumask(struct trace_array *tr,
			cpumask_var_t tracing_cpumask_new)
{}

static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{}

static const struct file_operations tracing_cpumask_fops =;

static int tracing_trace_options_show(struct seq_file *m, void *v)
{}

static int __set_tracer_option(struct trace_array *tr,
			       struct tracer_flags *tracer_flags,
			       struct tracer_opt *opts, int neg)
{}

/* Try to assign a tracer specific option */
static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
{}

/* Some tracers require overwrite to stay enabled */
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
{}

int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{}

int trace_set_options(struct trace_array *tr, char *option)
{}

static void __init apply_trace_boot_options(void)
{}

static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{}

static int tracing_trace_options_open(struct inode *inode, struct file *file)
{}

static const struct file_operations tracing_iter_fops =;

static const char readme_msg[] =;

static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{}

static const struct file_operations tracing_readme_fops =;

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
static union trace_eval_map_item *
update_eval_map(union trace_eval_map_item *ptr)
{}

static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
{}

static void *eval_map_start(struct seq_file *m, loff_t *pos)
{}

static void eval_map_stop(struct seq_file *m, void *v)
{}

static int eval_map_show(struct seq_file *m, void *v)
{}

static const struct seq_operations tracing_eval_map_seq_ops =;

static int tracing_eval_map_open(struct inode *inode, struct file *filp)
{}

static const struct file_operations tracing_eval_map_fops =;

static inline union trace_eval_map_item *
trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
{}

static void
trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
			   int len)
{}

static void trace_create_eval_file(struct dentry *d_tracer)
{}

#else /* CONFIG_TRACE_EVAL_MAP_FILE */
static inline void trace_create_eval_file(struct dentry *d_tracer) { }
static inline void trace_insert_eval_map_file(struct module *mod,
			      struct trace_eval_map **start, int len) { }
#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */

static void trace_insert_eval_map(struct module *mod,
				  struct trace_eval_map **start, int len)
{}

static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{}

int tracer_init(struct tracer *t, struct trace_array *tr)
{}

static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
{}

static void update_buffer_entries(struct array_buffer *buf, int cpu)
{}

#ifdef CONFIG_TRACER_MAX_TRACE
/* resize @tr's buffer to the size of @size_tr's entries */
static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
					struct array_buffer *size_buf, int cpu_id)
{}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int __tracing_resize_ring_buffer(struct trace_array *tr,
					unsigned long size, int cpu)
{}

ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
				  unsigned long size, int cpu_id)
{}

/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 * @tr: The tracing instance
 *
 * To save memory when tracing is never used on a system that has it
 * configured in, the ring buffers are set to a minimum size. Once
 * a user starts to use the tracing facility, they need to grow
 * to their default size.
 *
 * This function is to be called when a tracer is about to be used.
 */
int tracing_update_buffers(struct trace_array *tr)
{}

struct trace_option_dentry;

static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer);

/*
 * Used to clear out the tracer before deletion of an instance.
 * Must have trace_types_lock held.
 */
static void tracing_set_nop(struct trace_array *tr)
{}

static bool tracer_options_updated;

static void add_tracer_options(struct trace_array *tr, struct tracer *t)
{}

int tracing_set_tracer(struct trace_array *tr, const char *buf)
{}

static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{}

static ssize_t
tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{}

static ssize_t
tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{}

static ssize_t
tracing_thresh_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{}

static ssize_t
tracing_thresh_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{}

#ifdef CONFIG_TRACER_MAX_TRACE

static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{}

#endif

static int open_pipe_on_cpu(struct trace_array *tr, int cpu)
{}

static void close_pipe_on_cpu(struct trace_array *tr, int cpu)
{}

static int tracing_open_pipe(struct inode *inode, struct file *filp)
{}

static int tracing_release_pipe(struct inode *inode, struct file *file)
{}

static __poll_t
trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
{}

static __poll_t
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{}

/* Must be called with iter->mutex held. */
static int tracing_wait_pipe(struct file *filp)
{}

/*
 * Consumer reader.
 */
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{}

static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
				     unsigned int idx)
{}

static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{}

static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{}

static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{}

static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{}

static ssize_t
tracing_total_entries_read(struct file *filp, char __user *ubuf,
				size_t cnt, loff_t *ppos)
{}

static ssize_t
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{}

static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{}

#define TRACE_MARKER_MAX_SIZE

static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
					size_t cnt, loff_t *fpos)
{}

static ssize_t
tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
					size_t cnt, loff_t *fpos)
{}

static int tracing_clock_show(struct seq_file *m, void *v)
{}

int tracing_set_clock(struct trace_array *tr, const char *clockstr)
{}

static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *fpos)
{}

static int tracing_clock_open(struct inode *inode, struct file *file)
{}

static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
{}

static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
{}

u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
{}

/*
 * Set or disable using the per CPU trace_buffer_event when possible.
 */
int tracing_set_filter_buffering(struct trace_array *tr, bool set)
{}

struct ftrace_buffer_info {};

#ifdef CONFIG_TRACER_SNAPSHOT
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{}

static void tracing_swap_cpu_buffer(void *tr)
{}

static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{}

static int tracing_snapshot_release(struct inode *inode, struct file *file)
{}

static int tracing_buffers_open(struct inode *inode, struct file *filp);
static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
				    size_t count, loff_t *ppos);
static int tracing_buffers_release(struct inode *inode, struct file *file);
static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);

static int snapshot_raw_open(struct inode *inode, struct file *filp)
{}

#endif /* CONFIG_TRACER_SNAPSHOT */

static const struct file_operations tracing_thresh_fops =;

#ifdef CONFIG_TRACER_MAX_TRACE
static const struct file_operations tracing_max_lat_fops =;
#endif

static const struct file_operations set_tracer_fops =;

static const struct file_operations tracing_pipe_fops =;

static const struct file_operations tracing_entries_fops =;

static const struct file_operations tracing_total_entries_fops =;

static const struct file_operations tracing_free_buffer_fops =;

static const struct file_operations tracing_mark_fops =;

static const struct file_operations tracing_mark_raw_fops =;

static const struct file_operations trace_clock_fops =;

static const struct file_operations trace_time_stamp_mode_fops =;

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops =;

static const struct file_operations snapshot_raw_fops =;

#endif /* CONFIG_TRACER_SNAPSHOT */

/*
 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
 * @filp: The active open file structure
 * @ubuf: The userspace provided buffer to read value into
 * @cnt: The maximum number of bytes to read
 * @ppos: The current "file" position
 *
 * This function implements the write interface for a struct trace_min_max_param.
 * The filp->private_data must point to a trace_min_max_param structure that
 * defines where to write the value, the min and the max acceptable values,
 * and a lock to protect the write.
 */
static ssize_t
trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
{}

/*
 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
 * @filp: The active open file structure
 * @ubuf: The userspace provided buffer to read value into
 * @cnt: The maximum number of bytes to read
 * @ppos: The current "file" position
 *
 * This function implements the read interface for a struct trace_min_max_param.
 * The filp->private_data must point to a trace_min_max_param struct with valid
 * data.
 */
static ssize_t
trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{}

const struct file_operations trace_min_max_fops =;
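
/*
 * Illustrative wiring (a sketch; the knob and its bound are made up,
 * field names per struct trace_min_max_param in trace.h): expose a
 * clamped u64 through trace_min_max_fops.
 */
static u64 example_knob;
static u64 example_knob_max = 1000;

static struct trace_min_max_param example_knob_param __maybe_unused = {
	.lock	= NULL,			/* writes not serialized in this sketch */
	.val	= &example_knob,
	.min	= NULL,			/* no lower bound */
	.max	= &example_knob_max,
};

/*
 * A caller would then create the file with something like:
 *   trace_create_file("example_knob", TRACE_MODE_WRITE, parent,
 *		       &example_knob_param, &trace_min_max_fops);
 */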

#define TRACING_LOG_ERRS_MAX
#define TRACING_LOG_LOC_MAX

#define CMD_PREFIX

struct err_info {};

struct tracing_log_err {};

static DEFINE_MUTEX(tracing_err_log_lock);

static struct tracing_log_err *alloc_tracing_log_err(int len)
{}

static void free_tracing_log_err(struct tracing_log_err *err)
{}

static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr,
						   int len)
{}

/**
 * err_pos - find the position of a string within a command for error caret placement
 * @cmd: The tracing command that caused the error
 * @str: The string to position the caret at within @cmd
 *
 * Finds the position of the first occurrence of @str within @cmd.  The
 * return value can be passed to tracing_log_err() for caret placement
 * within @cmd.
 *
 * Returns the index within @cmd of the first occurrence of @str or 0
 * if @str was not found.
 */
unsigned int err_pos(char *cmd, const char *str)
{}

/**
 * tracing_log_err - write an error to the tracing error log
 * @tr: The associated trace array for the error (NULL for top level array)
 * @loc: A string describing where the error occurred
 * @cmd: The tracing command that caused the error
 * @errs: The array of loc-specific static error strings
 * @type: The index into errs[], which produces the specific static err string
 * @pos: The position the caret should be placed in the cmd
 *
 * Writes an error into tracing/error_log of the form:
 *
 * <loc>: error: <text>
 *   Command: <cmd>
 *              ^
 *
 * tracing/error_log is a small log file containing the last
 * TRACING_LOG_ERRS_MAX errors (8).  Memory for errors isn't allocated
 * unless there has been a tracing error, and the error log can be
 * cleared and have its memory freed by writing the empty string in
 * truncation mode to it i.e. echo > tracing/error_log.
 *
 * NOTE: the @errs array along with the @type param are used to
 * produce a static error string - this string is not copied and saved
 * when the error is logged - only a pointer to it is saved.  See
 * existing callers for examples of how static strings are typically
 * defined for use with tracing_log_err().
 */
void tracing_log_err(struct trace_array *tr,
		     const char *loc, const char *cmd,
		     const char **errs, u8 type, u16 pos)
{}
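
/*
 * Illustrative caller (a sketch; the location and error strings are
 * hypothetical): log a parse error with the caret under the bad token.
 */
static const char *example_errs[] = { "Unknown token" };

static __maybe_unused void
example_report_err(struct trace_array *tr, char *cmd, const char *bad_tok)
{
	tracing_log_err(tr, "example: parse", cmd, example_errs,
			0, err_pos(cmd, bad_tok));
}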

static void clear_tracing_err_log(struct trace_array *tr)
{}

static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
{}

static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
{}

static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
{}

static void tracing_err_log_show_pos(struct seq_file *m, u16 pos)
{}

static int tracing_err_log_seq_show(struct seq_file *m, void *v)
{}

static const struct seq_operations tracing_err_log_seq_ops =;

static int tracing_err_log_open(struct inode *inode, struct file *file)
{}

static ssize_t tracing_err_log_write(struct file *file,
				     const char __user *buffer,
				     size_t count, loff_t *ppos)
{}

static int tracing_err_log_release(struct inode *inode, struct file *file)
{}

static const struct file_operations tracing_err_log_fops =;

static int tracing_buffers_open(struct inode *inode, struct file *filp)
{}

static __poll_t
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{}

static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{}

static int tracing_buffers_flush(struct file *file, fl_owner_t id)
{}

static int tracing_buffers_release(struct inode *inode, struct file *file)
{}

struct buffer_ref {};

static void buffer_ref_release(struct buffer_ref *ref)
{}

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{}

static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops =;

/*
 * Callback from splice_to_pipe(), used to release pages at the end
 * of the spd in case we errored out while filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{}
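
/*
 * A minimal sketch of what that release amounts to (hedged; the body
 * is elided in this listing): each partial page stashed its buffer_ref
 * in spd->partial[i].private, so the cleanup drops that reference and
 * clears the field:
 *
 *	struct buffer_ref *ref = (struct buffer_ref *)spd->partial[i].private;
 *
 *	buffer_ref_release(ref);
 *	spd->partial[i].private = 0;
 */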

static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{}

static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{}

#ifdef CONFIG_TRACER_MAX_TRACE
static int get_snapshot_map(struct trace_array *tr)
{}
static void put_snapshot_map(struct trace_array *tr)
{}
#else
static inline int get_snapshot_map(struct trace_array *tr) { return 0; }
static inline void put_snapshot_map(struct trace_array *tr) { }
#endif

static void tracing_buffers_mmap_close(struct vm_area_struct *vma)
{}

static const struct vm_operations_struct tracing_buffers_vmops =;

static int tracing_buffers_mmap(struct file *filp, struct vm_area_struct *vma)
{}

static const struct file_operations tracing_buffers_fops =;

static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{}

static const struct file_operations tracing_stats_fops =;

#ifdef CONFIG_DYNAMIC_FTRACE

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{}

static const struct file_operations tracing_dyn_info_fops =;
#endif /* CONFIG_DYNAMIC_FTRACE */

#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{}

static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{}

static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{}

static int
ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		     unsigned long ip, void *init_data, void **data)
{}

static void
ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		     unsigned long ip, void *data)
{}

static struct ftrace_probe_ops snapshot_probe_ops =;

static struct ftrace_probe_ops snapshot_count_probe_ops =;

static int
ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{}

static struct ftrace_func_command ftrace_snapshot_cmd =;

static __init int register_snapshot_cmd(void)
{}
#else
static inline __init int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */

static struct dentry *tracing_get_dentry(struct trace_array *tr)
{}

static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{}

static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
{}

static void
tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
{}

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{}

static int tracing_open_options(struct inode *inode, struct file *filp)
{}

static int tracing_release_options(struct inode *inode, struct file *file)
{}

static const struct file_operations trace_options_fops =;

/*
 * In order to pass in both the trace_array descriptor as well as the index
 * of the flag that the trace option file represents, the trace_array
 * has a character array, trace_flags_index[], which holds the index
 * of the bit for the flag it represents, with index[0] == 0,
 * index[1] == 1, etc.  The address of the element for the flag is what
 * gets passed to the flag option file read/write callbacks.
 *
 * To extract both the index and the trace_array descriptor,
 * get_tr_index() uses the following algorithm:
 *
 *   idx = *ptr;
 *
 * The pointer passed in is the address of the index entry, and the
 * entry holds its own index (remember, index[1] == 1), so dereferencing
 * it yields the flag index.
 *
 * To then get the trace_array descriptor, subtract that index from
 * the pointer, which lands back on the start of the array:
 *
 *   ptr - idx == &index[0]
 *
 * A simple container_of() on that pointer then yields the
 * trace_array descriptor.
 */
static void get_tr_index(void *data, struct trace_array **ptr,
			 unsigned int *pindex)
{}
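
/*
 * Putting the algorithm described above into code, a minimal sketch of
 * the body (elided in this listing) would be:
 *
 *	*pindex = *(unsigned char *)data;
 *	*ptr = container_of(data - *pindex, struct trace_array,
 *			    trace_flags_index);
 *
 * i.e. dereference to get the flag index, step back to &index[0], and
 * let container_of() map the array back to its trace_array.
 */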

static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{}

static const struct file_operations trace_options_core_fops =;

struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{}


static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{}

static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{}

static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{}

static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{}

static void create_trace_options_dir(struct trace_array *tr)
{}

static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{}

static const struct file_operations rb_simple_fops =;

static ssize_t
buffer_percent_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{}

static ssize_t
buffer_percent_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{}

static const struct file_operations buffer_percent_fops =;

static ssize_t
buffer_subbuf_size_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{}

static ssize_t
buffer_subbuf_size_write(struct file *filp, const char __user *ubuf,
			 size_t cnt, loff_t *ppos)
{}

static const struct file_operations buffer_subbuf_size_fops =;

static struct dentry *trace_instance_dir;

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);

static int
allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
{}

static void free_trace_buffer(struct array_buffer *buf)
{}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{}

static void free_trace_buffers(struct trace_array *tr)
{}

static void init_trace_flags_index(struct trace_array *tr)
{}

static void __update_tracer_options(struct trace_array *tr)
{}

static void update_tracer_options(struct trace_array *tr)
{}

/* Must have trace_types_lock held */
struct trace_array *trace_array_find(const char *instance)
{}

struct trace_array *trace_array_find_get(const char *instance)
{}

static int trace_array_create_dir(struct trace_array *tr)
{}

static struct trace_array *
trace_array_create_systems(const char *name, const char *systems)
{}

static struct trace_array *trace_array_create(const char *name)
{}

static int instance_mkdir(const char *name)
{}

/**
 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
 * @name: The name of the trace array to be looked up/created.
 * @systems: A list of systems to create event directories for (NULL for all)
 *
 * Returns a pointer to the trace array with the given name, or NULL
 * if it cannot be created.
 *
 * NOTE: This function increments the reference counter associated with the
 * trace array returned. This makes sure it cannot be freed while in use.
 * Use trace_array_put() once the trace array is no longer needed.
 * If the trace_array is to be freed, trace_array_destroy() needs to
 * be called after the trace_array_put(), or simply let user space delete
 * it from the tracefs instances directory. But until trace_array_put()
 * is called, user space cannot delete it.
 */
struct trace_array *trace_array_get_by_name(const char *name, const char *systems)
{}
EXPORT_SYMBOL_GPL();
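
/*
 * Typical module usage, as a hedged sketch (the instance name and
 * error handling are illustrative, not from in-tree code):
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_inst", NULL);
 *	if (!tr)
 *		return -ENODEV;
 *
 *	// ... use tr, e.g. with trace_array_printk() ...
 *
 *	trace_array_put(tr);
 *	trace_array_destroy(tr);	// only if the instance should go away
 */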

static int __remove_instance(struct trace_array *tr)
{}

int trace_array_destroy(struct trace_array *this_tr)
{}
EXPORT_SYMBOL_GPL();

static int instance_rmdir(const char *name)
{}

static __init void create_trace_instances(struct dentry *d_tracer)
{}

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{}

static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
{}

/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() by any of the boot up code
 * and returns zero once the top level tracing directory has been
 * initialized, or a negative error code otherwise.
 */
int tracing_init_dentry(void)
{}

extern struct trace_eval_map *__start_ftrace_eval_maps[];
extern struct trace_eval_map *__stop_ftrace_eval_maps[];

static struct workqueue_struct *eval_map_wq __initdata;
static struct work_struct eval_map_work __initdata;
static struct work_struct tracerfs_init_work __initdata;

static void __init eval_map_work_func(struct work_struct *work)
{}

static int __init trace_eval_init(void)
{}

subsys_initcall(trace_eval_init);

static int __init trace_eval_sync(void)
{}

late_initcall_sync(trace_eval_sync);


#ifdef CONFIG_MODULES
static void trace_module_add_evals(struct module *mod)
{}

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
static void trace_module_remove_evals(struct module *mod)
{}
#else
static inline void trace_module_remove_evals(struct module *mod) { }
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{}

static struct notifier_block trace_module_nb =;
#endif /* CONFIG_MODULES */

static __init void tracer_init_tracefs_work_func(struct work_struct *work)
{}

static __init int tracer_init_tracefs(void)
{}

fs_initcall(tracer_init_tracefs);

static int trace_die_panic_handler(struct notifier_block *self,
				unsigned long ev, void *unused);

static struct notifier_block trace_panic_notifier =;

static struct notifier_block trace_die_notifier =;

/*
 * The idea is to execute the following die/panic callback early, in order
 * to avoid showing irrelevant information in the trace (like other panic
 * notifier functions); we are the second to run, after hung_task/rcu_stall
 * warnings get disabled (to prevent potential log flooding).
 */
static int trace_die_panic_handler(struct notifier_block *self,
				unsigned long ev, void *unused)
{}
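
/*
 * A hedged sketch of the dispatch this handler performs (the body is
 * elided in this listing; exact checks may differ):
 *
 *	if (!ftrace_dump_on_oops)
 *		return NOTIFY_DONE;
 *
 *	// the die notifier only triggers a dump on a real oops
 *	if (self == &trace_die_notifier && ev != DIE_OOPS)
 *		return NOTIFY_DONE;
 *
 *	ftrace_dump(ftrace_dump_on_oops);
 *	return NOTIFY_DONE;
 */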

/*
 * printk is set to a max of 1024; we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE

void
trace_printk_seq(struct trace_seq *s)
{}

static void trace_init_iter(struct trace_iterator *iter, struct trace_array *tr)
{}

void trace_init_global_iter(struct trace_iterator *iter)
{}

static void ftrace_dump_one(struct trace_array *tr, enum ftrace_dump_mode dump_mode)
{}

static void ftrace_dump_by_param(void)
{}

void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{}
EXPORT_SYMBOL_GPL();

#define WRITE_BUFSIZE

ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos,
				int (*createfn)(const char *))
{}

#ifdef CONFIG_TRACER_MAX_TRACE
__init static bool tr_needs_alloc_snapshot(const char *name)
{}

__init static void do_allocate_snapshot(const char *name)
{}
#else
static inline void do_allocate_snapshot(const char *name) { }
#endif

__init static void enable_instances(void)
{}

__init static int tracer_alloc_buffers(void)
{}

void __init ftrace_boot_snapshot(void)
{}

void __init early_trace_init(void)
{}

void __init trace_init(void)
{}

__init static void clear_boot_tracer(void)
{}

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__init static void tracing_set_default_clock(void)
{}
#else
static inline void tracing_set_default_clock(void) { }
#endif

__init static int late_trace_init(void)
{}

late_initcall_sync(late_trace_init);