linux/kernel/events/core.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Performance events core code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <[email protected]>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <[email protected]>
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/tick.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/hugetlb.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/cgroup.h>
#include <linux/perf_event.h>
#include <linux/trace_events.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/compat.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/namei.h>
#include <linux/parser.h>
#include <linux/sched/clock.h>
#include <linux/sched/mm.h>
#include <linux/proc_ns.h>
#include <linux/mount.h>
#include <linux/min_heap.h>
#include <linux/highmem.h>
#include <linux/pgtable.h>
#include <linux/buildid.h>
#include <linux/task_work.h>

#include "internal.h"

#include <asm/irq_regs.h>

typedef int (*remote_function_f)(void *);

struct remote_function_call {};

static void remote_function(void *data)
{}

/**
 * task_function_call - call a function on the cpu on which a task runs
 * @p:		the task to evaluate
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func when the task is currently running. This might
 * be on the current CPU, which just calls the function directly.  This will
 * retry due to any failures in smp_call_function_single(), such as if the
 * task_cpu() goes offline concurrently.
 *
 * returns @func return value or -ESRCH or -ENXIO when the process isn't running
 */
static int
task_function_call(struct task_struct *p, remote_function_f func, void *info)
{}
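
/*
 * Editorial sketch (not the kernel's actual implementation) of the retry
 * behaviour documented above: keep re-issuing the cross-CPU call while
 * smp_call_function_single() fails transiently, e.g. because the target
 * CPU went offline under us.  remote_function() is assumed to unpack
 * @func/@info from the (elided) struct remote_function_call and to stash
 * the return value there; that plumbing is omitted here.
 */
static int task_function_call_sketch(struct task_struct *p, void *call_data)
{
	int ret;

	for (;;) {
		ret = smp_call_function_single(task_cpu(p), remote_function,
					       call_data, 1);
		if (ret != -EAGAIN)
			break;
		cond_resched();
	}

	return ret;
}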

/**
 * cpu_function_call - call a function on the cpu
 * @cpu:	target cpu to queue this function
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func on the remote cpu.
 *
 * returns: @func return value or -ENXIO when the cpu is offline
 */
static int cpu_function_call(int cpu, remote_function_f func, void *info)
{}

static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
			  struct perf_event_context *ctx)
{}

static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
			    struct perf_event_context *ctx)
{}

#define TASK_TOMBSTONE

static bool is_kernel_event(struct perf_event *event)
{}

static DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);

struct perf_event_context *perf_cpu_task_ctx(void)
{}

/*
 * On task ctx scheduling...
 *
 * When !ctx->nr_events a task context will not be scheduled. This means
 * we can disable the scheduler hooks (for performance) without leaving
 * pending task ctx state.
 *
 * This however results in two special cases:
 *
 *  - removing the last event from a task ctx; this is relatively
 *    straightforward and is done in __perf_remove_from_context.
 *
 *  - adding the first event to a task ctx; this is tricky because we cannot
 *    rely on ctx->is_active and therefore cannot use event_function_call().
 *    See perf_install_in_context().
 *
 * If ctx->nr_events, then ctx->is_active and cpuctx->task_ctx are set.
 */

typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *,
			struct perf_event_context *, void *);

struct event_function_struct {};

static int event_function(void *info)
{}

static void event_function_call(struct perf_event *event, event_f func, void *data)
{}

/*
 * Similar to event_function_call() + event_function(), but hard assumes IRQs
 * are already disabled and we're on the right CPU.
 */
static void event_function_local(struct perf_event *event, event_f func, void *data)
{}

#define PERF_FLAG_ALL

/*
 * branch priv levels that need permission checks
 */
#define PERF_SAMPLE_BRANCH_PERM_PLM

enum event_type_t {};

/*
 * perf_sched_events : >0 events exist
 */

static void perf_sched_delayed(struct work_struct *work);
DEFINE_STATIC_KEY_FALSE(perf_sched_events);
static DECLARE_DELAYED_WORK(perf_sched_work, perf_sched_delayed);
static DEFINE_MUTEX(perf_sched_mutex);
static atomic_t perf_sched_count;

static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);

static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_namespaces_events __read_mostly;
static atomic_t nr_task_events __read_mostly;
static atomic_t nr_freq_events __read_mostly;
static atomic_t nr_switch_events __read_mostly;
static atomic_t nr_ksymbol_events __read_mostly;
static atomic_t nr_bpf_events __read_mostly;
static atomic_t nr_cgroup_events __read_mostly;
static atomic_t nr_text_poke_events __read_mostly;
static atomic_t nr_build_id_events __read_mostly;

static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;
static cpumask_var_t perf_online_mask;
static struct kmem_cache *perf_event_cache;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly =;

/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly =; /* 'free' kiB per user */

/*
 * max perf event sample rate
 */
#define DEFAULT_MAX_SAMPLE_RATE
#define DEFAULT_SAMPLE_PERIOD_NS
#define DEFAULT_CPU_TIME_MAX_PERCENT

int sysctl_perf_event_sample_rate __read_mostly	=;

static int max_samples_per_tick __read_mostly	=;
static int perf_sample_period_ns __read_mostly	=;

static int perf_sample_allowed_ns __read_mostly =;

static void update_perf_cpu_limits(void)
{}

static bool perf_rotate_context(struct perf_cpu_pmu_context *cpc);

int perf_event_max_sample_rate_handler(const struct ctl_table *table, int write,
				       void *buffer, size_t *lenp, loff_t *ppos)
{}

int sysctl_perf_cpu_time_max_percent __read_mostly =;

int perf_cpu_time_max_percent_handler(const struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos)
{}

/*
 * perf samples are done in some very critical code paths (NMIs).
 * If they take too much CPU time, the system can lock up and not
 * get any real work done.  This will drop the sample rate when
 * we detect that events are taking too long.
 */
#define NR_ACCUMULATED_SAMPLES
static DEFINE_PER_CPU(u64, running_sample_length);

static u64 __report_avg;
static u64 __report_allowed;

static void perf_duration_warn(struct irq_work *w)
{}

static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);

void perf_sample_event_took(u64 sample_len_ns)
{}
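
/*
 * Editorial sketch of the detection described above: keep a decaying
 * running sum of recent sample lengths and compare the implied average
 * against the allowed budget.  Simplified illustration only; the real
 * code keeps the sum per-CPU and raises an irq_work warning when the
 * budget is exceeded.
 */
static bool sample_took_too_long_sketch(u64 *running_len, u64 sample_len_ns,
					u64 allowed_ns, u32 nr_samples)
{
	/* Decay the old contribution, then add the new sample. */
	*running_len -= div_u64(*running_len, nr_samples);
	*running_len += sample_len_ns;

	/* Average over roughly the last nr_samples samples. */
	return div_u64(*running_len, nr_samples) > allowed_ns;
}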

static atomic64_t perf_event_id;

static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);

void __weak perf_event_print_debug(void)	{}

static inline u64 perf_clock(void)
{}

static inline u64 perf_event_clock(struct perf_event *event)
{}

/*
 * State based event timekeeping...
 *
 * The basic idea is to use event->state to determine which (if any) time
 * fields to increment with the current delta. This means we only need to
 * update timestamps when we change state or when they are explicitly requested
 * (read).
 *
 * Event groups make things a little more complicated, but not terribly so. The
 * rules for a group are that if the group leader is OFF the entire group is
 * OFF, irrespective of what the group member states are. This results in
 * __perf_effective_state().
 *
 * A further ramification is that when a group leader flips between OFF and
 * !OFF, we need to update all group member times.
 *
 *
 * NOTE: perf_event_time() is based on the (cgroup) context time, and thus we
 * need to make sure the relevant context time is updated before we try and
 * update our timestamps.
 */

static __always_inline enum perf_event_state
__perf_effective_state(struct perf_event *event)
{}

static __always_inline void
__perf_update_times(struct perf_event *event, u64 now, u64 *enabled, u64 *running)
{}
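
/*
 * Editorial sketch of the state based rule described above: 'enabled' time
 * accrues whenever the effective state is at least INACTIVE, 'running' time
 * only while the event is ACTIVE.  Illustration only.
 */
static inline void update_times_sketch(enum perf_event_state state, u64 delta,
				       u64 *enabled, u64 *running)
{
	if (state >= PERF_EVENT_STATE_INACTIVE)
		*enabled += delta;
	if (state >= PERF_EVENT_STATE_ACTIVE)
		*running += delta;
}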

static void perf_event_update_time(struct perf_event *event)
{}

static void perf_event_update_sibling_time(struct perf_event *leader)
{}

static void
perf_event_set_state(struct perf_event *event, enum perf_event_state state)
{}

/*
 * UP store-release, load-acquire
 */

#define __store_release(ptr, val)

#define __load_acquire(ptr)

static void perf_ctx_disable(struct perf_event_context *ctx, bool cgroup)
{}

static void perf_ctx_enable(struct perf_event_context *ctx, bool cgroup)
{}

static void ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type);
static void ctx_sched_in(struct perf_event_context *ctx, enum event_type_t event_type);

#ifdef CONFIG_CGROUP_PERF

static inline bool
perf_cgroup_match(struct perf_event *event)
{}

static inline void perf_detach_cgroup(struct perf_event *event)
{}

static inline int is_cgroup_event(struct perf_event *event)
{}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{}

static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now)
{}

static inline void __update_cgrp_time(struct perf_cgroup_info *info, u64 now, bool adv)
{}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx, bool final)
{}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{}

static inline void
perf_cgroup_set_timestamp(struct perf_cpu_context *cpuctx)
{}

/*
 * reschedule events based on the cgroup constraint of task.
 */
static void perf_cgroup_switch(struct task_struct *task)
{}

static int perf_cgroup_ensure_storage(struct perf_event *event,
				struct cgroup_subsys_state *css)
{}

static inline int perf_cgroup_connect(int fd, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{}

static inline void
perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx)
{}

static inline void
perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx)
{}

#else /* !CONFIG_CGROUP_PERF */

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	return true;
}

static inline void perf_detach_cgroup(struct perf_event *event)
{}

static inline int is_cgroup_event(struct perf_event *event)
{
	return 0;
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx,
						bool final)
{
}

static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	return -EINVAL;
}

static inline void
perf_cgroup_set_timestamp(struct perf_cpu_context *cpuctx)
{
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	return 0;
}

static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now)
{
	return 0;
}

static inline void
perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx)
{
}

static inline void
perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx)
{
}

static void perf_cgroup_switch(struct task_struct *task)
{
}
#endif

/*
 * set default to be dependent on timer tick just
 * like original code
 */
#define PERF_CPU_HRTIMER
/*
 * function must be called with interrupts disabled
 */
static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
{}

static void __perf_mux_hrtimer_init(struct perf_cpu_pmu_context *cpc, int cpu)
{}

static int perf_mux_hrtimer_restart(struct perf_cpu_pmu_context *cpc)
{}

static int perf_mux_hrtimer_restart_ipi(void *arg)
{}

void perf_pmu_disable(struct pmu *pmu)
{}

void perf_pmu_enable(struct pmu *pmu)
{}

static void perf_assert_pmu_disabled(struct pmu *pmu)
{}

static void get_ctx(struct perf_event_context *ctx)
{}

static void *alloc_task_ctx_data(struct pmu *pmu)
{}

static void free_task_ctx_data(struct pmu *pmu, void *task_ctx_data)
{}

static void free_ctx(struct rcu_head *head)
{}

static void put_ctx(struct perf_event_context *ctx)
{}

/*
 * Because of perf_event::ctx migration in sys_perf_event_open::move_group and
 * perf_pmu_migrate_context() we need some magic.
 *
 * Those places that change perf_event::ctx will hold both
 * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
 *
 * Lock ordering is by mutex address. There are two other sites where
 * perf_event_context::mutex nests and those are:
 *
 *  - perf_event_exit_task_context()	[ child , 0 ]
 *      perf_event_exit_event()
 *        put_event()			[ parent, 1 ]
 *
 *  - perf_event_init_context()		[ parent, 0 ]
 *      inherit_task_group()
 *        inherit_group()
 *          inherit_event()
 *            perf_event_alloc()
 *              perf_init_event()
 *                perf_try_init_event()	[ child , 1 ]
 *
 * While it appears there is an obvious deadlock here (the parent and child
 * nesting levels are inverted between the two), this is in fact safe because
 * life-time rules separate them. That is, an exiting task cannot fork, and a
 * spawning task cannot (yet) exit.
 *
 * But remember that these are parent<->child context relations, and
 * migration does not affect children, therefore these two orderings should not
 * interact.
 *
 * The change in perf_event::ctx does not affect children (as claimed above)
 * because the sys_perf_event_open() case will install a new event and break
 * the ctx parent<->child relation, and perf_pmu_migrate_context() is only
 * concerned with cpuctx and that doesn't have children.
 *
 * The places that change perf_event::ctx will issue:
 *
 *   perf_remove_from_context();
 *   synchronize_rcu();
 *   perf_install_in_context();
 *
 * to effect the change. The remove_from_context() + synchronize_rcu() should
 * quiesce the event, after which we can install it in the new location. This
 * means that only external vectors (perf_fops, prctl) can perturb the event
 * while in transit. Therefore all such accessors should also acquire
 * perf_event_context::mutex to serialize against this.
 *
 * However; because event->ctx can change while we're waiting to acquire
 * ctx->mutex we must be careful and use the below perf_event_ctx_lock()
 * function.
 *
 * Lock order:
 *    exec_update_lock
 *	task_struct::perf_event_mutex
 *	  perf_event_context::mutex
 *	    perf_event::child_mutex;
 *	      perf_event_context::lock
 *	    perf_event::mmap_mutex
 *	    mmap_lock
 *	      perf_addr_filters_head::lock
 *
 *    cpu_hotplug_lock
 *      pmus_lock
 *	  cpuctx->mutex / perf_event_context::mutex
 */
static struct perf_event_context *
perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
{}
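
/*
 * Editorial sketch of the retry described above (not the actual
 * implementation): pin the ctx we observe, take its mutex, then re-check
 * that the event still points at that ctx; if not, unwind and try again.
 * The refcount/mutex member names follow struct perf_event_context in
 * <linux/perf_event.h>.
 */
static struct perf_event_context *ctx_lock_retry_sketch(struct perf_event *event)
{
	struct perf_event_context *ctx;

again:
	rcu_read_lock();
	ctx = READ_ONCE(event->ctx);
	if (!refcount_inc_not_zero(&ctx->refcount)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	mutex_lock(&ctx->mutex);
	if (event->ctx != ctx) {
		/* The event moved to another ctx while we slept; retry. */
		mutex_unlock(&ctx->mutex);
		put_ctx(ctx);
		goto again;
	}

	return ctx;
}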

static inline struct perf_event_context *
perf_event_ctx_lock(struct perf_event *event)
{}

static void perf_event_ctx_unlock(struct perf_event *event,
				  struct perf_event_context *ctx)
{}

/*
 * This must be done under ctx->lock, so as to serialize against
 * context_equiv(); therefore we cannot call put_ctx() since that might end up
 * taking scheduler-related locks, and ctx->lock nests inside those.
 */
static __must_check struct perf_event_context *
unclone_ctx(struct perf_event_context *ctx)
{}

static u32 perf_event_pid_type(struct perf_event *event, struct task_struct *p,
				enum pid_type type)
{}

static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{}

static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{}

/*
 * If we inherit events we want to return the parent event id
 * to userspace.
 */
static u64 primary_event_id(struct perf_event *event)
{}

/*
 * Get the perf_event_context for a task and lock it.
 *
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, unsigned long *flags)
{}

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task.  This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_event_context *
perf_pin_task_context(struct task_struct *task)
{}

static void perf_unpin_context(struct perf_event_context *ctx)
{}

/*
 * Update the record of the current time in a context.
 */
static void __update_context_time(struct perf_event_context *ctx, bool adv)
{}

static void update_context_time(struct perf_event_context *ctx)
{}

static u64 perf_event_time(struct perf_event *event)
{}

static u64 perf_event_time_now(struct perf_event *event, u64 now)
{}

static enum event_type_t get_event_type(struct perf_event *event)
{}

/*
 * Helper function to initialize event group nodes.
 */
static void init_event_group(struct perf_event *event)
{}

/*
 * Extract pinned or flexible groups from the context
 * based on event attrs bits.
 */
static struct perf_event_groups *
get_event_groups(struct perf_event *event, struct perf_event_context *ctx)
{}

/*
 * Helper function to initialize perf_event_groups trees.
 */
static void perf_event_groups_init(struct perf_event_groups *groups)
{}

static inline struct cgroup *event_cgroup(const struct perf_event *event)
{}

/*
 * Compare function for event groups.
 *
 * Implements a composite key that sorts first by CPU and then by the virtual
 * index, which provides ordering when rotating groups for the same CPU.
 */
static __always_inline int
perf_event_groups_cmp(const int left_cpu, const struct pmu *left_pmu,
		      const struct cgroup *left_cgroup, const u64 left_group_index,
		      const struct perf_event *right)
{}

#define __node_2_pe(node)

static inline bool __group_less(struct rb_node *a, const struct rb_node *b)
{}

struct __group_key {};

static inline int __group_cmp(const void *key, const struct rb_node *node)
{}

static inline int
__group_cmp_ignore_cgroup(const void *key, const struct rb_node *node)
{}

/*
 * Insert @event into @groups' tree; using
 *   {@event->cpu, @event->pmu_ctx->pmu, event_cgroup(@event), ++@groups->index}
 * as key. This places it last inside the {cpu,pmu,cgroup} subtree.
 */
static void
perf_event_groups_insert(struct perf_event_groups *groups,
			 struct perf_event *event)
{}

/*
 * Helper function to insert event into the pinned or flexible groups.
 */
static void
add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx)
{}

/*
 * Delete a group from a tree.
 */
static void
perf_event_groups_delete(struct perf_event_groups *groups,
			 struct perf_event *event)
{}

/*
 * Helper function to delete event from its groups.
 */
static void
del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx)
{}

/*
 * Get the leftmost event in the {cpu,pmu,cgroup} subtree.
 */
static struct perf_event *
perf_event_groups_first(struct perf_event_groups *groups, int cpu,
			struct pmu *pmu, struct cgroup *cgrp)
{}

static struct perf_event *
perf_event_groups_next(struct perf_event *event, struct pmu *pmu)
{}

#define perf_event_groups_for_cpu_pmu(event, groups, cpu, pmu)

/*
 * Iterate through the whole groups tree.
 */
#define perf_event_groups_for_each(event, groups)

/*
 * Add an event to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{}

/*
 * Initialize event state based on the perf_event_attr::disabled.
 */
static inline void perf_event__state_init(struct perf_event *event)
{}

static int __perf_event_read_size(u64 read_format, int nr_siblings)
{}

static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
{}

/*
 * Called at perf_event creation and when events are attached/detached from a
 * group.
 */
static void perf_event__header_size(struct perf_event *event)
{}

static void perf_event__id_header_size(struct perf_event *event)
{}

/*
 * Check that adding an event to the group does not result in anybody
 * overflowing the 64k event limit imposed by the output buffer.
 *
 * Specifically, check that the read_size for the event does not exceed 16k,
 * read_size being the one term that grows with groups size. Since read_size
 * depends on per-event read_format, also (re)check the existing events.
 *
 * This leaves 48k for the constant size fields and things like callchains,
 * branch stacks and register sets.
 */
static bool perf_event_validate_size(struct perf_event *event)
{}
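
/*
 * Editorial sketch of the sizing rule described above: read_size is the one
 * term that grows with the number of siblings.  Simplified; the real
 * accounting also covers PERF_FORMAT_LOST and the overflow checks.
 */
static int read_size_sketch(u64 read_format, int nr_siblings)
{
	int entry = sizeof(u64);	/* the counter value itself */
	int size  = 0;
	int nr    = 1;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);
	if (read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);
	if (read_format & PERF_FORMAT_GROUP) {
		nr += nr_siblings;	/* one entry per sibling */
		size += sizeof(u64);	/* the 'nr' field itself */
	}

	return size + entry * nr;
}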

static void perf_group_attach(struct perf_event *event)
{}

/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{}

static int
perf_aux_output_match(struct perf_event *event, struct perf_event *aux_event)
{}

static void put_event(struct perf_event *event);
static void event_sched_out(struct perf_event *event,
			    struct perf_event_context *ctx);

static void perf_put_aux_event(struct perf_event *event)
{}

static bool perf_need_aux_event(struct perf_event *event)
{}

static int perf_get_aux_event(struct perf_event *event,
			      struct perf_event *group_leader)
{}

static inline struct list_head *get_event_list(struct perf_event *event)
{}

/*
 * Events that have PERF_EV_CAP_SIBLING require being part of a group and
 * cannot exist on their own; schedule them out and move them into the ERROR
 * state. Also see _perf_event_enable(); it will not be able to recover
 * this ERROR state.
 */
static inline void perf_remove_sibling_event(struct perf_event *event)
{}

static void perf_group_detach(struct perf_event *event)
{}

static void sync_child_event(struct perf_event *child_event);

static void perf_child_detach(struct perf_event *event)
{}

static bool is_orphaned_event(struct perf_event *event)
{}

static inline int
event_filter_match(struct perf_event *event)
{}

static void
event_sched_out(struct perf_event *event, struct perf_event_context *ctx)
{}

static void
group_sched_out(struct perf_event *group_event, struct perf_event_context *ctx)
{}

#define DETACH_GROUP
#define DETACH_CHILD
#define DETACH_DEAD

/*
 * Cross CPU call to remove a performance event
 *
 * We disable the event on the hardware level first. After that we
 * remove it from the context list.
 */
static void
__perf_remove_from_context(struct perf_event *event,
			   struct perf_cpu_context *cpuctx,
			   struct perf_event_context *ctx,
			   void *info)
{}

/*
 * Remove the event from a task's (or a CPU's) list of events.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_event_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
{}

/*
 * Cross CPU call to disable a performance event
 */
static void __perf_event_disable(struct perf_event *event,
				 struct perf_cpu_context *cpuctx,
				 struct perf_event_context *ctx,
				 void *info)
{}

/*
 * Disable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each because they
 * hold the top-level event's child_mutex, so any descendant that
 * goes to exit will block in perf_event_exit_event().
 *
 * When called from perf_pending_disable it's OK because event->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_event_task_sched_out for this context.
 */
static void _perf_event_disable(struct perf_event *event)
{}

void perf_event_disable_local(struct perf_event *event)
{}

/*
 * Strictly speaking kernel users cannot create groups and therefore this
 * interface does not need the perf_event_ctx_lock() magic.
 */
void perf_event_disable(struct perf_event *event)
{}
EXPORT_SYMBOL_GPL();

void perf_event_disable_inatomic(struct perf_event *event)
{}

#define MAX_INTERRUPTS

static void perf_log_throttle(struct perf_event *event, int enable);
static void perf_log_itrace_start(struct perf_event *event);

static int
event_sched_in(struct perf_event *event, struct perf_event_context *ctx)
{}

static int
group_sched_in(struct perf_event *group_event, struct perf_event_context *ctx)
{}

/*
 * Work out whether we can put this event group on the CPU now.
 */
static int group_can_go_on(struct perf_event *event, int can_add_hw)
{}

static void add_event_to_ctx(struct perf_event *event,
			       struct perf_event_context *ctx)
{}

static void task_ctx_sched_out(struct perf_event_context *ctx,
				enum event_type_t event_type)
{}

static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
				struct perf_event_context *ctx)
{}

/*
 * We want to maintain the following priority of scheduling:
 *  - CPU pinned (EVENT_CPU | EVENT_PINNED)
 *  - task pinned (EVENT_PINNED)
 *  - CPU flexible (EVENT_CPU | EVENT_FLEXIBLE)
 *  - task flexible (EVENT_FLEXIBLE).
 *
 * In order to avoid unscheduling and scheduling back in everything every
 * time an event is added, only do it for the groups of equal priority and
 * below.
 *
 * This can be called after a batch operation on task events, in which case
 * event_type is a bit mask of the types of events involved. For CPU events,
 * event_type is only either EVENT_PINNED or EVENT_FLEXIBLE.
 */
/*
 * XXX: ctx_resched() reschedules the entire perf_event_context while adding a
 * new event to the context or enabling an existing event in the context. We
 * can probably optimize it by rescheduling only the affected pmu_ctx.
 */
static void ctx_resched(struct perf_cpu_context *cpuctx,
			struct perf_event_context *task_ctx,
			enum event_type_t event_type)
{}

void perf_pmu_resched(struct pmu *pmu)
{}

/*
 * Cross CPU call to install and enable a performance event
 *
 * Very similar to remote_function() + event_function() but cannot assume that
 * things like ctx->is_active and cpuctx->task_ctx are set.
 */
static int  __perf_install_in_context(void *info)
{}

static bool exclusive_event_installable(struct perf_event *event,
					struct perf_event_context *ctx);

/*
 * Attach a performance event to a context.
 *
 * Very similar to event_function_call, see comment there.
 */
static void
perf_install_in_context(struct perf_event_context *ctx,
			struct perf_event *event,
			int cpu)
{}

/*
 * Cross CPU call to enable a performance event
 */
static void __perf_event_enable(struct perf_event *event,
				struct perf_cpu_context *cpuctx,
				struct perf_event_context *ctx,
				void *info)
{}

/*
 * Enable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each as described
 * for perf_event_disable.
 */
static void _perf_event_enable(struct perf_event *event)
{}

/*
 * See perf_event_disable();
 */
void perf_event_enable(struct perf_event *event)
{}
EXPORT_SYMBOL_GPL();

struct stop_event_data {};

static int __perf_event_stop(void *info)
{}

static int perf_event_stop(struct perf_event *event, int restart)
{}

/*
 * In order to contain the amount of raciness and trickiness in the address
 * filter configuration management, it is a two-part process:
 *
 * (p1) when userspace mappings change as a result of (1) or (2) or (3) below,
 *      we update the addresses of corresponding vmas in
 *	event::addr_filter_ranges array and bump the event::addr_filters_gen;
 * (p2) when an event is scheduled in (pmu::add), it calls
 *      perf_event_addr_filters_sync() which calls pmu::addr_filters_sync()
 *      if the generation has changed since the previous call.
 *
 * If (p1) happens while the event is active, we restart it to force (p2).
 *
 * (1) perf_addr_filters_apply(): adjusting filters' offsets based on
 *     pre-existing mappings, called once when new filters arrive via SET_FILTER
 *     ioctl;
 * (2) perf_addr_filters_adjust(): adjusting filters' offsets based on newly
 *     registered mapping, called for every new mmap(), with mm::mmap_lock down
 *     for reading;
 * (3) perf_event_addr_filters_exec(): clearing filters' offsets in the process
 *     of exec.
 */
void perf_event_addr_filters_sync(struct perf_event *event)
{}
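
/*
 * Editorial sketch of step (p2) above: only call into the PMU when the
 * filter generation moved since this event was last synced.  Simplified;
 * the real code also checks that the event has address filters at all and
 * takes the filters' lock.  The member/callback names used here are
 * assumptions based on the description above.
 */
static void addr_filters_sync_sketch(struct perf_event *event)
{
	if (event->addr_filters_gen != event->hw.addr_filters_gen) {
		event->pmu->addr_filters_sync(event);
		event->hw.addr_filters_gen = event->addr_filters_gen;
	}
}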
EXPORT_SYMBOL_GPL();

static int _perf_event_refresh(struct perf_event *event, int refresh)
{}

/*
 * See perf_event_disable()
 */
int perf_event_refresh(struct perf_event *event, int refresh)
{}
EXPORT_SYMBOL_GPL();

static int perf_event_modify_breakpoint(struct perf_event *bp,
					 struct perf_event_attr *attr)
{}

/*
 * Copy event-type-independent attributes that may be modified.
 */
static void perf_event_modify_copy_attr(struct perf_event_attr *to,
					const struct perf_event_attr *from)
{}

static int perf_event_modify_attr(struct perf_event *event,
				  struct perf_event_attr *attr)
{}

static void __pmu_ctx_sched_out(struct perf_event_pmu_context *pmu_ctx,
				enum event_type_t event_type)
{}

static void
ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type)
{}

/*
 * Test whether two contexts are equivalent, i.e. whether they have both been
 * cloned from the same version of the same context.
 *
 * Equivalence is measured using a generation number in the context that is
 * incremented on each modification to it; see unclone_ctx(), list_add_event()
 * and list_del_event().
 */
static int context_equiv(struct perf_event_context *ctx1,
			 struct perf_event_context *ctx2)
{}
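
/*
 * Editorial sketch of the generation test described above: two cloned
 * contexts are considered equivalent when they share the same parent and
 * neither has been modified (generation bumped) since cloning.  The real
 * test also accepts direct parent/child pairs and rejects pinned contexts.
 * Member names follow struct perf_event_context in <linux/perf_event.h>.
 */
static int context_equiv_sketch(struct perf_event_context *ctx1,
				struct perf_event_context *ctx2)
{
	return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx &&
	       ctx1->parent_gen == ctx2->parent_gen;
}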

static void __perf_event_sync_stat(struct perf_event *event,
				     struct perf_event *next_event)
{}

static void perf_event_sync_stat(struct perf_event_context *ctx,
				   struct perf_event_context *next_ctx)
{}

#define double_list_for_each_entry(pos1, pos2, head1, head2, member)

static void perf_event_swap_task_ctx_data(struct perf_event_context *prev_ctx,
					  struct perf_event_context *next_ctx)
{}

static void perf_ctx_sched_task_cb(struct perf_event_context *ctx, bool sched_in)
{}

static void
perf_event_context_sched_out(struct task_struct *task, struct task_struct *next)
{}

static DEFINE_PER_CPU(struct list_head, sched_cb_list);
static DEFINE_PER_CPU(int, perf_sched_cb_usages);

void perf_sched_cb_dec(struct pmu *pmu)
{}


void perf_sched_cb_inc(struct pmu *pmu)
{}

/*
 * This function provides the context switch callback to the lower code
 * layer. It is invoked ONLY when the context switch callback is enabled.
 *
 * This callback is relevant even to per-cpu events; for example, multi-event
 * PEBS requires this to provide PID/TID information. This requires that we
 * flush all queued PEBS records before we context switch to a new task.
 */
static void __perf_pmu_sched_task(struct perf_cpu_pmu_context *cpc, bool sched_in)
{}

static void perf_pmu_sched_task(struct task_struct *prev,
				struct task_struct *next,
				bool sched_in)
{}

static void perf_event_switch(struct task_struct *task,
			      struct task_struct *next_prev, bool sched_in);

/*
 * Called from scheduler to remove the events of the current task,
 * with interrupts disabled.
 *
 * We stop each event and update the event value in event->count.
 *
 * This does not protect us against NMI, but disable()
 * sets the disabled bit in the control field of the event _before_
 * accessing the event control register. If an NMI hits, then it will
 * not restart the event.
 */
void __perf_event_task_sched_out(struct task_struct *task,
				 struct task_struct *next)
{}

static bool perf_less_group_idx(const void *l, const void *r, void __always_unused *args)
{}

static void swap_ptr(void *l, void *r, void __always_unused *args)
{}

DEFINE_MIN_HEAP(struct perf_event *, perf_event_min_heap);

static const struct min_heap_callbacks perf_min_heap =;

static void __heap_add(struct perf_event_min_heap *heap, struct perf_event *event)
{}

static void __link_epc(struct perf_event_pmu_context *pmu_ctx)
{}

static noinline int visit_groups_merge(struct perf_event_context *ctx,
				struct perf_event_groups *groups, int cpu,
				struct pmu *pmu,
				int (*func)(struct perf_event *, void *),
				void *data)
{}

/*
 * Because the userpage is strictly per-event (there is no concept of context,
 * so there cannot be a context indirection), every userpage must be updated
 * when context time starts :-(
 *
 * IOW, we must not miss EVENT_TIME edges.
 */
static inline bool event_update_userpage(struct perf_event *event)
{}

static inline void group_update_userpage(struct perf_event *group_event)
{}

static int merge_sched_in(struct perf_event *event, void *data)
{}

static void pmu_groups_sched_in(struct perf_event_context *ctx,
				struct perf_event_groups *groups,
				struct pmu *pmu)
{}

static void ctx_groups_sched_in(struct perf_event_context *ctx,
				struct perf_event_groups *groups,
				bool cgroup)
{}

static void __pmu_ctx_sched_in(struct perf_event_context *ctx,
			       struct pmu *pmu)
{}

static void
ctx_sched_in(struct perf_event_context *ctx, enum event_type_t event_type)
{}

static void perf_event_context_sched_in(struct task_struct *task)
{}

/*
 * Called from scheduler to add the events of the current task
 * with interrupts disabled.
 *
 * We restore the event value and then enable it.
 *
 * This does not protect us against NMI, but enable()
 * sets the enabled bit in the control field of the event _before_
 * accessing the event control register. If an NMI hits, then it will
 * keep the event running.
 */
void __perf_event_task_sched_in(struct task_struct *prev,
				struct task_struct *task)
{}

static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
{}

static DEFINE_PER_CPU(int, perf_throttled_count);
static DEFINE_PER_CPU(u64, perf_throttled_seq);

static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
{}

static void perf_adjust_freq_unthr_events(struct list_head *event_list)
{}

/*
 * combine freq adjustment with unthrottling to avoid two passes over the
 * events. At the same time, make sure that having freq events does not change
 * the rate of unthrottling, as that would introduce bias.
 */
static void
perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle)
{}

/*
 * Move @event to the tail of @ctx's eligible events.
 */
static void rotate_ctx(struct perf_event_context *ctx, struct perf_event *event)
{}

/* pick an event from the flexible_groups to rotate */
static inline struct perf_event *
ctx_event_to_rotate(struct perf_event_pmu_context *pmu_ctx)
{}

static bool perf_rotate_context(struct perf_cpu_pmu_context *cpc)
{}

void perf_event_task_tick(void)
{}

static int event_enable_on_exec(struct perf_event *event,
				struct perf_event_context *ctx)
{}

/*
 * Enable all of a task's events that have been marked enable-on-exec.
 * This expects task == current.
 */
static void perf_event_enable_on_exec(struct perf_event_context *ctx)
{}

static void perf_remove_from_owner(struct perf_event *event);
static void perf_event_exit_event(struct perf_event *event,
				  struct perf_event_context *ctx);

/*
 * Removes all events from the current task that have been marked
 * remove-on-exec, and feeds their values back to parent events.
 */
static void perf_event_remove_on_exec(struct perf_event_context *ctx)
{}

struct perf_read_data {};

static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
{}

/*
 * Cross CPU call to read the hardware event
 */
static void __perf_event_read(void *info)
{}

static inline u64 perf_event_count(struct perf_event *event)
{}

static void calc_timer_values(struct perf_event *event,
				u64 *now,
				u64 *enabled,
				u64 *running)
{}

/*
 * NMI-safe method to read a local event, that is, an event that:
 *   - is either for the current task, or for this CPU
 *   - does not have inherit set, as inherited task events
 *     will not be local and we cannot read them atomically
 *   - must not have a pmu::count method
 */
int perf_event_read_local(struct perf_event *event, u64 *value,
			  u64 *enabled, u64 *running)
{}
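
/*
 * Editorial usage sketch for the NMI-safe reader above: for an event that
 * satisfies the constraints listed (current task or this CPU, no inherit,
 * no pmu::count), the trailing enabled/running pointers can be left NULL
 * when the caller only needs the count.
 */
static u64 read_local_count_sketch(struct perf_event *event)
{
	u64 value = 0;

	if (perf_event_read_local(event, &value, NULL, NULL))
		value = 0;	/* the event is not locally readable here */

	return value;
}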

static int perf_event_read(struct perf_event *event, bool group)
{}

/*
 * Initialize the perf_event context in a task_struct:
 */
static void __perf_event_init_context(struct perf_event_context *ctx)
{}

static void
__perf_init_event_pmu_context(struct perf_event_pmu_context *epc, struct pmu *pmu)
{}

static struct perf_event_context *
alloc_perf_context(struct task_struct *task)
{}

static struct task_struct *
find_lively_task_by_vpid(pid_t vpid)
{}

/*
 * Returns a matching context with refcount and pincount.
 */
static struct perf_event_context *
find_get_context(struct task_struct *task, struct perf_event *event)
{}

static struct perf_event_pmu_context *
find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx,
		     struct perf_event *event)
{}

static void get_pmu_ctx(struct perf_event_pmu_context *epc)
{}

static void free_epc_rcu(struct rcu_head *head)
{}

static void put_pmu_ctx(struct perf_event_pmu_context *epc)
{}

static void perf_event_free_filter(struct perf_event *event);

static void free_event_rcu(struct rcu_head *head)
{}

static void ring_buffer_attach(struct perf_event *event,
			       struct perf_buffer *rb);

static void detach_sb_event(struct perf_event *event)
{}

static bool is_sb_event(struct perf_event *event)
{}

static void unaccount_pmu_sb_event(struct perf_event *event)
{}

#ifdef CONFIG_NO_HZ_FULL
static DEFINE_SPINLOCK(nr_freq_lock);
#endif

static void unaccount_freq_event_nohz(void)
{}

static void unaccount_freq_event(void)
{}

static void unaccount_event(struct perf_event *event)
{}

static void perf_sched_delayed(struct work_struct *work)
{}

/*
 * The following implement mutual exclusion of events on "exclusive" pmus
 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
 * at a time, so we disallow creating events that might conflict, namely:
 *
 *  1) cpu-wide events in the presence of per-task events,
 *  2) per-task events in the presence of cpu-wide events,
 *  3) two matching events on the same perf_event_context.
 *
 * The former two cases are handled in the allocation path (perf_event_alloc(),
 * _free_event()), the latter -- before the first perf_install_in_context().
 */
static int exclusive_event_init(struct perf_event *event)
{}

static void exclusive_event_destroy(struct perf_event *event)
{}

static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
{}

static bool exclusive_event_installable(struct perf_event *event,
					struct perf_event_context *ctx)
{}
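
/*
 * Editorial sketch of rule 3) above: refuse installation when another event
 * already on this context would conflict on an exclusive PMU.  Simplified;
 * the real code also checks the PMU capability and holds ctx->mutex while
 * walking the list.
 */
static bool exclusive_installable_sketch(struct perf_event *event,
					 struct perf_event_context *ctx)
{
	struct perf_event *iter;

	list_for_each_entry(iter, &ctx->event_list, event_entry) {
		if (exclusive_event_match(iter, event))
			return false;
	}

	return true;
}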

static void perf_addr_filters_splice(struct perf_event *event,
				       struct list_head *head);

static void perf_pending_task_sync(struct perf_event *event)
{}

static void _free_event(struct perf_event *event)
{}

/*
 * Used to free events which have a known refcount of 1, such as events in
 * error paths (not yet exposed to userspace) and inherited events.
 */
static void free_event(struct perf_event *event)
{}

/*
 * Remove user event from the owner task.
 */
static void perf_remove_from_owner(struct perf_event *event)
{}

static void put_event(struct perf_event *event)
{}

/*
 * Kill an event dead; while event::refcount will preserve the event
 * object, it will not preserve its functionality. Once the last 'user'
 * gives up the object, we'll destroy the thing.
 */
int perf_event_release_kernel(struct perf_event *event)
{}
EXPORT_SYMBOL_GPL();

/*
 * Called when the last reference to the file is gone.
 */
static int perf_release(struct inode *inode, struct file *file)
{}

static u64 __perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
{}

u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
{}
EXPORT_SYMBOL_GPL();

static int __perf_read_group_add(struct perf_event *leader,
					u64 read_format, u64 *values)
{}

static int perf_read_group(struct perf_event *event,
				   u64 read_format, char __user *buf)
{}

static int perf_read_one(struct perf_event *event,
				 u64 read_format, char __user *buf)
{}

static bool is_event_hup(struct perf_event *event)
{}

/*
 * Read the performance event - simple non-blocking version for now
 */
static ssize_t
__perf_read(struct perf_event *event, char __user *buf, size_t count)
{}

static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{}

static __poll_t perf_poll(struct file *file, poll_table *wait)
{}

static void _perf_event_reset(struct perf_event *event)
{}

/* Assume it's not an event with inherit set. */
u64 perf_event_pause(struct perf_event *event, bool reset)
{}
EXPORT_SYMBOL_GPL();

/*
 * Holding the top-level event's child_mutex means that any
 * descendant process that has inherited this event will block
 * in perf_event_exit_event() if it goes to exit, thus satisfying the
 * task existence requirements of perf_event_enable/disable.
 */
static void perf_event_for_each_child(struct perf_event *event,
					void (*func)(struct perf_event *))
{}

static void perf_event_for_each(struct perf_event *event,
				  void (*func)(struct perf_event *))
{}

static void __perf_event_period(struct perf_event *event,
				struct perf_cpu_context *cpuctx,
				struct perf_event_context *ctx,
				void *info)
{}

static int perf_event_check_period(struct perf_event *event, u64 value)
{}

static int _perf_event_period(struct perf_event *event, u64 value)
{}

int perf_event_period(struct perf_event *event, u64 value)
{}
EXPORT_SYMBOL_GPL();

static const struct file_operations perf_fops;

static inline int perf_fget_light(int fd, struct fd *p)
{}

static int perf_event_set_output(struct perf_event *event,
				 struct perf_event *output_event);
static int perf_event_set_filter(struct perf_event *event, void __user *arg);
static int perf_copy_attr(struct perf_event_attr __user *uattr,
			  struct perf_event_attr *attr);

static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
{}

static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{}

#ifdef CONFIG_COMPAT
static long perf_compat_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{}
#else
#define perf_compat_ioctl
#endif

int perf_event_task_enable(void)
{}

int perf_event_task_disable(void)
{}

static int perf_event_index(struct perf_event *event)
{}

static void perf_event_init_userpage(struct perf_event *event)
{}

void __weak arch_perf_update_userpage(
	struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now)
{}

/*
 * Callers need to ensure there can be no nesting of this function, otherwise
 * the seqlock logic goes bad. We cannot serialize this because the arch
 * code calls this from NMI context.
 */
void perf_event_update_userpage(struct perf_event *event)
{}
EXPORT_SYMBOL_GPL();
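
/*
 * Editorial sketch of the matching read side of the seqcount-like protocol
 * referred to above, as a monitor mapping the event's user page would use
 * it: sample ->lock, read the fields of interest, and retry if ->lock
 * changed in the meantime.  Illustration only.
 */
static u64 read_userpage_time_enabled_sketch(struct perf_event_mmap_page *pc)
{
	u32 seq;
	u64 time_enabled;

	do {
		seq = READ_ONCE(pc->lock);
		barrier();
		time_enabled = pc->time_enabled;
		barrier();
	} while (READ_ONCE(pc->lock) != seq);

	return time_enabled;
}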

static vm_fault_t perf_mmap_fault(struct vm_fault *vmf)
{}

static void ring_buffer_attach(struct perf_event *event,
			       struct perf_buffer *rb)
{}

static void ring_buffer_wakeup(struct perf_event *event)
{}

struct perf_buffer *ring_buffer_get(struct perf_event *event)
{}

void ring_buffer_put(struct perf_buffer *rb)
{}

static void perf_mmap_open(struct vm_area_struct *vma)
{}

static void perf_pmu_output_stop(struct perf_event *event);

/*
 * A buffer can be mmap()ed multiple times; either directly through the same
 * event, or through other events by use of perf_event_set_output().
 *
 * In order to undo the VM accounting done by perf_mmap() we need to destroy
 * the buffer here, where we still have a VM context. This means we need
 * to detach all events redirecting to us.
 */
static void perf_mmap_close(struct vm_area_struct *vma)
{}

static const struct vm_operations_struct perf_mmap_vmops =;

static int perf_mmap(struct file *file, struct vm_area_struct *vma)
{}

static int perf_fasync(int fd, struct file *filp, int on)
{}

static const struct file_operations perf_fops =;

/*
 * Perf event wakeup
 *
 * If there's data, ensure we set the poll() state and publish everything
 * to user-space before waking everybody up.
 */

void perf_event_wakeup(struct perf_event *event)
{}

static void perf_sigtrap(struct perf_event *event)
{}

/*
 * Deliver the pending work in-event-context or follow the context.
 */
static void __perf_pending_disable(struct perf_event *event)
{}

static void perf_pending_disable(struct irq_work *entry)
{}

static void perf_pending_irq(struct irq_work *entry)
{}

static void perf_pending_task(struct callback_head *head)
{}

#ifdef CONFIG_GUEST_PERF_EVENTS
struct perf_guest_info_callbacks __rcu *perf_guest_cbs;

DEFINE_STATIC_CALL_RET0();
DEFINE_STATIC_CALL_RET0();
DEFINE_STATIC_CALL_RET0();

void perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{}
EXPORT_SYMBOL_GPL();

void perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{}
EXPORT_SYMBOL_GPL();
#endif

static void
perf_output_sample_regs(struct perf_output_handle *handle,
			struct pt_regs *regs, u64 mask)
{}

static void perf_sample_regs_user(struct perf_regs *regs_user,
				  struct pt_regs *regs)
{}

static void perf_sample_regs_intr(struct perf_regs *regs_intr,
				  struct pt_regs *regs)
{}


/*
 * Get remaining task size from user stack pointer.
 *
 * It'd be better to take the stack vma map and limit this more
 * precisely, but there's no way to get it safely under interrupt,
 * so we use TASK_SIZE as the limit.
 */
static u64 perf_ustack_task_size(struct pt_regs *regs)
{}

static u16
perf_sample_ustack_size(u16 stack_size, u16 header_size,
			struct pt_regs *regs)
{}

static void
perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
			  struct pt_regs *regs)
{}

static unsigned long perf_prepare_sample_aux(struct perf_event *event,
					  struct perf_sample_data *data,
					  size_t size)
{}

static long perf_pmu_snapshot_aux(struct perf_buffer *rb,
                                 struct perf_event *event,
                                 struct perf_output_handle *handle,
                                 unsigned long size)
{}

static void perf_aux_sample_output(struct perf_event *event,
				   struct perf_output_handle *handle,
				   struct perf_sample_data *data)
{}

/*
 * A set of common sample data types saved even for non-sample records
 * when event->attr.sample_id_all is set.
 */
#define PERF_SAMPLE_ID_ALL

static void __perf_event_header__init_id(struct perf_sample_data *data,
					 struct perf_event *event,
					 u64 sample_type)
{}

void perf_event_header__init_id(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event)
{}

static void __perf_event__output_id_sample(struct perf_output_handle *handle,
					   struct perf_sample_data *data)
{}

void perf_event__output_id_sample(struct perf_event *event,
				  struct perf_output_handle *handle,
				  struct perf_sample_data *sample)
{}

static void perf_output_read_one(struct perf_output_handle *handle,
				 struct perf_event *event,
				 u64 enabled, u64 running)
{}

static void perf_output_read_group(struct perf_output_handle *handle,
			    struct perf_event *event,
			    u64 enabled, u64 running)
{}

#define PERF_FORMAT_TOTAL_TIMES

/*
 * XXX PERF_SAMPLE_READ vs inherited events seems difficult.
 *
 * The problem is that it's both hard and excessively expensive to iterate the
 * child list, not to mention that it's impossible to IPI the children running
 * on another CPU, from interrupt/NMI context.
 */
static void perf_output_read(struct perf_output_handle *handle,
			     struct perf_event *event)
{}

void perf_output_sample(struct perf_output_handle *handle,
			struct perf_event_header *header,
			struct perf_sample_data *data,
			struct perf_event *event)
{}

static u64 perf_virt_to_phys(u64 virt)
{}

/*
 * Return the pagetable size of a given virtual address.
 */
static u64 perf_get_pgtable_size(struct mm_struct *mm, unsigned long addr)
{}

static u64 perf_get_page_size(unsigned long addr)
{}

static struct perf_callchain_entry __empty_callchain =;

struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs)
{}

static __always_inline u64 __cond_set(u64 flags, u64 s, u64 d)
{}

void perf_prepare_sample(struct perf_sample_data *data,
			 struct perf_event *event,
			 struct pt_regs *regs)
{}

void perf_prepare_header(struct perf_event_header *header,
			 struct perf_sample_data *data,
			 struct perf_event *event,
			 struct pt_regs *regs)
{}

static __always_inline int
__perf_event_output(struct perf_event *event,
		    struct perf_sample_data *data,
		    struct pt_regs *regs,
		    int (*output_begin)(struct perf_output_handle *,
					struct perf_sample_data *,
					struct perf_event *,
					unsigned int))
{}

void
perf_event_output_forward(struct perf_event *event,
			 struct perf_sample_data *data,
			 struct pt_regs *regs)
{}

void
perf_event_output_backward(struct perf_event *event,
			   struct perf_sample_data *data,
			   struct pt_regs *regs)
{}

int
perf_event_output(struct perf_event *event,
		  struct perf_sample_data *data,
		  struct pt_regs *regs)
{}

/*
 * read event_id
 */

struct perf_read_event {};

static void
perf_event_read_event(struct perf_event *event,
			struct task_struct *task)
{}

typedef void (perf_iterate_f)(struct perf_event *event, void *data);

static void
perf_iterate_ctx(struct perf_event_context *ctx,
		   perf_iterate_f output,
		   void *data, bool all)
{}

static void perf_iterate_sb_cpu(perf_iterate_f output, void *data)
{}

/*
 * Iterate all events that need to receive side-band events.
 *
 * For new callers: ensure that account_pmu_sb_event() includes
 * your event, otherwise it might not get delivered.
 */
static void
perf_iterate_sb(perf_iterate_f output, void *data,
	       struct perf_event_context *task_ctx)
{}

/*
 * Clear all file-based filters at exec; they'll have to be
 * re-instated when/if these objects are mmapped again.
 */
static void perf_event_addr_filters_exec(struct perf_event *event, void *data)
{}

void perf_event_exec(void)
{}

struct remote_output {};

static void __perf_event_output_stop(struct perf_event *event, void *data)
{}

static int __perf_pmu_output_stop(void *info)
{}

static void perf_pmu_output_stop(struct perf_event *event)
{}

/*
 * task tracking -- fork/exit
 *
 * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task
 */

struct perf_task_event {};

static int perf_event_task_match(struct perf_event *event)
{}

static void perf_event_task_output(struct perf_event *event,
				   void *data)
{}

static void perf_event_task(struct task_struct *task,
			      struct perf_event_context *task_ctx,
			      int new)
{}

void perf_event_fork(struct task_struct *task)
{}

/*
 * comm tracking
 */

struct perf_comm_event {};

static int perf_event_comm_match(struct perf_event *event)
{}

static void perf_event_comm_output(struct perf_event *event,
				   void *data)
{}

static void perf_event_comm_event(struct perf_comm_event *comm_event)
{}

void perf_event_comm(struct task_struct *task, bool exec)
{}

/*
 * namespaces tracking
 */

struct perf_namespaces_event {};

static int perf_event_namespaces_match(struct perf_event *event)
{}

static void perf_event_namespaces_output(struct perf_event *event,
					 void *data)
{}

static void perf_fill_ns_link_info(struct perf_ns_link_info *ns_link_info,
				   struct task_struct *task,
				   const struct proc_ns_operations *ns_ops)
{}

void perf_event_namespaces(struct task_struct *task)
{}

/*
 * cgroup tracking
 */
#ifdef CONFIG_CGROUP_PERF

struct perf_cgroup_event {};

static int perf_event_cgroup_match(struct perf_event *event)
{}

static void perf_event_cgroup_output(struct perf_event *event, void *data)
{}

static void perf_event_cgroup(struct cgroup *cgrp)
{}

#endif

/*
 * mmap tracking
 */

struct perf_mmap_event {};

static int perf_event_mmap_match(struct perf_event *event,
				 void *data)
{}

static void perf_event_mmap_output(struct perf_event *event,
				   void *data)
{}

static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
{}

/*
 * Check whether inode and address range match filter criteria.
 */
static bool perf_addr_filter_match(struct perf_addr_filter *filter,
				     struct file *file, unsigned long offset,
				     unsigned long size)
{}

static bool perf_addr_filter_vma_adjust(struct perf_addr_filter *filter,
					struct vm_area_struct *vma,
					struct perf_addr_filter_range *fr)
{}

static void __perf_addr_filters_adjust(struct perf_event *event, void *data)
{}

/*
 * Adjust all of the task's events' filters to the new vma
 */
static void perf_addr_filters_adjust(struct vm_area_struct *vma)
{}

void perf_event_mmap(struct vm_area_struct *vma)
{}

void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags)
{}

/*
 * Lost/dropped samples logging
 */
void perf_log_lost_samples(struct perf_event *event, u64 lost)
{}

/*
 * context_switch tracking
 */

struct perf_switch_event {};

static int perf_event_switch_match(struct perf_event *event)
{}

static void perf_event_switch_output(struct perf_event *event, void *data)
{}

static void perf_event_switch(struct task_struct *task,
			      struct task_struct *next_prev, bool sched_in)
{}

/*
 * IRQ throttle logging
 */

static void perf_log_throttle(struct perf_event *event, int enable)
{}

/*
 * ksymbol register/unregister tracking
 */

struct perf_ksymbol_event {};

static int perf_event_ksymbol_match(struct perf_event *event)
{}

static void perf_event_ksymbol_output(struct perf_event *event, void *data)
{}

void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len, bool unregister,
			const char *sym)
{}

/*
 * bpf program load/unload tracking
 */

struct perf_bpf_event {};

static int perf_event_bpf_match(struct perf_event *event)
{}

static void perf_event_bpf_output(struct perf_event *event, void *data)
{}

static void perf_event_bpf_emit_ksymbols(struct bpf_prog *prog,
					 enum perf_bpf_event_type type)
{}

void perf_event_bpf_event(struct bpf_prog *prog,
			  enum perf_bpf_event_type type,
			  u16 flags)
{}

struct perf_text_poke_event {};

static int perf_event_text_poke_match(struct perf_event *event)
{}

static void perf_event_text_poke_output(struct perf_event *event, void *data)
{}

void perf_event_text_poke(const void *addr, const void *old_bytes,
			  size_t old_len, const void *new_bytes, size_t new_len)
{}

void perf_event_itrace_started(struct perf_event *event)
{}

static void perf_log_itrace_start(struct perf_event *event)
{}

void perf_report_aux_output_id(struct perf_event *event, u64 hw_id)
{}
EXPORT_SYMBOL_GPL();

static int
__perf_event_account_interrupt(struct perf_event *event, int throttle)
{}

int perf_event_account_interrupt(struct perf_event *event)
{}

static inline bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs)
{}

#ifdef CONFIG_BPF_SYSCALL
static int bpf_overflow_handler(struct perf_event *event,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{}

static inline int perf_event_set_bpf_handler(struct perf_event *event,
					     struct bpf_prog *prog,
					     u64 bpf_cookie)
{}

static inline void perf_event_free_bpf_handler(struct perf_event *event)
{}
#else
static inline int bpf_overflow_handler(struct perf_event *event,
				       struct perf_sample_data *data,
				       struct pt_regs *regs)
{
	return 1;
}

static inline int perf_event_set_bpf_handler(struct perf_event *event,
					     struct bpf_prog *prog,
					     u64 bpf_cookie)
{
	return -EOPNOTSUPP;
}

static inline void perf_event_free_bpf_handler(struct perf_event *event)
{
}
#endif

/*
 * Generic event overflow handling, sampling.
 */

static int __perf_event_overflow(struct perf_event *event,
				 int throttle, struct perf_sample_data *data,
				 struct pt_regs *regs)
{}

int perf_event_overflow(struct perf_event *event,
			struct perf_sample_data *data,
			struct pt_regs *regs)
{}

/*
 * Generic software event infrastructure
 */

struct swevent_htable {};
static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);

/*
 * We directly increment event->count and keep a second value in
 * event->hw.period_left to count intervals. This period counter
 * is kept in the range [-sample_period, 0] so that we can use the
 * sign as a trigger.
 */

u64 perf_swevent_set_period(struct perf_event *event)
{}
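
/*
 * Illustrative sketch of the arithmetic described above (hypothetical name,
 * not the exact in-tree body): period_left starts at -sample_period and is
 * moved towards zero as events are counted; once it reaches zero or above,
 * one or more whole periods have elapsed and it is rewound by that many
 * periods, returning the number of overflows to report.
 */
static u64 example_swevent_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 period = hwc->last_period;
	u64 nr, offset;
	s64 old, val;

	hwc->last_period = hwc->sample_period;

again:
	old = val = local64_read(&hwc->period_left);
	if (val < 0)		/* still inside the current period */
		return 0;

	nr = div64_u64(period + val, period);	/* whole periods elapsed */
	offset = nr * period;
	val -= offset;				/* back into [-period, 0] */
	if (local64_cmpxchg(&hwc->period_left, old, val) != old)
		goto again;			/* lost a race with an NMI */

	return nr;
}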

static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
				    struct perf_sample_data *data,
				    struct pt_regs *regs)
{}

static void perf_swevent_event(struct perf_event *event, u64 nr,
			       struct perf_sample_data *data,
			       struct pt_regs *regs)
{}

static int perf_exclude_event(struct perf_event *event,
			      struct pt_regs *regs)
{}

static int perf_swevent_match(struct perf_event *event,
				enum perf_type_id type,
				u32 event_id,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{}

static inline u64 swevent_hash(u64 type, u32 event_id)
{}

static inline struct hlist_head *
__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
{}

/* For the read side: events when they trigger */
static inline struct hlist_head *
find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
{}

/* For the event head insertion and removal in the hlist */
static inline struct hlist_head *
find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
{}

static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
				    u64 nr,
				    struct perf_sample_data *data,
				    struct pt_regs *regs)
{}

DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]);

int perf_swevent_get_recursion_context(void)
{}
EXPORT_SYMBOL_GPL();

void perf_swevent_put_recursion_context(int rctx)
{}

void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{}

void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{}

static void perf_swevent_read(struct perf_event *event)
{}

static int perf_swevent_add(struct perf_event *event, int flags)
{}

static void perf_swevent_del(struct perf_event *event, int flags)
{}

static void perf_swevent_start(struct perf_event *event, int flags)
{}

static void perf_swevent_stop(struct perf_event *event, int flags)
{}

/* Deref the hlist from the update side */
static inline struct swevent_hlist *
swevent_hlist_deref(struct swevent_htable *swhash)
{}

static void swevent_hlist_release(struct swevent_htable *swhash)
{}

static void swevent_hlist_put_cpu(int cpu)
{}

static void swevent_hlist_put(void)
{}

static int swevent_hlist_get_cpu(int cpu)
{}

static int swevent_hlist_get(void)
{}

struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

static void sw_perf_event_destroy(struct perf_event *event)
{}

static struct pmu perf_cpu_clock; /* fwd declaration */
static struct pmu perf_task_clock;

static int perf_swevent_init(struct perf_event *event)
{}

static struct pmu perf_swevent =;

#ifdef CONFIG_EVENT_TRACING

static void tp_perf_event_destroy(struct perf_event *event)
{}

static int perf_tp_event_init(struct perf_event *event)
{}

static struct pmu perf_tracepoint =;

static int perf_tp_filter_match(struct perf_event *event,
				struct perf_sample_data *data)
{}

static int perf_tp_event_match(struct perf_event *event,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{}

void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
			       struct trace_event_call *call, u64 count,
			       struct pt_regs *regs, struct hlist_head *head,
			       struct task_struct *task)
{}
EXPORT_SYMBOL_GPL();

static void __perf_tp_event_target_task(u64 count, void *record,
					struct pt_regs *regs,
					struct perf_sample_data *data,
					struct perf_event *event)
{}

static void perf_tp_event_target_task(u64 count, void *record,
				      struct pt_regs *regs,
				      struct perf_sample_data *data,
				      struct perf_event_context *ctx)
{}

void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
		   struct pt_regs *regs, struct hlist_head *head, int rctx,
		   struct task_struct *task)
{}
EXPORT_SYMBOL_GPL();

#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
/*
 * Flags in config, used by dynamic PMU kprobe and uprobe
 * The flags should match the following PMU_FORMAT_ATTR().
 *
 * PERF_PROBE_CONFIG_IS_RETPROBE if set, create kretprobe/uretprobe
 *                               if not set, create kprobe/uprobe
 *
 * The following values specify a reference counter (or semaphore, in the
 * terminology of tools like dtrace and systemtap) used by Userspace
 * Statically Defined Tracepoints (USDT). Currently, we use 40 bits for the
 * offset.
 *
 * PERF_UPROBE_REF_CTR_OFFSET_BITS	# of bits in config used as the offset
 * PERF_UPROBE_REF_CTR_OFFSET_SHIFT	# of bits to shift left
 */
enum perf_probe_config {};

PMU_FORMAT_ATTR();
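
/*
 * Illustrative helper (hypothetical, not part of the ABI definition above):
 * how the documented config bits combine into perf_event_attr::config for a
 * u(ret)probe carrying a USDT reference-counter offset.  Only the enum names
 * from the comment are assumed; their numeric values are elided here.
 */
static inline u64 example_probe_config(bool is_retprobe, u64 ref_ctr_offset)
{
	u64 config = 0;

	if (is_retprobe)
		config |= PERF_PROBE_CONFIG_IS_RETPROBE;

	/* shift the reference counter offset into its field in config */
	config |= ref_ctr_offset << PERF_UPROBE_REF_CTR_OFFSET_SHIFT;

	return config;
}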
#endif

#ifdef CONFIG_KPROBE_EVENTS
static struct attribute *kprobe_attrs[] =;

static struct attribute_group kprobe_format_group =;

static const struct attribute_group *kprobe_attr_groups[] =;

static int perf_kprobe_event_init(struct perf_event *event);
static struct pmu perf_kprobe =;

static int perf_kprobe_event_init(struct perf_event *event)
{}
#endif /* CONFIG_KPROBE_EVENTS */

#ifdef CONFIG_UPROBE_EVENTS
PMU_FORMAT_ATTR();

static struct attribute *uprobe_attrs[] =;

static struct attribute_group uprobe_format_group =;

static const struct attribute_group *uprobe_attr_groups[] =;

static int perf_uprobe_event_init(struct perf_event *event);
static struct pmu perf_uprobe =;

static int perf_uprobe_event_init(struct perf_event *event)
{}
#endif /* CONFIG_UPROBE_EVENTS */

static inline void perf_tp_register(void)
{}

static void perf_event_free_filter(struct perf_event *event)
{}

/*
 * returns true if the event is a tracepoint, or a kprobe/uprobe created
 * with perf_event_open()
 */
static inline bool perf_event_is_tracing(struct perf_event *event)
{}
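
/*
 * Minimal sketch of the check documented above (hypothetical name, not the
 * in-tree body): an event counts as "tracing" when it belongs to the
 * tracepoint PMU or to the dynamic kprobe/uprobe PMUs.
 */
static inline bool example_event_is_tracing(struct perf_event *event)
{
	if (event->pmu == &perf_tracepoint)
		return true;
#ifdef CONFIG_KPROBE_EVENTS
	if (event->pmu == &perf_kprobe)
		return true;
#endif
#ifdef CONFIG_UPROBE_EVENTS
	if (event->pmu == &perf_uprobe)
		return true;
#endif
	return false;
}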

int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog,
			    u64 bpf_cookie)
{}

void perf_event_free_bpf_prog(struct perf_event *event)
{}

#else

static inline void perf_tp_register(void)
{
}

static void perf_event_free_filter(struct perf_event *event)
{
}

int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog,
			    u64 bpf_cookie)
{
	/* No CONFIG_EVENT_TRACING: there are no tracing events to attach to. */
	return -ENOENT;
}

void perf_event_free_bpf_prog(struct perf_event *event)
{
}
#endif /* CONFIG_EVENT_TRACING */

#ifdef CONFIG_HAVE_HW_BREAKPOINT
void perf_bp_event(struct perf_event *bp, void *data)
{}
#endif

/*
 * Allocate a new address filter
 */
static struct perf_addr_filter *
perf_addr_filter_new(struct perf_event *event, struct list_head *filters)
{}

static void free_filters_list(struct list_head *filters)
{}

/*
 * Free existing address filters and optionally install new ones
 */
static void perf_addr_filters_splice(struct perf_event *event,
				     struct list_head *head)
{}

/*
 * Scan through mm's vmas and see if one of them matches the
 * @filter; if so, adjust filter's address range.
 * Called with mm::mmap_lock down for reading.
 */
static void perf_addr_filter_apply(struct perf_addr_filter *filter,
				   struct mm_struct *mm,
				   struct perf_addr_filter_range *fr)
{}
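
/*
 * Illustrative sketch of the walk described above (hypothetical name,
 * simplified): only file-backed vmas mapping the same object as the filter's
 * path are considered, and the filter's file offset is translated into the
 * mapping's virtual addresses.  The in-tree version additionally checks that
 * the filter range actually overlaps the vma; mmap_lock is already held for
 * reading by the caller.
 */
static void example_addr_filter_apply(struct perf_addr_filter *filter,
				      struct mm_struct *mm,
				      struct perf_addr_filter_range *fr)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	for_each_vma(vmi, vma) {
		unsigned long off;

		/* only file-backed mappings can match an object-file filter */
		if (!vma->vm_file || !filter->path.dentry)
			continue;

		/* same object as the filter's path? */
		if (d_inode(vma->vm_file->f_path.dentry) !=
		    d_inode(filter->path.dentry))
			continue;

		/* translate the file offset into this mapping's addresses */
		off = vma->vm_pgoff << PAGE_SHIFT;
		fr->start = vma->vm_start + filter->offset - off;
		fr->size = filter->size;
		return;
	}
}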

/*
 * Update event's address range filters based on the
 * task's existing mappings, if any.
 */
static void perf_event_addr_filters_apply(struct perf_event *event)
{}

/*
 * Address range filtering: limiting the data to certain
 * instruction address ranges. Filters are ioctl()ed to us from
 * userspace as ASCII strings.
 *
 * Filter string format:
 *
 * ACTION RANGE_SPEC
 * where ACTION is one of the following:
 *  * "filter": limit the trace to this region
 *  * "start": start tracing from this address
 *  * "stop": stop tracing at this address/region;
 * RANGE_SPEC is
 *  * for kernel addresses: <start address>[/<size>]
 *  * for object files:     <start address>[/<size>]@</path/to/object/file>
 *
 * if <size> is not specified or is zero, the range is treated as a single
 * address; not valid for ACTION=="filter".
 */
enum {};

enum {};

static const match_table_t if_tokens =;

/*
 * Address filter string parser
 */
static int
perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
			     struct list_head *filters)
{}

static int
perf_event_set_addr_filter(struct perf_event *event, char *filter_str)
{}

static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{}
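
/*
 * Minimal sketch of the ioctl path for the filter strings documented above
 * (hypothetical name, simplified): duplicate the user string, e.g.
 * "filter 0x1000/0x2000@/usr/bin/app" (the path is only an example), and
 * feed it to the address-range parser.  Tracing events (tracepoints,
 * k/uprobes) take the tracepoint filter path instead; that branch is elided
 * here.
 */
static int example_set_filter(struct perf_event *event, void __user *arg)
{
	char *filter_str;
	int ret = -EINVAL;

	filter_str = strndup_user(arg, PAGE_SIZE);
	if (IS_ERR(filter_str))
		return PTR_ERR(filter_str);

	if (has_addr_filter(event))
		ret = perf_event_set_addr_filter(event, filter_str);

	kfree(filter_str);
	return ret;
}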

/*
 * hrtimer based swevent callback
 */

static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
{}

static void perf_swevent_start_hrtimer(struct perf_event *event)
{}

static void perf_swevent_cancel_hrtimer(struct perf_event *event)
{}

static void perf_swevent_init_hrtimer(struct perf_event *event)
{}

/*
 * Software event: cpu wall time clock
 */

static void cpu_clock_event_update(struct perf_event *event)
{}

static void cpu_clock_event_start(struct perf_event *event, int flags)
{}

static void cpu_clock_event_stop(struct perf_event *event, int flags)
{}

static int cpu_clock_event_add(struct perf_event *event, int flags)
{}

static void cpu_clock_event_del(struct perf_event *event, int flags)
{}

static void cpu_clock_event_read(struct perf_event *event)
{}

static int cpu_clock_event_init(struct perf_event *event)
{}

static struct pmu perf_cpu_clock =;

/*
 * Software event: task time clock
 */

static void task_clock_event_update(struct perf_event *event, u64 now)
{}

static void task_clock_event_start(struct perf_event *event, int flags)
{}

static void task_clock_event_stop(struct perf_event *event, int flags)
{}

static int task_clock_event_add(struct perf_event *event, int flags)
{}

static void task_clock_event_del(struct perf_event *event, int flags)
{}

static void task_clock_event_read(struct perf_event *event)
{}

static int task_clock_event_init(struct perf_event *event)
{}

static struct pmu perf_task_clock =;

static void perf_pmu_nop_void(struct pmu *pmu)
{}

static void perf_pmu_nop_txn(struct pmu *pmu, unsigned int flags)
{}

static int perf_pmu_nop_int(struct pmu *pmu)
{}

static int perf_event_nop_int(struct perf_event *event, u64 value)
{}

static DEFINE_PER_CPU(unsigned int, nop_txn_flags);

static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
{}

static int perf_pmu_commit_txn(struct pmu *pmu)
{}

static void perf_pmu_cancel_txn(struct pmu *pmu)
{}

static int perf_event_idx_default(struct perf_event *event)
{}

static void free_pmu_context(struct pmu *pmu)
{}

/*
 * Let userspace know that this PMU supports address range filtering:
 */
static ssize_t nr_addr_filters_show(struct device *dev,
				    struct device_attribute *attr,
				    char *page)
{}
DEVICE_ATTR_RO();
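
/*
 * Sketch of a typical implementation of the attribute above (hypothetical
 * name): the PMU device's drvdata points back at the struct pmu, whose
 * nr_addr_filters count is what userspace reads from sysfs.
 */
static ssize_t example_nr_addr_filters_show(struct device *dev,
					    struct device_attribute *attr,
					    char *page)
{
	struct pmu *pmu = dev_get_drvdata(dev);

	return sysfs_emit(page, "%d\n", pmu->nr_addr_filters);
}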

static struct idr pmu_idr;

static ssize_t
type_show(struct device *dev, struct device_attribute *attr, char *page)
{}
static DEVICE_ATTR_RO(type);

static ssize_t
perf_event_mux_interval_ms_show(struct device *dev,
				struct device_attribute *attr,
				char *page)
{}

static DEFINE_MUTEX(mux_interval_mutex);

static ssize_t
perf_event_mux_interval_ms_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{}
static DEVICE_ATTR_RW(perf_event_mux_interval_ms);

static struct attribute *pmu_dev_attrs[] =;

static umode_t pmu_dev_is_visible(struct kobject *kobj, struct attribute *a, int n)
{}

static struct attribute_group pmu_dev_attr_group =;

static const struct attribute_group *pmu_dev_groups[] =;

static int pmu_bus_running;
static struct bus_type pmu_bus =;

static void pmu_dev_release(struct device *dev)
{}

static int pmu_dev_alloc(struct pmu *pmu)
{}

static struct lock_class_key cpuctx_mutex;
static struct lock_class_key cpuctx_lock;

int perf_pmu_register(struct pmu *pmu, const char *name, int type)
{}
EXPORT_SYMBOL_GPL();

void perf_pmu_unregister(struct pmu *pmu)
{}
EXPORT_SYMBOL_GPL();

static inline bool has_extended_regs(struct perf_event *event)
{}

static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
{}

static struct pmu *perf_init_event(struct perf_event *event)
{}

static void attach_sb_event(struct perf_event *event)
{}

/*
 * We keep a list of all !task (and therefore per-cpu) events
 * that need to receive side-band records.
 *
 * This avoids having to scan all the various PMU per-cpu contexts
 * looking for them.
 */
static void account_pmu_sb_event(struct perf_event *event)
{}
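
/*
 * Illustrative sketch of the bookkeeping described above, using a
 * hypothetical per-cpu list (example_sb_events, initialization elided): the
 * event is linked into the list for its CPU under a spinlock, with
 * RCU-friendly insertion so side-band record generation can walk the list
 * without taking the lock.
 */
struct example_sb_list {
	struct list_head	list;
	raw_spinlock_t		lock;
};
static DEFINE_PER_CPU(struct example_sb_list, example_sb_events);

static void example_attach_sb_event(struct perf_event *event)
{
	struct example_sb_list *pel =
		per_cpu_ptr(&example_sb_events, event->cpu);

	raw_spin_lock(&pel->lock);
	list_add_rcu(&event->sb_list, &pel->list);
	raw_spin_unlock(&pel->lock);
}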

/* Freq events need the tick to stay alive (see perf_event_task_tick). */
static void account_freq_event_nohz(void)
{}

static void account_freq_event(void)
{}


static void account_event(struct perf_event *event)
{}

/*
 * Allocate and initialize an event structure
 */
static struct perf_event *
perf_event_alloc(struct perf_event_attr *attr, int cpu,
		 struct task_struct *task,
		 struct perf_event *group_leader,
		 struct perf_event *parent_event,
		 perf_overflow_handler_t overflow_handler,
		 void *context, int cgroup_fd)
{}

static int perf_copy_attr(struct perf_event_attr __user *uattr,
			  struct perf_event_attr *attr)
{}

static void mutex_lock_double(struct mutex *a, struct mutex *b)
{}

static int
perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
{}

static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
{}

static bool
perf_check_permission(struct perf_event_attr *attr, struct task_struct *task)
{}

/**
 * sys_perf_event_open - open a performance event, associate it to a task/cpu
 *
 * @attr_uptr:	event_id type attributes for monitoring/sampling
 * @pid:		target pid
 * @cpu:		target cpu
 * @group_fd:		group leader event fd
 * @flags:		perf event open flags
 */
SYSCALL_DEFINE5(perf_event_open,
		struct perf_event_attr __user *, attr_uptr,
		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{}
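
/*
 * Illustrative userspace call site (not kernel code): glibc provides no
 * wrapper for this syscall, so it is usually issued via syscall(2).  The
 * attribute values below are only an example configuration that counts
 * task-clock for the calling thread on any CPU, outside any event group.
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_SOFTWARE,
 *		.config		= PERF_COUNT_SW_TASK_CLOCK,
 *		.size		= sizeof(attr),
 *		.disabled	= 1,
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr,
 *			 0,	// pid 0: measure the calling process/thread
 *			 -1,	// cpu -1: on any CPU
 *			 -1,	// group_fd -1: no group leader
 *			 0);	// flags
 */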

/**
 * perf_event_create_kernel_counter - create a perf event for in-kernel use
 *
 * @attr: attributes of the counter to create
 * @cpu: cpu on which the counter is bound
 * @task: task to profile (NULL for percpu)
 * @overflow_handler: callback to trigger when we hit the event
 * @context: context data that can be passed to the overflow_handler callback
 */
struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
				 struct task_struct *task,
				 perf_overflow_handler_t overflow_handler,
				 void *context)
{}
EXPORT_SYMBOL_GPL();
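
/*
 * Illustrative, hypothetical in-kernel caller of the API above: create a
 * pinned per-cpu cycle counter in pure counting mode (no overflow handler).
 * The hardlockup watchdog and hw_breakpoint code are real users of this
 * interface.
 */
static struct perf_event *example_create_cycle_counter(int cpu)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.size		= sizeof(attr),
		.pinned		= 1,
	};

	/* task == NULL selects a per-cpu counter, as documented above */
	return perf_event_create_kernel_counter(&attr, cpu, NULL, NULL, NULL);
}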

static void __perf_pmu_remove(struct perf_event_context *ctx,
			      int cpu, struct pmu *pmu,
			      struct perf_event_groups *groups,
			      struct list_head *events)
{}

static void __perf_pmu_install_event(struct pmu *pmu,
				     struct perf_event_context *ctx,
				     int cpu, struct perf_event *event)
{}

static void __perf_pmu_install(struct perf_event_context *ctx,
			       int cpu, struct pmu *pmu, struct list_head *events)
{}

void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
{}
EXPORT_SYMBOL_GPL();

static void sync_child_event(struct perf_event *child_event)
{}

static void
perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx)
{}

static void perf_event_exit_task_context(struct task_struct *child)
{}

/*
 * When a child task exits, feed back event values to parent events.
 *
 * Can be called with exec_update_lock held when invoked from
 * setup_new_exec().
 */
void perf_event_exit_task(struct task_struct *child)
{}

static void perf_free_event(struct perf_event *event,
			    struct perf_event_context *ctx)
{}

/*
 * Free a context as created by inheritance by perf_event_init_task() below,
 * used by fork() in case of failure.
 *
 * Even though the task has never lived, the context and events have been
 * exposed through the child_list, so we must take care when tearing it
 * all down.
 */
void perf_event_free_task(struct task_struct *task)
{}

void perf_event_delayed_put(struct task_struct *task)
{}

struct file *perf_event_get(unsigned int fd)
{}

const struct perf_event *perf_get_event(struct file *file)
{}

const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
{}

/*
 * Inherit an event from parent task to child task.
 *
 * Returns:
 *  - valid pointer on success
 *  - NULL for orphaned events
 *  - IS_ERR() on error
 */
static struct perf_event *
inherit_event(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event *group_leader,
	      struct perf_event_context *child_ctx)
{}
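
/*
 * Sketch of how a caller distinguishes the three outcomes documented above
 * (hypothetical name); this mirrors what inherit_group() does with the group
 * leader before iterating over its siblings.
 */
static int example_inherit_leader(struct perf_event *parent_event,
				  struct task_struct *parent,
				  struct perf_event_context *parent_ctx,
				  struct task_struct *child,
				  struct perf_event_context *child_ctx)
{
	struct perf_event *leader;

	leader = inherit_event(parent_event, parent, parent_ctx,
			       child, NULL, child_ctx);
	if (IS_ERR(leader))
		return PTR_ERR(leader);	/* real error: abort the clone */
	if (!leader)
		return 0;		/* orphaned parent: quietly skip */

	/* valid child event: go on to inherit the rest of the group */
	return 0;
}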

/*
 * Inherits an event group.
 *
 * This will quietly suppress orphaned events; !inherit_event() is not an error.
 * This matches with perf_event_release_kernel() removing all child events.
 *
 * Returns:
 *  - 0 on success
 *  - <0 on error
 */
static int inherit_group(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event_context *child_ctx)
{}

/*
 * Creates the child task context and tries to inherit the event-group.
 *
 * Clears @inherited_all on !attr.inherited or error. Note that we'll leave
 * inherited_all set when we 'fail' to inherit an orphaned event; this is
 * consistent with perf_event_release_kernel() removing all child events.
 *
 * Returns:
 *  - 0 on success
 *  - <0 on error
 */
static int
inherit_task_group(struct perf_event *event, struct task_struct *parent,
		   struct perf_event_context *parent_ctx,
		   struct task_struct *child,
		   u64 clone_flags, int *inherited_all)
{}

/*
 * Initialize the perf_event context in task_struct
 */
static int perf_event_init_context(struct task_struct *child, u64 clone_flags)
{}

/*
 * Initialize the perf_event context in task_struct
 */
int perf_event_init_task(struct task_struct *child, u64 clone_flags)
{}

static void __init perf_event_init_all_cpus(void)
{}

static void perf_swevent_init_cpu(unsigned int cpu)
{}

#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
static void __perf_event_exit_context(void *__info)
{}

static void perf_event_exit_cpu_context(int cpu)
{}
#else

static void perf_event_exit_cpu_context(int cpu) { }

#endif

int perf_event_init_cpu(unsigned int cpu)
{}

int perf_event_exit_cpu(unsigned int cpu)
{}

static int
perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
{}

/*
 * Run the perf reboot notifier at the very last possible moment so that
 * the generic watchdog code runs as long as possible.
 */
static struct notifier_block perf_reboot_notifier =;
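
/*
 * Illustrative sketch (hypothetical name; the real initializer is elided
 * above): "very last possible moment" translates into the lowest possible
 * notifier priority, and the block is handed to register_reboot_notifier()
 * (declared in linux/reboot.h, already included) at init time.
 */
static struct notifier_block example_reboot_notifier = {
	.notifier_call	= perf_reboot,
	.priority	= INT_MIN,	/* run after every other reboot notifier */
};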

void __init perf_event_init(void)
{}

ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
			      char *page)
{}
EXPORT_SYMBOL_GPL();

static int __init perf_event_sysfs_init(void)
{}
device_initcall(perf_event_sysfs_init);

#ifdef CONFIG_CGROUP_PERF
static struct cgroup_subsys_state *
perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{}

static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
{}

static int perf_cgroup_css_online(struct cgroup_subsys_state *css)
{}

static int __perf_cgroup_move(void *info)
{}

static void perf_cgroup_attach(struct cgroup_taskset *tset)
{}

struct cgroup_subsys perf_event_cgrp_subsys =;
#endif /* CONFIG_CGROUP_PERF */

DEFINE_STATIC_CALL_RET0();