#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/tick.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/hugetlb.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/cgroup.h>
#include <linux/perf_event.h>
#include <linux/trace_events.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/compat.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/namei.h>
#include <linux/parser.h>
#include <linux/sched/clock.h>
#include <linux/sched/mm.h>
#include <linux/proc_ns.h>
#include <linux/mount.h>
#include <linux/min_heap.h>
#include <linux/highmem.h>
#include <linux/pgtable.h>
#include <linux/buildid.h>
#include <linux/task_work.h>
#include "internal.h"
#include <asm/irq_regs.h>
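/*
 * Cross-CPU helpers: run a function on a given CPU, or on whichever CPU a
 * task is currently running, via the SMP function-call IPIs. Used to poke
 * at events and contexts without racing against the scheduler.
 */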
typedef int (*remote_function_f)(void *);
struct remote_function_call { … };
static void remote_function(void *data)
{ … }
static int
task_function_call(struct task_struct *p, remote_function_f func, void *info)
{ … }
static int cpu_function_call(int cpu, remote_function_f func, void *info)
{ … }
static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{ … }
static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{ … }
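/*
 * TASK_TOMBSTONE is a sentinel pointer: it marks event->owner for events
 * owned by the kernel rather than a user task, and ctx->task once a task
 * context has died, while staying distinguishable from the NULL that
 * denotes a CPU context.
 */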
#define TASK_TOMBSTONE …
static bool is_kernel_event(struct perf_event *event)
{ … }
static DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
struct perf_event_context *perf_cpu_task_ctx(void)
{ … }
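/*
 * event_function_call() runs @func against an event on the CPU that owns
 * the event's context; event_function_local() is the variant for when we
 * are already on that CPU with IRQs disabled.
 */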
typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *,
			struct perf_event_context *, void *);
struct event_function_struct { … };
static int event_function(void *info)
{ … }
static void event_function_call(struct perf_event *event, event_f func, void *data)
{ … }
static void event_function_local(struct perf_event *event, event_f func, void *data)
{ … }
#define PERF_FLAG_ALL …
#define PERF_SAMPLE_BRANCH_PERM_PLM …
enum event_type_t { … };
static void perf_sched_delayed(struct work_struct *work);
DEFINE_STATIC_KEY_FALSE(perf_sched_events);
static DECLARE_DELAYED_WORK(perf_sched_work, perf_sched_delayed);
static DEFINE_MUTEX(perf_sched_mutex);
static atomic_t perf_sched_count;
static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);
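/*
 * Per-type counts of existing events; a non-zero count means the
 * corresponding side-band records (mmap, comm, task, ...) have at least
 * one consumer and must be generated.
 */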
static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_namespaces_events __read_mostly;
static atomic_t nr_task_events __read_mostly;
static atomic_t nr_freq_events __read_mostly;
static atomic_t nr_switch_events __read_mostly;
static atomic_t nr_ksymbol_events __read_mostly;
static atomic_t nr_bpf_events __read_mostly;
static atomic_t nr_cgroup_events __read_mostly;
static atomic_t nr_text_poke_events __read_mostly;
static atomic_t nr_build_id_events __read_mostly;
static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;
static cpumask_var_t perf_online_mask;
static struct kmem_cache *perf_event_cache;
int sysctl_perf_event_paranoid __read_mostly = …;
int sysctl_perf_event_mlock __read_mostly = …;
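/*
 * max perf event sample rate
 */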
#define DEFAULT_MAX_SAMPLE_RATE …
#define DEFAULT_SAMPLE_PERIOD_NS …
#define DEFAULT_CPU_TIME_MAX_PERCENT …
int sysctl_perf_event_sample_rate __read_mostly = …;
static int max_samples_per_tick __read_mostly = …;
static int perf_sample_period_ns __read_mostly = …;
static int perf_sample_allowed_ns __read_mostly = …;
static void update_perf_cpu_limits(void)
{ … }
static bool perf_rotate_context(struct perf_cpu_pmu_context *cpc);
int perf_event_max_sample_rate_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{ … }
int sysctl_perf_cpu_time_max_percent __read_mostly = …;
int perf_cpu_time_max_percent_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{ … }
#define NR_ACCUMULATED_SAMPLES …
static DEFINE_PER_CPU(u64, running_sample_length);
static u64 __report_avg;
static u64 __report_allowed;
static void perf_duration_warn(struct irq_work *w)
{ … }
static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);
void perf_sample_event_took(u64 sample_len_ns)
{ … }
static atomic64_t perf_event_id;
static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);
void __weak perf_event_print_debug(void) { … }
static inline u64 perf_clock(void)
{ … }
static inline u64 perf_event_clock(struct perf_event *event)
{ … }
static __always_inline enum perf_event_state
__perf_effective_state(struct perf_event *event)
{ … }
static __always_inline void
__perf_update_times(struct perf_event *event, u64 now, u64 *enabled, u64 *running)
{ … }
static void perf_event_update_time(struct perf_event *event)
{ … }
static void perf_event_update_sibling_time(struct perf_event *leader)
{ … }
static void
perf_event_set_state(struct perf_event *event, enum perf_event_state state)
{ … }
#define __store_release(ptr, val) …
#define __load_acquire(ptr) …
static void perf_ctx_disable(struct perf_event_context *ctx, bool cgroup)
{ … }
static void perf_ctx_enable(struct perf_event_context *ctx, bool cgroup)
{ … }
static void ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type);
static void ctx_sched_in(struct perf_event_context *ctx, enum event_type_t event_type);
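/*
 * cgroup-scoped events: these only count while a task belonging to the
 * event's cgroup is running, and carry their own time accounting below.
 */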
#ifdef CONFIG_CGROUP_PERF
static inline bool
perf_cgroup_match(struct perf_event *event)
{ … }
static inline void perf_detach_cgroup(struct perf_event *event)
{ … }
static inline int is_cgroup_event(struct perf_event *event)
{ … }
static inline u64 perf_cgroup_event_time(struct perf_event *event)
{ … }
static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now)
{ … }
static inline void __update_cgrp_time(struct perf_cgroup_info *info, u64 now, bool adv)
{ … }
static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx, bool final)
{ … }
static inline void update_cgrp_time_from_event(struct perf_event *event)
{ … }
static inline void
perf_cgroup_set_timestamp(struct perf_cpu_context *cpuctx)
{ … }
static void perf_cgroup_switch(struct task_struct *task)
{ … }
static int perf_cgroup_ensure_storage(struct perf_event *event,
struct cgroup_subsys_state *css)
{ … }
static inline int perf_cgroup_connect(int fd, struct perf_event *event,
struct perf_event_attr *attr,
struct perf_event *group_leader)
{ … }
static inline void
perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx)
{ … }
static inline void
perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx)
{ … }
#else
static inline bool
perf_cgroup_match(struct perf_event *event)
{
return true;
}
static inline void perf_detach_cgroup(struct perf_event *event)
{}
static inline int is_cgroup_event(struct perf_event *event)
{
return 0;
}
static inline void update_cgrp_time_from_event(struct perf_event *event)
{
}
static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx,
bool final)
{
}
static inline int perf_cgroup_connect(int fd, struct perf_event *event,
struct perf_event_attr *attr,
struct perf_event *group_leader)
{
return -EINVAL;
}
static inline void
perf_cgroup_set_timestamp(struct perf_cpu_context *cpuctx)
{
}
static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
return 0;
}
static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now)
{
return 0;
}
static inline void
perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx)
{
}
static inline void
perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx)
{
}
static void perf_cgroup_switch(struct task_struct *task)
{
}
#endif
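/*
 * Multiplexing hrtimer: when a PMU is over-committed, this per-CPU timer
 * rotates the scheduled event groups every perf_event_mux_interval_ms.
 */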
#define PERF_CPU_HRTIMER …
static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
{ … }
static void __perf_mux_hrtimer_init(struct perf_cpu_pmu_context *cpc, int cpu)
{ … }
static int perf_mux_hrtimer_restart(struct perf_cpu_pmu_context *cpc)
{ … }
static int perf_mux_hrtimer_restart_ipi(void *arg)
{ … }
void perf_pmu_disable(struct pmu *pmu)
{ … }
void perf_pmu_enable(struct pmu *pmu)
{ … }
static void perf_assert_pmu_disabled(struct pmu *pmu)
{ … }
static void get_ctx(struct perf_event_context *ctx)
{ … }
static void *alloc_task_ctx_data(struct pmu *pmu)
{ … }
static void free_task_ctx_data(struct pmu *pmu, void *task_ctx_data)
{ … }
static void free_ctx(struct rcu_head *head)
{ … }
static void put_ctx(struct perf_event_context *ctx)
{ … }
static struct perf_event_context *
perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
{ … }
static inline struct perf_event_context *
perf_event_ctx_lock(struct perf_event *event)
{ … }
static void perf_event_ctx_unlock(struct perf_event *event,
struct perf_event_context *ctx)
{ … }
static __must_check struct perf_event_context *
unclone_ctx(struct perf_event_context *ctx)
{ … }
static u32 perf_event_pid_type(struct perf_event *event, struct task_struct *p,
enum pid_type type)
{ … }
static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{ … }
static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{ … }
static u64 primary_event_id(struct perf_event *event)
{ … }
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, unsigned long *flags)
{ … }
static struct perf_event_context *
perf_pin_task_context(struct task_struct *task)
{ … }
static void perf_unpin_context(struct perf_event_context *ctx)
{ … }
static void __update_context_time(struct perf_event_context *ctx, bool adv)
{ … }
static void update_context_time(struct perf_event_context *ctx)
{ … }
static u64 perf_event_time(struct perf_event *event)
{ … }
static u64 perf_event_time_now(struct perf_event *event, u64 now)
{ … }
static enum event_type_t get_event_type(struct perf_event *event)
{ … }
static void init_event_group(struct perf_event *event)
{ … }
static struct perf_event_groups *
get_event_groups(struct perf_event *event, struct perf_event_context *ctx)
{ … }
static void perf_event_groups_init(struct perf_event_groups *groups)
{ … }
static inline struct cgroup *event_cgroup(const struct perf_event *event)
{ … }
static __always_inline int
perf_event_groups_cmp(const int left_cpu, const struct pmu *left_pmu,
const struct cgroup *left_cgroup, const u64 left_group_index,
const struct perf_event *right)
{ … }
#define __node_2_pe(node) …
static inline bool __group_less(struct rb_node *a, const struct rb_node *b)
{ … }
struct __group_key { … };
static inline int __group_cmp(const void *key, const struct rb_node *node)
{ … }
static inline int
__group_cmp_ignore_cgroup(const void *key, const struct rb_node *node)
{ … }
static void
perf_event_groups_insert(struct perf_event_groups *groups,
struct perf_event *event)
{ … }
static void
add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx)
{ … }
static void
perf_event_groups_delete(struct perf_event_groups *groups,
struct perf_event *event)
{ … }
static void
del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx)
{ … }
static struct perf_event *
perf_event_groups_first(struct perf_event_groups *groups, int cpu,
struct pmu *pmu, struct cgroup *cgrp)
{ … }
static struct perf_event *
perf_event_groups_next(struct perf_event *event, struct pmu *pmu)
{ … }
#define perf_event_groups_for_cpu_pmu(event, groups, cpu, pmu) …
#define perf_event_groups_for_each(event, groups) …
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{ … }
static inline void perf_event__state_init(struct perf_event *event)
{ … }
static int __perf_event_read_size(u64 read_format, int nr_siblings)
{ … }
static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
{ … }
static void perf_event__header_size(struct perf_event *event)
{ … }
static void perf_event__id_header_size(struct perf_event *event)
{ … }
static bool perf_event_validate_size(struct perf_event *event)
{ … }
static void perf_group_attach(struct perf_event *event)
{ … }
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{ … }
static int
perf_aux_output_match(struct perf_event *event, struct perf_event *aux_event)
{ … }
static void put_event(struct perf_event *event);
static void event_sched_out(struct perf_event *event,
struct perf_event_context *ctx);
static void perf_put_aux_event(struct perf_event *event)
{ … }
static bool perf_need_aux_event(struct perf_event *event)
{ … }
static int perf_get_aux_event(struct perf_event *event,
struct perf_event *group_leader)
{ … }
static inline struct list_head *get_event_list(struct perf_event *event)
{ … }
static inline void perf_remove_sibling_event(struct perf_event *event)
{ … }
static void perf_group_detach(struct perf_event *event)
{ … }
static void sync_child_event(struct perf_event *child_event);
static void perf_child_detach(struct perf_event *event)
{ … }
static bool is_orphaned_event(struct perf_event *event)
{ … }
static inline int
event_filter_match(struct perf_event *event)
{ … }
static void
event_sched_out(struct perf_event *event, struct perf_event_context *ctx)
{ … }
static void
group_sched_out(struct perf_event *group_event, struct perf_event_context *ctx)
{ … }
#define DETACH_GROUP …
#define DETACH_CHILD …
#define DETACH_DEAD …
static void
__perf_remove_from_context(struct perf_event *event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx,
void *info)
{ … }
static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
{ … }
static void __perf_event_disable(struct perf_event *event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx,
void *info)
{ … }
static void _perf_event_disable(struct perf_event *event)
{ … }
void perf_event_disable_local(struct perf_event *event)
{ … }
void perf_event_disable(struct perf_event *event)
{ … }
EXPORT_SYMBOL_GPL(…);
void perf_event_disable_inatomic(struct perf_event *event)
{ … }
#define MAX_INTERRUPTS …
static void perf_log_throttle(struct perf_event *event, int enable);
static void perf_log_itrace_start(struct perf_event *event);
static int
event_sched_in(struct perf_event *event, struct perf_event_context *ctx)
{ … }
static int
group_sched_in(struct perf_event *group_event, struct perf_event_context *ctx)
{ … }
static int group_can_go_on(struct perf_event *event, int can_add_hw)
{ … }
static void add_event_to_ctx(struct perf_event *event,
struct perf_event_context *ctx)
{ … }
static void task_ctx_sched_out(struct perf_event_context *ctx,
enum event_type_t event_type)
{ … }
static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{ … }
static void ctx_resched(struct perf_cpu_context *cpuctx,
struct perf_event_context *task_ctx,
enum event_type_t event_type)
{ … }
void perf_pmu_resched(struct pmu *pmu)
{ … }
static int __perf_install_in_context(void *info)
{ … }
static bool exclusive_event_installable(struct perf_event *event,
struct perf_event_context *ctx);
static void
perf_install_in_context(struct perf_event_context *ctx,
struct perf_event *event,
int cpu)
{ … }
static void __perf_event_enable(struct perf_event *event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx,
void *info)
{ … }
static void _perf_event_enable(struct perf_event *event)
{ … }
void perf_event_enable(struct perf_event *event)
{ … }
EXPORT_SYMBOL_GPL(…);
struct stop_event_data { … };
static int __perf_event_stop(void *info)
{ … }
static int perf_event_stop(struct perf_event *event, int restart)
{ … }
void perf_event_addr_filters_sync(struct perf_event *event)
{ … }
EXPORT_SYMBOL_GPL(…);
static int _perf_event_refresh(struct perf_event *event, int refresh)
{ … }
int perf_event_refresh(struct perf_event *event, int refresh)
{ … }
EXPORT_SYMBOL_GPL(…);
static int perf_event_modify_breakpoint(struct perf_event *bp,
struct perf_event_attr *attr)
{ … }
static void perf_event_modify_copy_attr(struct perf_event_attr *to,
const struct perf_event_attr *from)
{ … }
static int perf_event_modify_attr(struct perf_event *event,
struct perf_event_attr *attr)
{ … }
static void __pmu_ctx_sched_out(struct perf_event_pmu_context *pmu_ctx,
enum event_type_t event_type)
{ … }
static void
ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type)
{ … }
static int context_equiv(struct perf_event_context *ctx1,
struct perf_event_context *ctx2)
{ … }
static void __perf_event_sync_stat(struct perf_event *event,
struct perf_event *next_event)
{ … }
static void perf_event_sync_stat(struct perf_event_context *ctx,
struct perf_event_context *next_ctx)
{ … }
#define double_list_for_each_entry(pos1, pos2, head1, head2, member) …
static void perf_event_swap_task_ctx_data(struct perf_event_context *prev_ctx,
struct perf_event_context *next_ctx)
{ … }
static void perf_ctx_sched_task_cb(struct perf_event_context *ctx, bool sched_in)
{ … }
static void
perf_event_context_sched_out(struct task_struct *task, struct task_struct *next)
{ … }
static DEFINE_PER_CPU(struct list_head, sched_cb_list);
static DEFINE_PER_CPU(int, perf_sched_cb_usages);
void perf_sched_cb_dec(struct pmu *pmu)
{ … }
void perf_sched_cb_inc(struct pmu *pmu)
{ … }
static void __perf_pmu_sched_task(struct perf_cpu_pmu_context *cpc, bool sched_in)
{ … }
static void perf_pmu_sched_task(struct task_struct *prev,
struct task_struct *next,
bool sched_in)
{ … }
static void perf_event_switch(struct task_struct *task,
struct task_struct *next_prev, bool sched_in);
void __perf_event_task_sched_out(struct task_struct *task,
struct task_struct *next)
{ … }
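/*
 * visit_groups_merge() walks several event-group RB-trees (per-CPU and,
 * where enabled, per-cgroup) in group_index order by merging them through
 * a min-heap keyed on group_index.
 */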
static bool perf_less_group_idx(const void *l, const void *r, void __always_unused *args)
{ … }
static void swap_ptr(void *l, void *r, void __always_unused *args)
{ … }
DEFINE_MIN_HEAP(…);
static const struct min_heap_callbacks perf_min_heap = …;
static void __heap_add(struct perf_event_min_heap *heap, struct perf_event *event)
{ … }
static void __link_epc(struct perf_event_pmu_context *pmu_ctx)
{ … }
static noinline int visit_groups_merge(struct perf_event_context *ctx,
struct perf_event_groups *groups, int cpu,
struct pmu *pmu,
int (*func)(struct perf_event *, void *),
void *data)
{ … }
static inline bool event_update_userpage(struct perf_event *event)
{ … }
static inline void group_update_userpage(struct perf_event *group_event)
{ … }
static int merge_sched_in(struct perf_event *event, void *data)
{ … }
static void pmu_groups_sched_in(struct perf_event_context *ctx,
struct perf_event_groups *groups,
struct pmu *pmu)
{ … }
static void ctx_groups_sched_in(struct perf_event_context *ctx,
struct perf_event_groups *groups,
bool cgroup)
{ … }
static void __pmu_ctx_sched_in(struct perf_event_context *ctx,
struct pmu *pmu)
{ … }
static void
ctx_sched_in(struct perf_event_context *ctx, enum event_type_t event_type)
{ … }
static void perf_event_context_sched_in(struct task_struct *task)
{ … }
void __perf_event_task_sched_in(struct task_struct *prev,
struct task_struct *task)
{ … }
static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
{ … }
static DEFINE_PER_CPU(int, perf_throttled_count);
static DEFINE_PER_CPU(u64, perf_throttled_seq);
static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
{ … }
static void perf_adjust_freq_unthr_events(struct list_head *event_list)
{ … }
static void
perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle)
{ … }
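/*
 * Round-robin rotation of flexible event groups so that, over time, every
 * group of an over-committed PMU gets to see the hardware.
 */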
static void rotate_ctx(struct perf_event_context *ctx, struct perf_event *event)
{ … }
static inline struct perf_event *
ctx_event_to_rotate(struct perf_event_pmu_context *pmu_ctx)
{ … }
static bool perf_rotate_context(struct perf_cpu_pmu_context *cpc)
{ … }
void perf_event_task_tick(void)
{ … }
static int event_enable_on_exec(struct perf_event *event,
struct perf_event_context *ctx)
{ … }
static void perf_event_enable_on_exec(struct perf_event_context *ctx)
{ … }
static void perf_remove_from_owner(struct perf_event *event);
static void perf_event_exit_event(struct perf_event *event,
struct perf_event_context *ctx);
static void perf_event_remove_on_exec(struct perf_event_context *ctx)
{ … }
struct perf_read_data { … };
static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
{ … }
static void __perf_event_read(void *info)
{ … }
static inline u64 perf_event_count(struct perf_event *event)
{ … }
static void calc_timer_values(struct perf_event *event,
u64 *now,
u64 *enabled,
u64 *running)
{ … }
int perf_event_read_local(struct perf_event *event, u64 *value,
u64 *enabled, u64 *running)
{ … }
static int perf_event_read(struct perf_event *event, bool group)
{ … }
static void __perf_event_init_context(struct perf_event_context *ctx)
{ … }
static void
__perf_init_event_pmu_context(struct perf_event_pmu_context *epc, struct pmu *pmu)
{ … }
static struct perf_event_context *
alloc_perf_context(struct task_struct *task)
{ … }
static struct task_struct *
find_lively_task_by_vpid(pid_t vpid)
{ … }
static struct perf_event_context *
find_get_context(struct task_struct *task, struct perf_event *event)
{ … }
static struct perf_event_pmu_context *
find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx,
struct perf_event *event)
{ … }
static void get_pmu_ctx(struct perf_event_pmu_context *epc)
{ … }
static void free_epc_rcu(struct rcu_head *head)
{ … }
static void put_pmu_ctx(struct perf_event_pmu_context *epc)
{ … }
static void perf_event_free_filter(struct perf_event *event);
static void free_event_rcu(struct rcu_head *head)
{ … }
static void ring_buffer_attach(struct perf_event *event,
struct perf_buffer *rb);
static void detach_sb_event(struct perf_event *event)
{ … }
static bool is_sb_event(struct perf_event *event)
{ … }
static void unaccount_pmu_sb_event(struct perf_event *event)
{ … }
#ifdef CONFIG_NO_HZ_FULL
static DEFINE_SPINLOCK(nr_freq_lock);
#endif
static void unaccount_freq_event_nohz(void)
{ … }
static void unaccount_freq_event(void)
{ … }
static void unaccount_event(struct perf_event *event)
{ … }
static void perf_sched_delayed(struct work_struct *work)
{ … }
static int exclusive_event_init(struct perf_event *event)
{ … }
static void exclusive_event_destroy(struct perf_event *event)
{ … }
static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
{ … }
static bool exclusive_event_installable(struct perf_event *event,
struct perf_event_context *ctx)
{ … }
static void perf_addr_filters_splice(struct perf_event *event,
struct list_head *head);
static void perf_pending_task_sync(struct perf_event *event)
{ … }
static void _free_event(struct perf_event *event)
{ … }
static void free_event(struct perf_event *event)
{ … }
static void perf_remove_from_owner(struct perf_event *event)
{ … }
static void put_event(struct perf_event *event)
{ … }
int perf_event_release_kernel(struct perf_event *event)
{ … }
EXPORT_SYMBOL_GPL(…);
static int perf_release(struct inode *inode, struct file *file)
{ … }
static u64 __perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
{ … }
u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
{ … }
EXPORT_SYMBOL_GPL(…);
static int __perf_read_group_add(struct perf_event *leader,
u64 read_format, u64 *values)
{ … }
static int perf_read_group(struct perf_event *event,
u64 read_format, char __user *buf)
{ … }
static int perf_read_one(struct perf_event *event,
u64 read_format, char __user *buf)
{ … }
static bool is_event_hup(struct perf_event *event)
{ … }
static ssize_t
__perf_read(struct perf_event *event, char __user *buf, size_t count)
{ … }
static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{ … }
static __poll_t perf_poll(struct file *file, poll_table *wait)
{ … }
static void _perf_event_reset(struct perf_event *event)
{ … }
u64 perf_event_pause(struct perf_event *event, bool reset)
{ … }
EXPORT_SYMBOL_GPL(…);
static void perf_event_for_each_child(struct perf_event *event,
void (*func)(struct perf_event *))
{ … }
static void perf_event_for_each(struct perf_event *event,
void (*func)(struct perf_event *))
{ … }
static void __perf_event_period(struct perf_event *event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx,
void *info)
{ … }
static int perf_event_check_period(struct perf_event *event, u64 value)
{ … }
static int _perf_event_period(struct perf_event *event, u64 value)
{ … }
int perf_event_period(struct perf_event *event, u64 value)
{ … }
EXPORT_SYMBOL_GPL(…);
static const struct file_operations perf_fops;
static inline int perf_fget_light(int fd, struct fd *p)
{ … }
static int perf_event_set_output(struct perf_event *event,
struct perf_event *output_event);
static int perf_event_set_filter(struct perf_event *event, void __user *arg);
static int perf_copy_attr(struct perf_event_attr __user *uattr,
struct perf_event_attr *attr);
static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
{ … }
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{ … }
#ifdef CONFIG_COMPAT
static long perf_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{ … }
#else
#define perf_compat_ioctl …
#endif
int perf_event_task_enable(void)
{ … }
int perf_event_task_disable(void)
{ … }
static int perf_event_index(struct perf_event *event)
{ … }
static void perf_event_init_userpage(struct perf_event *event)
{ … }
void __weak arch_perf_update_userpage(
struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now)
{ … }
void perf_event_update_userpage(struct perf_event *event)
{ … }
EXPORT_SYMBOL_GPL(…);
static vm_fault_t perf_mmap_fault(struct vm_fault *vmf)
{ … }
static void ring_buffer_attach(struct perf_event *event,
struct perf_buffer *rb)
{ … }
static void ring_buffer_wakeup(struct perf_event *event)
{ … }
struct perf_buffer *ring_buffer_get(struct perf_event *event)
{ … }
void ring_buffer_put(struct perf_buffer *rb)
{ … }
static void perf_mmap_open(struct vm_area_struct *vma)
{ … }
static void perf_pmu_output_stop(struct perf_event *event);
static void perf_mmap_close(struct vm_area_struct *vma)
{ … }
static const struct vm_operations_struct perf_mmap_vmops = …;
static int perf_mmap(struct file *file, struct vm_area_struct *vma)
{ … }
static int perf_fasync(int fd, struct file *filp, int on)
{ … }
static const struct file_operations perf_fops = …;
void perf_event_wakeup(struct perf_event *event)
{ … }
static void perf_sigtrap(struct perf_event *event)
{ … }
static void __perf_pending_disable(struct perf_event *event)
{ … }
static void perf_pending_disable(struct irq_work *entry)
{ … }
static void perf_pending_irq(struct irq_work *entry)
{ … }
static void perf_pending_task(struct callback_head *head)
{ … }
#ifdef CONFIG_GUEST_PERF_EVENTS
struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
DEFINE_STATIC_CALL_RET0(…);
DEFINE_STATIC_CALL_RET0(…);
DEFINE_STATIC_CALL_RET0(…);
void perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{ … }
EXPORT_SYMBOL_GPL(…);
void perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{ … }
EXPORT_SYMBOL_GPL(…);
#endif
static void
perf_output_sample_regs(struct perf_output_handle *handle,
struct pt_regs *regs, u64 mask)
{ … }
static void perf_sample_regs_user(struct perf_regs *regs_user,
struct pt_regs *regs)
{ … }
static void perf_sample_regs_intr(struct perf_regs *regs_intr,
struct pt_regs *regs)
{ … }
static u64 perf_ustack_task_size(struct pt_regs *regs)
{ … }
static u16
perf_sample_ustack_size(u16 stack_size, u16 header_size,
struct pt_regs *regs)
{ … }
static void
perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
struct pt_regs *regs)
{ … }
static unsigned long perf_prepare_sample_aux(struct perf_event *event,
struct perf_sample_data *data,
size_t size)
{ … }
static long perf_pmu_snapshot_aux(struct perf_buffer *rb,
struct perf_event *event,
struct perf_output_handle *handle,
unsigned long size)
{ … }
static void perf_aux_sample_output(struct perf_event *event,
struct perf_output_handle *handle,
struct perf_sample_data *data)
{ … }
#define PERF_SAMPLE_ID_ALL …
static void __perf_event_header__init_id(struct perf_sample_data *data,
struct perf_event *event,
u64 sample_type)
{ … }
void perf_event_header__init_id(struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event)
{ … }
static void __perf_event__output_id_sample(struct perf_output_handle *handle,
struct perf_sample_data *data)
{ … }
void perf_event__output_id_sample(struct perf_event *event,
struct perf_output_handle *handle,
struct perf_sample_data *sample)
{ … }
static void perf_output_read_one(struct perf_output_handle *handle,
struct perf_event *event,
u64 enabled, u64 running)
{ … }
static void perf_output_read_group(struct perf_output_handle *handle,
struct perf_event *event,
u64 enabled, u64 running)
{ … }
#define PERF_FORMAT_TOTAL_TIMES …
static void perf_output_read(struct perf_output_handle *handle,
struct perf_event *event)
{ … }
void perf_output_sample(struct perf_output_handle *handle,
struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event)
{ … }
static u64 perf_virt_to_phys(u64 virt)
{ … }
static u64 perf_get_pgtable_size(struct mm_struct *mm, unsigned long addr)
{ … }
static u64 perf_get_page_size(unsigned long addr)
{ … }
static struct perf_callchain_entry __empty_callchain = …;
struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs)
{ … }
static __always_inline u64 __cond_set(u64 flags, u64 s, u64 d)
{ … }
void perf_prepare_sample(struct perf_sample_data *data,
struct perf_event *event,
struct pt_regs *regs)
{ … }
void perf_prepare_header(struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event,
struct pt_regs *regs)
{ … }
static __always_inline int
__perf_event_output(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs,
int (*output_begin)(struct perf_output_handle *,
struct perf_sample_data *,
struct perf_event *,
unsigned int))
{ … }
void
perf_event_output_forward(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs)
{ … }
void
perf_event_output_backward(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs)
{ … }
int
perf_event_output(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs)
{ … }
struct perf_read_event { … };
static void
perf_event_read_event(struct perf_event *event,
struct task_struct *task)
{ … }
typedef void (perf_iterate_f)(struct perf_event *event, void *data);
static void
perf_iterate_ctx(struct perf_event_context *ctx,
perf_iterate_f output,
void *data, bool all)
{ … }
static void perf_iterate_sb_cpu(perf_iterate_f output, void *data)
{ … }
static void
perf_iterate_sb(perf_iterate_f output, void *data,
struct perf_event_context *task_ctx)
{ … }
static void perf_event_addr_filters_exec(struct perf_event *event, void *data)
{ … }
void perf_event_exec(void)
{ … }
struct remote_output { … };
static void __perf_event_output_stop(struct perf_event *event, void *data)
{ … }
static int __perf_pmu_output_stop(void *info)
{ … }
static void perf_pmu_output_stop(struct perf_event *event)
{ … }
struct perf_task_event { … };
static int perf_event_task_match(struct perf_event *event)
{ … }
static void perf_event_task_output(struct perf_event *event,
void *data)
{ … }
static void perf_event_task(struct task_struct *task,
struct perf_event_context *task_ctx,
int new)
{ … }
void perf_event_fork(struct task_struct *task)
{ … }
struct perf_comm_event { … };
static int perf_event_comm_match(struct perf_event *event)
{ … }
static void perf_event_comm_output(struct perf_event *event,
void *data)
{ … }
static void perf_event_comm_event(struct perf_comm_event *comm_event)
{ … }
void perf_event_comm(struct task_struct *task, bool exec)
{ … }
struct perf_namespaces_event { … };
static int perf_event_namespaces_match(struct perf_event *event)
{ … }
static void perf_event_namespaces_output(struct perf_event *event,
void *data)
{ … }
static void perf_fill_ns_link_info(struct perf_ns_link_info *ns_link_info,
struct task_struct *task,
const struct proc_ns_operations *ns_ops)
{ … }
void perf_event_namespaces(struct task_struct *task)
{ … }
#ifdef CONFIG_CGROUP_PERF
struct perf_cgroup_event { … };
static int perf_event_cgroup_match(struct perf_event *event)
{ … }
static void perf_event_cgroup_output(struct perf_event *event, void *data)
{ … }
static void perf_event_cgroup(struct cgroup *cgrp)
{ … }
#endif
struct perf_mmap_event { … };
static int perf_event_mmap_match(struct perf_event *event,
void *data)
{ … }
static void perf_event_mmap_output(struct perf_event *event,
void *data)
{ … }
static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
{ … }
static bool perf_addr_filter_match(struct perf_addr_filter *filter,
struct file *file, unsigned long offset,
unsigned long size)
{ … }
static bool perf_addr_filter_vma_adjust(struct perf_addr_filter *filter,
struct vm_area_struct *vma,
struct perf_addr_filter_range *fr)
{ … }
static void __perf_addr_filters_adjust(struct perf_event *event, void *data)
{ … }
static void perf_addr_filters_adjust(struct vm_area_struct *vma)
{ … }
void perf_event_mmap(struct vm_area_struct *vma)
{ … }
void perf_event_aux_event(struct perf_event *event, unsigned long head,
unsigned long size, u64 flags)
{ … }
void perf_log_lost_samples(struct perf_event *event, u64 lost)
{ … }
struct perf_switch_event { … };
static int perf_event_switch_match(struct perf_event *event)
{ … }
static void perf_event_switch_output(struct perf_event *event, void *data)
{ … }
static void perf_event_switch(struct task_struct *task,
struct task_struct *next_prev, bool sched_in)
{ … }
static void perf_log_throttle(struct perf_event *event, int enable)
{ … }
struct perf_ksymbol_event { … };
static int perf_event_ksymbol_match(struct perf_event *event)
{ … }
static void perf_event_ksymbol_output(struct perf_event *event, void *data)
{ … }
void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len, bool unregister,
const char *sym)
{ … }
struct perf_bpf_event { … };
static int perf_event_bpf_match(struct perf_event *event)
{ … }
static void perf_event_bpf_output(struct perf_event *event, void *data)
{ … }
static void perf_event_bpf_emit_ksymbols(struct bpf_prog *prog,
enum perf_bpf_event_type type)
{ … }
void perf_event_bpf_event(struct bpf_prog *prog,
enum perf_bpf_event_type type,
u16 flags)
{ … }
struct perf_text_poke_event { … };
static int perf_event_text_poke_match(struct perf_event *event)
{ … }
static void perf_event_text_poke_output(struct perf_event *event, void *data)
{ … }
void perf_event_text_poke(const void *addr, const void *old_bytes,
size_t old_len, const void *new_bytes, size_t new_len)
{ … }
void perf_event_itrace_started(struct perf_event *event)
{ … }
static void perf_log_itrace_start(struct perf_event *event)
{ … }
void perf_report_aux_output_id(struct perf_event *event, u64 hw_id)
{ … }
EXPORT_SYMBOL_GPL(…);
static int
__perf_event_account_interrupt(struct perf_event *event, int throttle)
{ … }
int perf_event_account_interrupt(struct perf_event *event)
{ … }
static inline bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs)
{ … }
#ifdef CONFIG_BPF_SYSCALL
static int bpf_overflow_handler(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs)
{ … }
static inline int perf_event_set_bpf_handler(struct perf_event *event,
struct bpf_prog *prog,
u64 bpf_cookie)
{ … }
static inline void perf_event_free_bpf_handler(struct perf_event *event)
{ … }
#else
static inline int bpf_overflow_handler(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs)
{
return 1;
}
static inline int perf_event_set_bpf_handler(struct perf_event *event,
struct bpf_prog *prog,
u64 bpf_cookie)
{
return -EOPNOTSUPP;
}
static inline void perf_event_free_bpf_handler(struct perf_event *event)
{
}
#endif
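/*
 * Generic event overflow handling, sampling.
 */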
static int __perf_event_overflow(struct perf_event *event,
int throttle, struct perf_sample_data *data,
struct pt_regs *regs)
{ … }
int perf_event_overflow(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs)
{ … }
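/*
 * Generic software event infrastructure
 */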
struct swevent_htable { … };
static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
u64 perf_swevent_set_period(struct perf_event *event)
{ … }
static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
struct perf_sample_data *data,
struct pt_regs *regs)
{ … }
static void perf_swevent_event(struct perf_event *event, u64 nr,
struct perf_sample_data *data,
struct pt_regs *regs)
{ … }
static int perf_exclude_event(struct perf_event *event,
struct pt_regs *regs)
{ … }
static int perf_swevent_match(struct perf_event *event,
enum perf_type_id type,
u32 event_id,
struct perf_sample_data *data,
struct pt_regs *regs)
{ … }
static inline u64 swevent_hash(u64 type, u32 event_id)
{ … }
static inline struct hlist_head *
__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
{ … }
static inline struct hlist_head *
find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
{ … }
static inline struct hlist_head *
find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
{ … }
static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
u64 nr,
struct perf_sample_data *data,
struct pt_regs *regs)
{ … }
DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]);
int perf_swevent_get_recursion_context(void)
{ … }
EXPORT_SYMBOL_GPL(…);
void perf_swevent_put_recursion_context(int rctx)
{ … }
void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{ … }
void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{ … }
static void perf_swevent_read(struct perf_event *event)
{ … }
static int perf_swevent_add(struct perf_event *event, int flags)
{ … }
static void perf_swevent_del(struct perf_event *event, int flags)
{ … }
static void perf_swevent_start(struct perf_event *event, int flags)
{ … }
static void perf_swevent_stop(struct perf_event *event, int flags)
{ … }
static inline struct swevent_hlist *
swevent_hlist_deref(struct swevent_htable *swhash)
{ … }
static void swevent_hlist_release(struct swevent_htable *swhash)
{ … }
static void swevent_hlist_put_cpu(int cpu)
{ … }
static void swevent_hlist_put(void)
{ … }
static int swevent_hlist_get_cpu(int cpu)
{ … }
static int swevent_hlist_get(void)
{ … }
struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
static void sw_perf_event_destroy(struct perf_event *event)
{ … }
static struct pmu perf_cpu_clock;
static struct pmu perf_task_clock;
static int perf_swevent_init(struct perf_event *event)
{ … }
static struct pmu perf_swevent = …;
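/*
 * Tracepoint, kprobe and uprobe support: thin PMUs layered on top of the
 * tracing infrastructure.
 */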
#ifdef CONFIG_EVENT_TRACING
static void tp_perf_event_destroy(struct perf_event *event)
{ … }
static int perf_tp_event_init(struct perf_event *event)
{ … }
static struct pmu perf_tracepoint = …;
static int perf_tp_filter_match(struct perf_event *event,
struct perf_sample_data *data)
{ … }
static int perf_tp_event_match(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs)
{ … }
void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
struct trace_event_call *call, u64 count,
struct pt_regs *regs, struct hlist_head *head,
struct task_struct *task)
{ … }
EXPORT_SYMBOL_GPL(…);
static void __perf_tp_event_target_task(u64 count, void *record,
struct pt_regs *regs,
struct perf_sample_data *data,
struct perf_event *event)
{ … }
static void perf_tp_event_target_task(u64 count, void *record,
struct pt_regs *regs,
struct perf_sample_data *data,
struct perf_event_context *ctx)
{ … }
void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
struct pt_regs *regs, struct hlist_head *head, int rctx,
struct task_struct *task)
{ … }
EXPORT_SYMBOL_GPL(…);
#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
enum perf_probe_config { … };
PMU_FORMAT_ATTR(…);
#endif
#ifdef CONFIG_KPROBE_EVENTS
static struct attribute *kprobe_attrs[] = …;
static struct attribute_group kprobe_format_group = …;
static const struct attribute_group *kprobe_attr_groups[] = …;
static int perf_kprobe_event_init(struct perf_event *event);
static struct pmu perf_kprobe = …;
static int perf_kprobe_event_init(struct perf_event *event)
{ … }
#endif
#ifdef CONFIG_UPROBE_EVENTS
PMU_FORMAT_ATTR(…);
static struct attribute *uprobe_attrs[] = …;
static struct attribute_group uprobe_format_group = …;
static const struct attribute_group *uprobe_attr_groups[] = …;
static int perf_uprobe_event_init(struct perf_event *event);
static struct pmu perf_uprobe = …;
static int perf_uprobe_event_init(struct perf_event *event)
{ … }
#endif
static inline void perf_tp_register(void)
{ … }
static void perf_event_free_filter(struct perf_event *event)
{ … }
static inline bool perf_event_is_tracing(struct perf_event *event)
{ … }
int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog,
u64 bpf_cookie)
{ … }
void perf_event_free_bpf_prog(struct perf_event *event)
{ … }
#else
static inline void perf_tp_register(void)
{
}
static void perf_event_free_filter(struct perf_event *event)
{
}
int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog,
u64 bpf_cookie)
{
return -ENOENT;
}
void perf_event_free_bpf_prog(struct perf_event *event)
{
}
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
void perf_bp_event(struct perf_event *bp, void *data)
{ … }
#endif
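/*
 * Address-range filters: restrict hardware-tracing events (e.g. Intel PT)
 * to a set of address ranges, parsed from a filter string supplied via
 * ioctl().
 */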
static struct perf_addr_filter *
perf_addr_filter_new(struct perf_event *event, struct list_head *filters)
{ … }
static void free_filters_list(struct list_head *filters)
{ … }
static void perf_addr_filters_splice(struct perf_event *event,
struct list_head *head)
{ … }
static void perf_addr_filter_apply(struct perf_addr_filter *filter,
struct mm_struct *mm,
struct perf_addr_filter_range *fr)
{ … }
static void perf_event_addr_filters_apply(struct perf_event *event)
{ … }
enum { … };
enum { … };
static const match_table_t if_tokens = …;
static int
perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
struct list_head *filters)
{ … }
static int
perf_event_set_addr_filter(struct perf_event *event, char *filter_str)
{ … }
static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{ … }
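/*
 * hrtimer based swevent callback
 */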
static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
{ … }
static void perf_swevent_start_hrtimer(struct perf_event *event)
{ … }
static void perf_swevent_cancel_hrtimer(struct perf_event *event)
{ … }
static void perf_swevent_init_hrtimer(struct perf_event *event)
{ … }
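/*
 * Software event: cpu wall time clock
 */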
static void cpu_clock_event_update(struct perf_event *event)
{ … }
static void cpu_clock_event_start(struct perf_event *event, int flags)
{ … }
static void cpu_clock_event_stop(struct perf_event *event, int flags)
{ … }
static int cpu_clock_event_add(struct perf_event *event, int flags)
{ … }
static void cpu_clock_event_del(struct perf_event *event, int flags)
{ … }
static void cpu_clock_event_read(struct perf_event *event)
{ … }
static int cpu_clock_event_init(struct perf_event *event)
{ … }
static struct pmu perf_cpu_clock = …;
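/*
 * Software event: task time clock
 */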
static void task_clock_event_update(struct perf_event *event, u64 now)
{ … }
static void task_clock_event_start(struct perf_event *event, int flags)
{ … }
static void task_clock_event_stop(struct perf_event *event, int flags)
{ … }
static int task_clock_event_add(struct perf_event *event, int flags)
{ … }
static void task_clock_event_del(struct perf_event *event, int flags)
{ … }
static void task_clock_event_read(struct perf_event *event)
{ … }
static int task_clock_event_init(struct perf_event *event)
{ … }
static struct pmu perf_task_clock = …;
static void perf_pmu_nop_void(struct pmu *pmu)
{ … }
static void perf_pmu_nop_txn(struct pmu *pmu, unsigned int flags)
{ … }
static int perf_pmu_nop_int(struct pmu *pmu)
{ … }
static int perf_event_nop_int(struct perf_event *event, u64 value)
{ … }
static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
{ … }
static int perf_pmu_commit_txn(struct pmu *pmu)
{ … }
static void perf_pmu_cancel_txn(struct pmu *pmu)
{ … }
static int perf_event_idx_default(struct perf_event *event)
{ … }
static void free_pmu_context(struct pmu *pmu)
{ … }
static ssize_t nr_addr_filters_show(struct device *dev,
struct device_attribute *attr,
char *page)
{ … }
DEVICE_ATTR_RO(…);
static struct idr pmu_idr;
static ssize_t
type_show(struct device *dev, struct device_attribute *attr, char *page)
{ … }
static DEVICE_ATTR_RO(type);
static ssize_t
perf_event_mux_interval_ms_show(struct device *dev,
struct device_attribute *attr,
char *page)
{ … }
static DEFINE_MUTEX(mux_interval_mutex);
static ssize_t
perf_event_mux_interval_ms_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{ … }
static DEVICE_ATTR_RW(perf_event_mux_interval_ms);
static struct attribute *pmu_dev_attrs[] = …;
static umode_t pmu_dev_is_visible(struct kobject *kobj, struct attribute *a, int n)
{ … }
static struct attribute_group pmu_dev_attr_group = …;
static const struct attribute_group *pmu_dev_groups[] = …;
static int pmu_bus_running;
static struct bus_type pmu_bus = …;
static void pmu_dev_release(struct device *dev)
{ … }
static int pmu_dev_alloc(struct pmu *pmu)
{ … }
static struct lock_class_key cpuctx_mutex;
static struct lock_class_key cpuctx_lock;
int perf_pmu_register(struct pmu *pmu, const char *name, int type)
{ … }
EXPORT_SYMBOL_GPL(…);
void perf_pmu_unregister(struct pmu *pmu)
{ … }
EXPORT_SYMBOL_GPL(…);
static inline bool has_extended_regs(struct perf_event *event)
{ … }
static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
{ … }
static struct pmu *perf_init_event(struct perf_event *event)
{ … }
static void attach_sb_event(struct perf_event *event)
{ … }
static void account_pmu_sb_event(struct perf_event *event)
{ … }
static void account_freq_event_nohz(void)
{ … }
static void account_freq_event(void)
{ … }
static void account_event(struct perf_event *event)
{ … }
static struct perf_event *
perf_event_alloc(struct perf_event_attr *attr, int cpu,
struct task_struct *task,
struct perf_event *group_leader,
struct perf_event *parent_event,
perf_overflow_handler_t overflow_handler,
void *context, int cgroup_fd)
{ … }
static int perf_copy_attr(struct perf_event_attr __user *uattr,
struct perf_event_attr *attr)
{ … }
static void mutex_lock_double(struct mutex *a, struct mutex *b)
{ … }
static int
perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
{ … }
static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
{ … }
static bool
perf_check_permission(struct perf_event_attr *attr, struct task_struct *task)
{ … }
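/**
 * sys_perf_event_open - open a performance event, associate it to a task/cpu
 *
 * @attr_uptr:	event_id type attributes for monitoring/sampling
 * @pid:		target pid
 * @cpu:		target cpu
 * @group_fd:		group leader event fd
 * @flags:		perf event open flags
 */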
SYSCALL_DEFINE5(perf_event_open,
struct perf_event_attr __user *, attr_uptr,
pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{ … }
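/*
 * Userspace usage sketch (illustrative, not part of this file): count CPU
 * cycles for the calling thread:
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.size		= sizeof(attr),
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.disabled	= 1,
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	// ... workload ...
 *	__u64 count;
 *	read(fd, &count, sizeof(count));
 */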
struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
struct task_struct *task,
perf_overflow_handler_t overflow_handler,
void *context)
{ … }
EXPORT_SYMBOL_GPL(…);
static void __perf_pmu_remove(struct perf_event_context *ctx,
int cpu, struct pmu *pmu,
struct perf_event_groups *groups,
struct list_head *events)
{ … }
static void __perf_pmu_install_event(struct pmu *pmu,
struct perf_event_context *ctx,
int cpu, struct perf_event *event)
{ … }
static void __perf_pmu_install(struct perf_event_context *ctx,
int cpu, struct pmu *pmu, struct list_head *events)
{ … }
void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
{ … }
EXPORT_SYMBOL_GPL(…);
static void sync_child_event(struct perf_event *child_event)
{ … }
static void
perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx)
{ … }
static void perf_event_exit_task_context(struct task_struct *child)
{ … }
void perf_event_exit_task(struct task_struct *child)
{ … }
static void perf_free_event(struct perf_event *event,
struct perf_event_context *ctx)
{ … }
void perf_event_free_task(struct task_struct *task)
{ … }
void perf_event_delayed_put(struct task_struct *task)
{ … }
struct file *perf_event_get(unsigned int fd)
{ … }
const struct perf_event *perf_get_event(struct file *file)
{ … }
const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
{ … }
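/*
 * Inherit an event from parent task to child task.
 *
 * Returns:
 *  - valid pointer on success
 *  - NULL for orphaned events
 *  - IS_ERR() on error
 */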
static struct perf_event *
inherit_event(struct perf_event *parent_event,
struct task_struct *parent,
struct perf_event_context *parent_ctx,
struct task_struct *child,
struct perf_event *group_leader,
struct perf_event_context *child_ctx)
{ … }
static int inherit_group(struct perf_event *parent_event,
struct task_struct *parent,
struct perf_event_context *parent_ctx,
struct task_struct *child,
struct perf_event_context *child_ctx)
{ … }
static int
inherit_task_group(struct perf_event *event, struct task_struct *parent,
struct perf_event_context *parent_ctx,
struct task_struct *child,
u64 clone_flags, int *inherited_all)
{ … }
static int perf_event_init_context(struct task_struct *child, u64 clone_flags)
{ … }
int perf_event_init_task(struct task_struct *child, u64 clone_flags)
{ … }
static void __init perf_event_init_all_cpus(void)
{ … }
static void perf_swevent_init_cpu(unsigned int cpu)
{ … }
#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
static void __perf_event_exit_context(void *__info)
{ … }
static void perf_event_exit_cpu_context(int cpu)
{ … }
#else
static void perf_event_exit_cpu_context(int cpu) { }
#endif
int perf_event_init_cpu(unsigned int cpu)
{ … }
int perf_event_exit_cpu(unsigned int cpu)
{ … }
static int
perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
{ … }
static struct notifier_block perf_reboot_notifier = …;
void __init perf_event_init(void)
{ … }
ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
char *page)
{ … }
EXPORT_SYMBOL_GPL(…);
static int __init perf_event_sysfs_init(void)
{ … }
device_initcall(perf_event_sysfs_init);
#ifdef CONFIG_CGROUP_PERF
static struct cgroup_subsys_state *
perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{ … }
static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
{ … }
static int perf_cgroup_css_online(struct cgroup_subsys_state *css)
{ … }
static int __perf_cgroup_move(void *info)
{ … }
static void perf_cgroup_attach(struct cgroup_taskset *tset)
{ … }
struct cgroup_subsys perf_event_cgrp_subsys = …;
#endif
DEFINE_STATIC_CALL_RET0(…);