#include <linux/perf_event.h>
#include <linux/slab.h>
#include <linux/sched/task_stack.h>
#include <linux/uprobes.h>
#include "internal.h"
/*
 * Container for the per-CPU callchain entry buffers; allocated once and
 * published via the callchain_cpus_entries pointer below.
 * NOTE(review): body elided in this view — layout cannot be verified here.
 */
struct callchain_cpus_entries { … };
/*
 * Runtime tunables (the sysctl_ prefix and the proc handler at the bottom of
 * this file indicate they are exposed through sysctl). __read_mostly: written
 * rarely, read on every callchain capture, so keep them out of hot cachelines.
 * NOTE(review): initializers elided in this view — defaults not verifiable here.
 */
int sysctl_perf_event_max_stack __read_mostly = …;
int sysctl_perf_event_max_contexts_per_stack __read_mostly = …;
/*
 * Size in bytes of one struct perf_callchain_entry slot, presumably derived
 * from the max-stack tunables above — body elided in this view, so the exact
 * formula cannot be confirmed here.
 */
static inline size_t perf_callchain_entry__sizeof(void)
{ … }
/*
 * File-scope state for the shared callchain buffers:
 *  - callchain_recursion: per-CPU array of u8 flags, one slot per perf
 *    context (PERF_NR_CONTEXTS) — used as recursion markers per context.
 *  - nr_callchain_events:  atomic count of events using the buffers.
 *  - callchain_mutex:      serializes buffer allocation/teardown paths.
 *  - callchain_cpus_entries: the currently published buffer set (see the
 *    struct above); NULL until allocated.
 */
static DEFINE_PER_CPU(u8, callchain_recursion[PERF_NR_CONTEXTS]);
static atomic_t nr_callchain_events;
static DEFINE_MUTEX(callchain_mutex);
static struct callchain_cpus_entries *callchain_cpus_entries;
/*
 * Record a kernel-space callchain into @entry for the trapped state @regs.
 * __weak: this is a no-op fallback that an architecture's own implementation
 * overrides at link time. Body elided in this view.
 */
__weak void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{ … }
/*
 * Record a user-space callchain into @entry for the trapped state @regs.
 * __weak: default stub, overridden by the architecture's unwinder when one
 * is provided. Body elided in this view.
 */
__weak void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{ … }
/*
 * RCU callback (note the struct rcu_head * signature) that performs the
 * actual freeing of a callchain buffer set once all pre-existing RCU
 * readers have finished. Body elided in this view.
 */
static void release_callchain_buffers_rcu(struct rcu_head *head)
{ … }
/*
 * Tear down the published buffer set, presumably deferring the free to
 * release_callchain_buffers_rcu() via call_rcu() so concurrent lockless
 * readers stay safe — body elided in this view, so confirm against the
 * full source.
 */
static void release_callchain_buffers(void)
{ … }
/*
 * Allocate and publish the per-CPU callchain buffer set.
 * Returns 0 on success, negative errno on failure (kernel convention for
 * int-returning allocators). Body elided in this view; caller is expected
 * to hold callchain_mutex — confirm against the full source.
 */
static int alloc_callchain_buffers(void)
{ … }
/*
 * Take a reference on the shared callchain buffers for an event that needs
 * up to @event_max_stack frames, allocating them on first use (see
 * nr_callchain_events / callchain_mutex above). Returns 0 on success,
 * negative errno on failure. Pairs with put_callchain_buffers().
 * Body elided in this view.
 */
int get_callchain_buffers(int event_max_stack)
{ … }
/*
 * Drop the reference taken by get_callchain_buffers(); presumably releases
 * the buffers when the last user goes away — body elided in this view.
 */
void put_callchain_buffers(void)
{ … }
/*
 * Claim a per-CPU callchain entry slot for the current context.
 * @rctx: out-param receiving a recursion-context cookie that the caller
 *        must hand back to put_callchain_entry().
 * Returns the entry to fill, or NULL if none is available (e.g. recursion
 * detected or buffers not allocated) — body elided in this view, so the
 * exact failure conditions cannot be confirmed here.
 */
struct perf_callchain_entry *get_callchain_entry(int *rctx)
{ … }
/*
 * Release the slot claimed by get_callchain_entry().
 * @rctx: the cookie that get_callchain_entry() returned through its
 *        out-parameter. Body elided in this view.
 */
void
put_callchain_entry(int rctx)
{ … }
/*
 * Post-process a captured user callchain in @entry, starting at index
 * @start_entry_idx, to repair return addresses disturbed by uretprobe
 * trampolines (hence the <linux/uprobes.h> include above). Body elided
 * in this view — exact rewrite logic not verifiable here.
 */
static void fixup_uretprobe_trampoline_entries(struct perf_callchain_entry *entry,
int start_entry_idx)
{ … }
/*
 * Capture a callchain for @regs into a per-CPU entry and return it.
 * @init_nr:   number of pre-existing entries to account for before capture.
 * @kernel:    include the kernel-side chain (perf_callchain_kernel).
 * @user:      include the user-side chain (perf_callchain_user).
 * @max_stack: depth limit for this capture.
 * @crosstask: capture is for a task other than current — presumably
 *             restricts what may be unwound; confirm against full source.
 * @add_mark:  insert context-marker entries between kernel/user sections —
 *             TODO(review) confirm, body elided in this view.
 * Returns NULL when no entry slot could be claimed.
 */
struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
u32 max_stack, bool crosstask, bool add_mark)
{ … }
/*
 * sysctl proc handler (standard ctl_table handler signature) for the
 * perf_event_max_stack tunables declared above. @write distinguishes
 * store from read; @buffer/@lenp/@ppos follow the usual proc semantics.
 * Returns 0 on success, negative errno on failure. Body elided in this
 * view — presumably rejects changes while callchain users exist; confirm
 * against the full source.
 */
int perf_event_max_stack_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{ … }