#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kdebug.h>
#include <linux/sched/mm.h>
#include <linux/sched/clock.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/nospec.h>
#include <linux/static_call.h>
#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include <asm/alternative.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/desc.h>
#include <asm/ldt.h>
#include <asm/unwind.h>
#include "perf_event.h"
struct x86_pmu x86_pmu __read_mostly;
static struct pmu pmu;
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = …;
DEFINE_STATIC_KEY_FALSE(rdpmc_never_available_key);
DEFINE_STATIC_KEY_FALSE(rdpmc_always_available_key);
DEFINE_STATIC_KEY_FALSE(perf_is_hybrid);
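/*
 * Static calls mirroring the x86_pmu vendor callbacks.  They are wired up
 * to the probed implementation in x86_pmu_static_call_update() so that the
 * hot paths avoid indirect (retpolined) calls through x86_pmu.
 */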
DEFINE_STATIC_CALL_NULL(…);
DEFINE_STATIC_CALL_NULL(…);
DEFINE_STATIC_CALL_NULL(…);
DEFINE_STATIC_CALL_NULL(…);
DEFINE_STATIC_CALL_NULL(…);
DEFINE_STATIC_CALL_NULL(…);
DEFINE_STATIC_CALL_NULL(…);
DEFINE_STATIC_CALL_NULL(…);
DEFINE_STATIC_CALL_NULL(…);
DEFINE_STATIC_CALL_NULL(…);
DEFINE_STATIC_CALL_NULL(…);
DEFINE_STATIC_CALL_NULL(…);
DEFINE_STATIC_CALL_NULL(…);
DEFINE_STATIC_CALL_NULL(…);
DEFINE_STATIC_CALL_NULL(…);
DEFINE_STATIC_CALL_NULL(…);
DEFINE_STATIC_CALL_NULL(…);
DEFINE_STATIC_CALL_NULL(…);
DEFINE_STATIC_CALL_NULL(…);
DEFINE_STATIC_CALL_NULL(…);
DEFINE_STATIC_CALL_NULL(…);
DEFINE_STATIC_CALL_NULL(…);
DEFINE_STATIC_CALL_NULL(…);
DEFINE_STATIC_CALL_RET0(…);
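/*
 * Translation tables from the generic PERF_COUNT_HW_CACHE_* triplets
 * (cache level, operation, result) to the vendor's raw event encodings;
 * the extra_regs table carries any companion MSR programming (e.g.
 * offcore response) needed by those events.
 */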
u64 __read_mostly hw_cache_event_ids
			[PERF_COUNT_HW_CACHE_MAX]
			[PERF_COUNT_HW_CACHE_OP_MAX]
			[PERF_COUNT_HW_CACHE_RESULT_MAX];
u64 __read_mostly hw_cache_extra_regs
			[PERF_COUNT_HW_CACHE_MAX]
			[PERF_COUNT_HW_CACHE_OP_MAX]
			[PERF_COUNT_HW_CACHE_RESULT_MAX];
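/*
 * Propagate elapsed counter time into the generic event.  In outline
 * (a sketch, not the verbatim body): re-read the counter, publish the new
 * value as prev_count with a cmpxchg so a racing NMI cannot double-count,
 * then sign-extend the delta from the counter width and add it to
 * event->count.
 */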
u64 x86_perf_event_update(struct perf_event *event)
{ … }
static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
{ … }
static atomic_t active_events;
static atomic_t pmc_refcount;
static DEFINE_MUTEX(pmc_reserve_mutex);
#ifdef CONFIG_X86_LOCAL_APIC
static inline u64 get_possible_counter_mask(void)
{ … }
static bool reserve_pmc_hardware(void)
{ … }
static void release_pmc_hardware(void)
{ … }
#else
static bool reserve_pmc_hardware(void) { return true; }
static void release_pmc_hardware(void) {}
#endif
bool check_hw_exists(struct pmu *pmu, unsigned long *cntr_mask,
		     unsigned long *fixed_cntr_mask)
{ … }
static void hw_perf_event_destroy(struct perf_event *event)
{ … }
void hw_perf_lbr_event_destroy(struct perf_event *event)
{ … }
static inline int x86_pmu_initialized(void)
{ … }
static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
{ … }
int x86_reserve_hardware(void)
{ … }
void x86_release_hardware(void)
{ … }
int x86_add_exclusive(unsigned int what)
{ … }
void x86_del_exclusive(unsigned int what)
{ … }
int x86_setup_perfctr(struct perf_event *event)
{ … }
static inline int precise_br_compat(struct perf_event *event)
{ … }
int x86_pmu_max_precise(void)
{ … }
int x86_pmu_hw_config(struct perf_event *event)
{ … }
static int __x86_pmu_event_init(struct perf_event *event)
{ … }
void x86_pmu_disable_all(void)
{ … }
struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data)
{ … }
EXPORT_SYMBOL_GPL(…);
static void x86_pmu_disable(struct pmu *pmu)
{ … }
void x86_pmu_enable_all(int added)
{ … }
static inline int is_x86_event(struct perf_event *event)
{ … }
struct pmu *x86_get_pmu(unsigned int cpu)
{ … }
struct sched_state { … };
#define SCHED_STATES_MAX …
struct perf_sched { … };
static void perf_sched_init(struct perf_sched *sched, struct event_constraint **constraints,
			    int num, int wmin, int wmax, int gpmax)
{ … }
static void perf_sched_save_state(struct perf_sched *sched)
{ … }
static bool perf_sched_restore_state(struct perf_sched *sched)
{ … }
static bool __perf_sched_find_counter(struct perf_sched *sched)
{ … }
static bool perf_sched_find_counter(struct perf_sched *sched)
{ … }
static bool perf_sched_next_event(struct perf_sched *sched)
{ … }
int perf_assign_events(struct event_constraint **constraints, int n,
			int wmin, int wmax, int gpmax, int *assign)
{ … }
EXPORT_SYMBOL_GPL(…);
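/*
 * x86_schedule_events() first tries a fast path that reuses the previous
 * counter assignments, and only falls back to the full
 * perf_assign_events() search when a constraint is no longer honoured.
 * It also applies any dynamic and exclusive-counter constraints supplied
 * by the vendor code.
 */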
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{ … }
static int add_nr_metric_event(struct cpu_hw_events *cpuc,
			       struct perf_event *event)
{ … }
static void del_nr_metric_event(struct cpu_hw_events *cpuc,
				struct perf_event *event)
{ … }
static int collect_event(struct cpu_hw_events *cpuc, struct perf_event *event,
			 int max_count, int n)
{ … }
static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{ … }
static inline void x86_assign_hw_event(struct perf_event *event,
				       struct cpu_hw_events *cpuc, int i)
{ … }
int x86_perf_rdpmc_index(struct perf_event *event)
{ … }
static inline int match_prev_assignment(struct hw_perf_event *hwc,
					struct cpu_hw_events *cpuc,
					int i)
{ … }
static void x86_pmu_start(struct perf_event *event, int flags);
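/*
 * x86_pmu_enable() performs lazy (re)scheduling: events added since the
 * last PMU disable (n_added) are scheduled together with the existing
 * ones, events whose counter assignment changed are stopped and
 * reprogrammed via x86_assign_hw_event(), and only then is the whole PMU
 * switched back on.
 */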
static void x86_pmu_enable(struct pmu *pmu)
{ … }
DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
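/*
 * Program the next sampling period.  Roughly: clamp the period to
 * x86_pmu.max_period (and any vendor limit_period hook), remember it in
 * pmc_prev_left above, and write -left truncated to the counter width so
 * that the counter overflows after "left" increments.
 */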
int x86_perf_event_set_period(struct perf_event *event)
{ … }
void x86_pmu_enable_event(struct perf_event *event)
{ … }
static int x86_pmu_add(struct perf_event *event, int flags)
{ … }
static void x86_pmu_start(struct perf_event *event, int flags)
{ … }
void perf_event_print_debug(void)
{ … }
void x86_pmu_stop(struct perf_event *event, int flags)
{ … }
static void x86_pmu_del(struct perf_event *event, int flags)
{ … }
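/*
 * Default PMI handler: scan the active counters, and for each one that
 * overflowed update the count, re-arm the period and hand the sample to
 * perf_event_overflow().  It runs in NMI context, so everything it
 * touches must be NMI-safe; perf_event_nmi_handler() below is the thin
 * wrapper registered with register_nmi_handler().
 */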
int x86_pmu_handle_irq(struct pt_regs *regs)
{ … }
void perf_events_lapic_init(void)
{ … }
static int
perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{ … }
NOKPROBE_SYMBOL(perf_event_nmi_handler);
struct event_constraint emptyconstraint;
struct event_constraint unconstrained;
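/*
 * CPU hotplug callbacks, registered as cpuhp states from
 * init_hw_perf_events(): prepare/dead allocate and free per-CPU vendor
 * state, while starting/dying/online run the corresponding x86_pmu
 * callbacks on (or for) the CPU coming up or going down.
 */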
static int x86_pmu_prepare_cpu(unsigned int cpu)
{ … }
static int x86_pmu_dead_cpu(unsigned int cpu)
{ … }
static int x86_pmu_online_cpu(unsigned int cpu)
{ … }
static int x86_pmu_starting_cpu(unsigned int cpu)
{ … }
static int x86_pmu_dying_cpu(unsigned int cpu)
{ … }
static void __init pmu_check_apic(void)
{ … }
static struct attribute_group x86_pmu_format_group __ro_after_init = …;
ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, char *page)
{ … }
EXPORT_SYMBOL_GPL(…);
ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
			     char *page)
{ … }
ssize_t events_hybrid_sysfs_show(struct device *dev,
				 struct device_attribute *attr,
				 char *page)
{ … }
EXPORT_SYMBOL_GPL(…);
EVENT_ATTR(cpu-cycles, CPU_CYCLES );
EVENT_ATTR(instructions, INSTRUCTIONS );
EVENT_ATTR(cache-references, CACHE_REFERENCES );
EVENT_ATTR(cache-misses, CACHE_MISSES );
EVENT_ATTR(branch-instructions, BRANCH_INSTRUCTIONS );
EVENT_ATTR(branch-misses, BRANCH_MISSES );
EVENT_ATTR(bus-cycles, BUS_CYCLES );
EVENT_ATTR(stalled-cycles-frontend, STALLED_CYCLES_FRONTEND );
EVENT_ATTR(stalled-cycles-backend, STALLED_CYCLES_BACKEND );
EVENT_ATTR(ref-cycles, REF_CPU_CYCLES );
static struct attribute *empty_attrs;
static struct attribute *events_attr[] = …;
static umode_t
is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{ … }
static struct attribute_group x86_pmu_events_group __ro_after_init = …;
ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event)
{ … }
static struct attribute_group x86_pmu_attr_group;
static struct attribute_group x86_pmu_caps_group;
static void x86_pmu_static_call_update(void)
{ … }
static void _x86_pmu_read(struct perf_event *event)
{ … }
void x86_pmu_show_pmu_cap(struct pmu *pmu)
{ … }
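/*
 * Early init: probe the vendor PMU (Intel, AMD or Zhaoxin), verify that
 * the advertised counters actually exist (check_hw_exists()), register
 * the PMI NMI handler, wire up the static calls and finally register the
 * "cpu" pmu (or the hybrid pmus) with the core perf code.
 */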
static int __init init_hw_perf_events(void)
{ … }
early_initcall(init_hw_perf_events);
static void x86_pmu_read(struct perf_event *event)
{ … }
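/*
 * Transaction interface used by the core for group scheduling: start/
 * commit/cancel bracket a series of x86_pmu_add() calls (PERF_PMU_TXN_ADD)
 * so a whole group is either scheduled atomically or rolled back.
 */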
static void x86_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
{ … }
static void x86_pmu_cancel_txn(struct pmu *pmu)
{ … }
static int x86_pmu_commit_txn(struct pmu *pmu)
{ … }
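/*
 * Event/group validation uses a throw-away "fake" cpu_hw_events so that a
 * full scheduling dry-run (collect_events() + x86_schedule_events()) can
 * decide whether a new event or group could ever fit on the PMU, without
 * touching live per-CPU state.
 */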
static void free_fake_cpuc(struct cpu_hw_events *cpuc)
{ … }
static struct cpu_hw_events *allocate_fake_cpuc(struct pmu *event_pmu)
{ … }
static int validate_event(struct perf_event *event)
{ … }
static int validate_group(struct perf_event *event)
{ … }
static int x86_pmu_event_init(struct perf_event *event)
{ … }
void perf_clear_dirty_counters(void)
{ … }
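/*
 * Userspace RDPMC control.  The mapped/unmapped hooks keep a per-mm count
 * of self-monitoring mappings and toggle CR4.PCE accordingly; the sysfs
 * "rdpmc" attribute below selects the policy (0 = never, 1 = only for
 * tasks with an active mapping, 2 = always), e.g.:
 *
 *	# echo 0 > /sys/bus/event_source/devices/cpu/rdpmc
 *
 * (path shown for a non-hybrid system).
 */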
static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
{ … }
static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
{ … }
static int x86_pmu_event_idx(struct perf_event *event)
{ … }
static ssize_t get_attr_rdpmc(struct device *cdev,
			      struct device_attribute *attr,
			      char *buf)
{ … }
static ssize_t set_attr_rdpmc(struct device *cdev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{ … }
static DEVICE_ATTR(rdpmc, S_IRUSR | S_IWUSR, get_attr_rdpmc, set_attr_rdpmc);
static struct attribute *x86_pmu_attrs[] = …;
static struct attribute_group x86_pmu_attr_group __ro_after_init = …;
static ssize_t max_precise_show(struct device *cdev,
				struct device_attribute *attr,
				char *buf)
{ … }
static DEVICE_ATTR_RO(max_precise);
static struct attribute *x86_pmu_caps_attrs[] = …;
static struct attribute_group x86_pmu_caps_group __ro_after_init = …;
static const struct attribute_group *x86_pmu_attr_groups[] = …;
static void x86_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
{ … }
static void x86_pmu_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
				  struct perf_event_pmu_context *next_epc)
{ … }
void perf_check_microcode(void)
{ … }
static int x86_pmu_check_period(struct perf_event *event, u64 value)
{ … }
static int x86_pmu_aux_output_match(struct perf_event *event)
{ … }
static bool x86_pmu_filter(struct pmu *pmu, int cpu)
{ … }
static struct pmu pmu = …;
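/*
 * Fill in the bits of the mmap()ed perf_event_mmap_page that userspace
 * needs for self-monitoring: the rdpmc capability/index and the
 * time_shift/time_mult/time_offset values used to convert TSC deltas to
 * nanoseconds.  A user-side read is, in sketch form:
 *
 *	do {
 *		seq = pc->lock; barrier();
 *		idx = pc->index;
 *		count = pc->offset;
 *		if (pc->cap_user_rdpmc && idx)
 *			count += rdpmc(idx - 1);
 *		barrier();
 *	} while (pc->lock != seq);
 */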
void arch_perf_update_userpage(struct perf_event *event,
			       struct perf_event_mmap_page *userpg, u64 now)
{ … }
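/*
 * Callchain support: perf_callchain_kernel() walks the kernel stack with
 * the unwinder, while perf_callchain_user() follows userspace frame
 * pointers (with a compat variant for 32-bit tasks under
 * CONFIG_IA32_EMULATION), copying each frame with NMI-safe user accesses.
 */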
static bool perf_hw_regs(struct pt_regs *regs)
{ … }
void
perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{ … }
static inline int
valid_user_frame(const void __user *fp, unsigned long size)
{ … }
static unsigned long get_segment_base(unsigned int segment)
{ … }
#ifdef CONFIG_IA32_EMULATION
#include <linux/compat.h>
static inline int
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
{ … }
#else
static inline int
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
{
	return 0;
}
#endif
void
perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{ … }
static unsigned long code_segment_base(struct pt_regs *regs)
{ … }
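/*
 * Sample classification helpers: perf_instruction_pointer() and
 * perf_misc_flags() report where a sample came from (user, kernel or
 * guest), deferring to perf_guest_state()/perf_guest_get_ip() when the
 * PMI hit while a guest was running.
 */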
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{ … }
unsigned long perf_misc_flags(struct pt_regs *regs)
{ … }
void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{ … }
EXPORT_SYMBOL_GPL(…);
u64 perf_get_hw_event_config(int hw_event)
{ … }
EXPORT_SYMBOL_GPL(…);