#include <linux/perf_event.h>
#include <asm/fpu/xstate.h>
#include <asm/intel_ds.h>
#include <asm/cpu.h>
enum extra_reg_type { … };
struct event_constraint { … };
static inline bool constraint_match(struct event_constraint *c, u64 ecode)
{ … }
#define PERF_ARCH …
enum { … };
#undef PERF_ARCH
#define PERF_ARCH …
#include "perf_event_flags.h"
#undef PERF_ARCH
static inline bool is_topdown_count(struct perf_event *event)
{ … }
static inline bool is_metric_event(struct perf_event *event)
{ … }
static inline bool is_slots_event(struct perf_event *event)
{ … }
static inline bool is_topdown_event(struct perf_event *event)
{ … }
static inline bool is_branch_counters_group(struct perf_event *event)
{ … }
struct amd_nb { … };
#define PEBS_COUNTER_MASK …
#define PEBS_PMI_AFTER_EACH_RECORD …
#define PEBS_OUTPUT_OFFSET …
#define PEBS_OUTPUT_MASK …
#define PEBS_OUTPUT_PT …
#define PEBS_VIA_PT_MASK …
#define LARGE_PEBS_FLAGS …
#define PEBS_GP_REGS …
struct er_account { … };
struct intel_shared_regs { … };
enum intel_excl_state_type { … };
struct intel_excl_states { … };
struct intel_excl_cntrs { … };
struct x86_perf_task_context;
#define MAX_LBR_ENTRIES …
enum { … };
enum { … };
struct cpu_hw_events { … };
#define __EVENT_CONSTRAINT_RANGE(c, e, n, m, w, o, f) …
#define __EVENT_CONSTRAINT(c, n, m, w, o, f) …
#define EVENT_CONSTRAINT(c, n, m) …
#define EVENT_CONSTRAINT_RANGE(c, e, n, m) …
#define INTEL_EXCLEVT_CONSTRAINT(c, n) …
#define EVENT_CONSTRAINT_OVERLAP(c, n, m) …
#define INTEL_EVENT_CONSTRAINT(c, n) …
#define INTEL_EVENT_CONSTRAINT_RANGE(c, e, n) …
#define FIXED_EVENT_FLAGS …
#define FIXED_EVENT_CONSTRAINT(c, n) …
#define METRIC_EVENT_CONSTRAINT(c, n) …
#define INTEL_UEVENT_CONSTRAINT(c, n) …
#define INTEL_UBIT_EVENT_CONSTRAINT(c, n) …
#define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n) …
#define INTEL_EXCLUEVT_CONSTRAINT(c, n) …
#define INTEL_PLD_CONSTRAINT(c, n) …
#define INTEL_PSD_CONSTRAINT(c, n) …
#define INTEL_PST_CONSTRAINT(c, n) …
#define INTEL_HYBRID_LAT_CONSTRAINT(c, n) …
#define INTEL_HYBRID_LDLAT_CONSTRAINT(c, n) …
#define INTEL_HYBRID_STLAT_CONSTRAINT(c, n) …
#define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) …
#define INTEL_FLAGS_EVENT_CONSTRAINT_RANGE(c, e, n) …
#define INTEL_ALL_EVENT_CONSTRAINT(code, n) …
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n) …
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) …
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(code, end, n) …
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) …
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) …
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n) …
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) …
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n) …
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) …
#define EVENT_CONSTRAINT_END …
#define for_each_event_constraint(e, c) …
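/*
 * Illustrative sketch only (not part of the original header; the event code
 * and counter mask below are placeholder values): constraint tables built
 * from the macros above are arrays terminated by EVENT_CONSTRAINT_END and
 * are typically walked with for_each_event_constraint():
 *
 *	static struct event_constraint demo_constraints[] = {
 *		INTEL_EVENT_CONSTRAINT(0x3c, 0x3),
 *		FIXED_EVENT_CONSTRAINT(0x00c0, 0),
 *		EVENT_CONSTRAINT_END
 *	};
 *
 *	struct event_constraint *c;
 *
 *	for_each_event_constraint(c, demo_constraints)
 *		;	/* visits every entry up to the empty terminator */
 */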
struct extra_reg { … };
#define EVENT_EXTRA_REG(e, ms, m, vm, i) …
#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) …
#define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) …
#define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) …
#define EVENT_EXTRA_END …
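/*
 * Illustrative sketch only (the event code and valid-mask value are
 * placeholders): extra-register tables pair an event code with the MSR that
 * holds its extended config and end with EVENT_EXTRA_END, e.g.:
 *
 *	static struct extra_reg demo_extra_regs[] __read_mostly = {
 *		INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffffull, RSP_0),
 *		EVENT_EXTRA_END
 *	};
 */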
union perf_capabilities { … };
struct x86_pmu_quirk { … };
union x86_pmu_config { … };
#define X86_CONFIG(args...) …
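/*
 * Illustrative sketch only (field names assume the standard x86_pmu_config
 * bit layout; the values are placeholders): X86_CONFIG() composes a raw
 * config word from named bitfields, e.g.:
 *
 *	u64 cfg = X86_CONFIG(.event = 0xc0, .umask = 0x01, .inv = 0, .cmask = 1);
 */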
enum { … };
#define PERF_PEBS_DATA_SOURCE_MAX …
#define PERF_PEBS_DATA_SOURCE_MASK …
#define PERF_PEBS_DATA_SOURCE_GRT_MAX …
#define PERF_PEBS_DATA_SOURCE_GRT_MASK …
enum hybrid_cpu_type { … };
enum hybrid_pmu_type { … };
#define X86_HYBRID_PMU_ATOM_IDX …
#define X86_HYBRID_PMU_CORE_IDX …
#define X86_HYBRID_NUM_PMUS …
struct x86_hybrid_pmu { … };
static __always_inline struct x86_hybrid_pmu *hybrid_pmu(struct pmu *pmu)
{ … }
extern struct static_key_false perf_is_hybrid;
#define is_hybrid() …
#define hybrid(_pmu, _field) …
#define hybrid_var(_pmu, _var) …
#define hybrid_bit(_pmu, _field) …
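/*
 * Illustrative sketch only (assumes the event_constraints member of
 * struct x86_pmu below): the hybrid() accessors pick the per-PMU copy of a
 * field on hybrid parts and fall back to the global x86_pmu otherwise.
 * Given a struct pmu *pmu:
 *
 *	struct event_constraint *ec = hybrid(pmu, event_constraints);
 *
 *	if (is_hybrid())
 *		;	/* per-PMU copy was used */
 */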
struct x86_pmu { … };
struct x86_perf_task_context_opt { … };
struct x86_perf_task_context { … };
struct x86_perf_task_context_arch_lbr { … };
struct x86_perf_task_context_arch_lbr_xsave { … };
#define x86_add_quirk(func_) …
#define PMU_FL_NO_HT_SHARING …
#define PMU_FL_HAS_RSP_1 …
#define PMU_FL_EXCL_CNTRS …
#define PMU_FL_EXCL_ENABLED …
#define PMU_FL_PEBS_ALL …
#define PMU_FL_TFA …
#define PMU_FL_PAIR …
#define PMU_FL_INSTR_LATENCY …
#define PMU_FL_MEM_LOADS_AUX …
#define PMU_FL_RETIRE_LATENCY …
#define PMU_FL_BR_CNTR …
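/*
 * Illustrative sketch only: the PMU_FL_* bits are tested against
 * x86_pmu.flags (x86_pmu is declared further down), e.g.:
 *
 *	if (x86_pmu.flags & PMU_FL_PEBS_ALL)
 *		;	/* PEBS usable on all general-purpose counters */
 */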
#define EVENT_VAR(_id) …
#define EVENT_PTR(_id) …
#define EVENT_ATTR(_name, _id) …
#define EVENT_ATTR_STR(_name, v, str) …
#define EVENT_ATTR_STR_HT(_name, v, noht, ht) …
#define EVENT_ATTR_STR_HYBRID(_name, v, str, _pmu) …
#define FORMAT_HYBRID_PTR(_id) …
#define FORMAT_ATTR_HYBRID(_name, _pmu) …
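/*
 * Illustrative sketch only (the event name and config string are
 * placeholders): EVENT_ATTR_STR() defines a named sysfs event attribute and
 * EVENT_PTR() references it from an attribute array, e.g.:
 *
 *	EVENT_ATTR_STR(mem-loads, mem_ld_demo, "event=0xcd,umask=0x1,ldlat=3");
 *
 *	static struct attribute *demo_events_attrs[] = {
 *		EVENT_PTR(mem_ld_demo),
 *		NULL,
 *	};
 */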
struct pmu *x86_get_pmu(unsigned int cpu);
extern struct x86_pmu x86_pmu __read_mostly;
DECLARE_STATIC_CALL(x86_pmu_set_period, *x86_pmu.set_period);
DECLARE_STATIC_CALL(x86_pmu_update, *x86_pmu.update);
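/*
 * Illustrative sketch only: the static calls declared above are the
 * fast-path entry points for the corresponding x86_pmu methods, e.g.:
 *
 *	static_call(x86_pmu_set_period)(event);
 *	u64 count = static_call(x86_pmu_update)(event);
 */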
static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx)
{ … }
static inline bool x86_pmu_has_lbr_callstack(void)
{ … }
DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
DECLARE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
int x86_perf_event_set_period(struct perf_event *event);
#define C(x) …
extern u64 __read_mostly hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
extern u64 __read_mostly hw_cache_extra_regs
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
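/*
 * Illustrative sketch only: with the C() shorthand above, the generic cache
 * event tables are indexed as [cache][op][result], e.g.:
 *
 *	u64 val = hw_cache_event_ids[C(L1D)][C(OP_READ)][C(RESULT_MISS)];
 */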
u64 x86_perf_event_update(struct perf_event *event);
static inline unsigned int x86_pmu_config_addr(int index)
{ … }
static inline unsigned int x86_pmu_event_addr(int index)
{ … }
static inline unsigned int x86_pmu_fixed_ctr_addr(int index)
{ … }
static inline int x86_pmu_rdpmc_index(int index)
{ … }
bool check_hw_exists(struct pmu *pmu, unsigned long *cntr_mask,
unsigned long *fixed_cntr_mask);
int x86_add_exclusive(unsigned int what);
void x86_del_exclusive(unsigned int what);
int x86_reserve_hardware(void);
void x86_release_hardware(void);
int x86_pmu_max_precise(void);
void hw_perf_lbr_event_destroy(struct perf_event *event);
int x86_setup_perfctr(struct perf_event *event);
int x86_pmu_hw_config(struct perf_event *event);
void x86_pmu_disable_all(void);
static inline bool has_amd_brs(struct hw_perf_event *hwc)
{ … }
static inline bool is_counter_pair(struct hw_perf_event *hwc)
{ … }
static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
u64 enable_mask)
{ … }
void x86_pmu_enable_all(int added);
int perf_assign_events(struct event_constraint **constraints, int n,
int wmin, int wmax, int gpmax, int *assign);
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);
void x86_pmu_stop(struct perf_event *event, int flags);
static inline void x86_pmu_disable_event(struct perf_event *event)
{ … }
void x86_pmu_enable_event(struct perf_event *event);
int x86_pmu_handle_irq(struct pt_regs *regs);
void x86_pmu_show_pmu_cap(struct pmu *pmu);
static inline int x86_pmu_num_counters(struct pmu *pmu)
{ … }
static inline int x86_pmu_max_num_counters(struct pmu *pmu)
{ … }
static inline int x86_pmu_num_counters_fixed(struct pmu *pmu)
{ … }
static inline int x86_pmu_max_num_counters_fixed(struct pmu *pmu)
{ … }
static inline u64 x86_pmu_get_event_config(struct perf_event *event)
{ … }
extern struct event_constraint emptyconstraint;
extern struct event_constraint unconstrained;
static inline bool kernel_ip(unsigned long ip)
{ … }
static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
{ … }
enum { … };
#define X86_BR_PLM …
#define X86_BR_ANYTX …
#define X86_BR_ANY …
#define X86_BR_ALL …
#define X86_BR_ANY_CALL …
int common_branch_type(int type);
int branch_type(unsigned long from, unsigned long to, int abort);
int branch_type_fused(unsigned long from, unsigned long to, int abort,
int *offset);
ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
ssize_t intel_event_sysfs_show(char *page, u64 config);
ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
char *page);
ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
char *page);
ssize_t events_hybrid_sysfs_show(struct device *dev,
struct device_attribute *attr,
char *page);
static inline bool fixed_counter_disabled(int i, struct pmu *pmu)
{ … }
#ifdef CONFIG_CPU_SUP_AMD
int amd_pmu_init(void);
int amd_pmu_lbr_init(void);
void amd_pmu_lbr_reset(void);
void amd_pmu_lbr_read(void);
void amd_pmu_lbr_add(struct perf_event *event);
void amd_pmu_lbr_del(struct perf_event *event);
void amd_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in);
void amd_pmu_lbr_enable_all(void);
void amd_pmu_lbr_disable_all(void);
int amd_pmu_lbr_hw_config(struct perf_event *event);
static __always_inline void __amd_pmu_lbr_disable(void)
{ … }
#ifdef CONFIG_PERF_EVENTS_AMD_BRS
#define AMD_FAM19H_BRS_EVENT …
int amd_brs_init(void);
void amd_brs_disable(void);
void amd_brs_enable(void);
void amd_brs_enable_all(void);
void amd_brs_disable_all(void);
void amd_brs_drain(void);
void amd_brs_lopwr_init(void);
int amd_brs_hw_config(struct perf_event *event);
void amd_brs_reset(void);
static inline void amd_pmu_brs_add(struct perf_event *event)
{ … }
static inline void amd_pmu_brs_del(struct perf_event *event)
{ … }
void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in);
#else
static inline int amd_brs_init(void)
{
return 0;
}
static inline void amd_brs_disable(void) {}
static inline void amd_brs_enable(void) {}
static inline void amd_brs_drain(void) {}
static inline void amd_brs_lopwr_init(void) {}
static inline void amd_brs_disable_all(void) {}
static inline int amd_brs_hw_config(struct perf_event *event)
{
return 0;
}
static inline void amd_brs_reset(void) {}
static inline void amd_pmu_brs_add(struct perf_event *event)
{
}
static inline void amd_pmu_brs_del(struct perf_event *event)
{
}
static inline void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
{
}
static inline void amd_brs_enable_all(void)
{
}
#endif
#else
static inline int amd_pmu_init(void)
{
return 0;
}
static inline int amd_brs_init(void)
{
return -EOPNOTSUPP;
}
static inline void amd_brs_drain(void)
{
}
static inline void amd_brs_enable_all(void)
{
}
static inline void amd_brs_disable_all(void)
{
}
#endif
static inline int is_pebs_pt(struct perf_event *event)
{ … }
#ifdef CONFIG_CPU_SUP_INTEL
static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period)
{ … }
static inline bool intel_pmu_has_bts(struct perf_event *event)
{ … }
static __always_inline void __intel_pmu_pebs_disable_all(void)
{ … }
static __always_inline void __intel_pmu_arch_lbr_disable(void)
{ … }
static __always_inline void __intel_pmu_lbr_disable(void)
{ … }
int intel_pmu_save_and_restart(struct perf_event *event);
struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event);
extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);
int intel_pmu_init(void);
void init_debug_store_on_cpu(int cpu);
void fini_debug_store_on_cpu(int cpu);
void release_ds_buffers(void);
void reserve_ds_buffers(void);
void release_lbr_buffers(void);
void reserve_lbr_buffers(void);
extern struct event_constraint bts_constraint;
extern struct event_constraint vlbr_constraint;
void intel_pmu_enable_bts(u64 config);
void intel_pmu_disable_bts(void);
int intel_pmu_drain_bts_buffer(void);
u64 grt_latency_data(struct perf_event *event, u64 status);
u64 cmt_latency_data(struct perf_event *event, u64 status);
u64 lnl_latency_data(struct perf_event *event, u64 status);
extern struct event_constraint intel_core2_pebs_event_constraints[];
extern struct event_constraint intel_atom_pebs_event_constraints[];
extern struct event_constraint intel_slm_pebs_event_constraints[];
extern struct event_constraint intel_glm_pebs_event_constraints[];
extern struct event_constraint intel_glp_pebs_event_constraints[];
extern struct event_constraint intel_grt_pebs_event_constraints[];
extern struct event_constraint intel_nehalem_pebs_event_constraints[];
extern struct event_constraint intel_westmere_pebs_event_constraints[];
extern struct event_constraint intel_snb_pebs_event_constraints[];
extern struct event_constraint intel_ivb_pebs_event_constraints[];
extern struct event_constraint intel_hsw_pebs_event_constraints[];
extern struct event_constraint intel_bdw_pebs_event_constraints[];
extern struct event_constraint intel_skl_pebs_event_constraints[];
extern struct event_constraint intel_icl_pebs_event_constraints[];
extern struct event_constraint intel_glc_pebs_event_constraints[];
extern struct event_constraint intel_lnc_pebs_event_constraints[];
struct event_constraint *intel_pebs_constraints(struct perf_event *event);
void intel_pmu_pebs_add(struct perf_event *event);
void intel_pmu_pebs_del(struct perf_event *event);
void intel_pmu_pebs_enable(struct perf_event *event);
void intel_pmu_pebs_disable(struct perf_event *event);
void intel_pmu_pebs_enable_all(void);
void intel_pmu_pebs_disable_all(void);
void intel_pmu_pebs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in);
void intel_pmu_auto_reload_read(struct perf_event *event);
void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr);
void intel_ds_init(void);
void intel_pmu_lbr_save_brstack(struct perf_sample_data *data,
struct cpu_hw_events *cpuc,
struct perf_event *event);
void intel_pmu_lbr_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
struct perf_event_pmu_context *next_epc);
void intel_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in);
u64 lbr_from_signext_quirk_wr(u64 val);
void intel_pmu_lbr_reset(void);
void intel_pmu_lbr_reset_32(void);
void intel_pmu_lbr_reset_64(void);
void intel_pmu_lbr_add(struct perf_event *event);
void intel_pmu_lbr_del(struct perf_event *event);
void intel_pmu_lbr_enable_all(bool pmi);
void intel_pmu_lbr_disable_all(void);
void intel_pmu_lbr_read(void);
void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc);
void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc);
void intel_pmu_lbr_save(void *ctx);
void intel_pmu_lbr_restore(void *ctx);
void intel_pmu_lbr_init_core(void);
void intel_pmu_lbr_init_nhm(void);
void intel_pmu_lbr_init_atom(void);
void intel_pmu_lbr_init_slm(void);
void intel_pmu_lbr_init_snb(void);
void intel_pmu_lbr_init_hsw(void);
void intel_pmu_lbr_init_skl(void);
void intel_pmu_lbr_init_knl(void);
void intel_pmu_lbr_init(void);
void intel_pmu_arch_lbr_init(void);
void intel_pmu_pebs_data_source_nhm(void);
void intel_pmu_pebs_data_source_skl(bool pmem);
void intel_pmu_pebs_data_source_adl(void);
void intel_pmu_pebs_data_source_grt(void);
void intel_pmu_pebs_data_source_mtl(void);
void intel_pmu_pebs_data_source_cmt(void);
void intel_pmu_pebs_data_source_lnl(void);
int intel_pmu_setup_lbr_filter(struct perf_event *event);
void intel_pt_interrupt(void);
int intel_bts_interrupt(void);
void intel_bts_enable_local(void);
void intel_bts_disable_local(void);
int p4_pmu_init(void);
int p6_pmu_init(void);
int knc_pmu_init(void);
static inline int is_ht_workaround_enabled(void)
{ … }
static inline u64 intel_pmu_pebs_mask(u64 cntr_mask)
{ … }
static inline int intel_pmu_max_num_pebs(struct pmu *pmu)
{ … }
#else
static inline void reserve_ds_buffers(void)
{
}
static inline void release_ds_buffers(void)
{
}
static inline void release_lbr_buffers(void)
{
}
static inline void reserve_lbr_buffers(void)
{
}
static inline int intel_pmu_init(void)
{
return 0;
}
static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
{
return 0;
}
static inline void intel_cpuc_finish(struct cpu_hw_events *cpuc)
{
}
static inline int is_ht_workaround_enabled(void)
{
return 0;
}
#endif
#if ((defined CONFIG_CPU_SUP_CENTAUR) || (defined CONFIG_CPU_SUP_ZHAOXIN))
int zhaoxin_pmu_init(void);
#else
static inline int zhaoxin_pmu_init(void)
{
return 0;
}
#endif