#include <linux/perf_event.h>
#include <linux/types.h>
#include <asm/cpu_device_id.h>
#include <asm/perf_event.h>
#include <asm/msr.h>
#include "../perf_event.h"
#define LBR_KERNEL_BIT …
#define LBR_USER_BIT …
#define LBR_JCC_BIT …
#define LBR_REL_CALL_BIT …
#define LBR_IND_CALL_BIT …
#define LBR_RETURN_BIT …
#define LBR_IND_JMP_BIT …
#define LBR_REL_JMP_BIT …
#define LBR_FAR_BIT …
#define LBR_CALL_STACK_BIT …
#define LBR_NO_INFO_BIT …
#define LBR_KERNEL …
#define LBR_USER …
#define LBR_JCC …
#define LBR_REL_CALL …
#define LBR_IND_CALL …
#define LBR_RETURN …
#define LBR_REL_JMP …
#define LBR_IND_JMP …
#define LBR_FAR …
#define LBR_CALL_STACK …
#define LBR_NO_INFO …
#define LBR_PLM …
#define LBR_SEL_MASK …
#define LBR_NOT_SUPP …
#define LBR_IGN …
#define LBR_ANY …
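/*
 * Flag bits stored in the upper bits of the LBR FROM MSRs: branch
 * misprediction and the TSX in-transaction/abort status.
 * LBR_FROM_SIGNEXT_2MSB is the mask used by the sign-extension quirk below.
 */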
#define LBR_FROM_FLAG_MISPRED …
#define LBR_FROM_FLAG_IN_TX …
#define LBR_FROM_FLAG_ABORT …
#define LBR_FROM_SIGNEXT_2MSB …
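/*
 * Architectural LBR (MSR_ARCH_LBR_CTL) bits: the equivalent privilege and
 * branch-type filters for the architectural LBR facility.
 */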
#define ARCH_LBR_KERNEL_BIT …
#define ARCH_LBR_USER_BIT …
#define ARCH_LBR_CALL_STACK_BIT …
#define ARCH_LBR_JCC_BIT …
#define ARCH_LBR_REL_JMP_BIT …
#define ARCH_LBR_IND_JMP_BIT …
#define ARCH_LBR_REL_CALL_BIT …
#define ARCH_LBR_IND_CALL_BIT …
#define ARCH_LBR_RETURN_BIT …
#define ARCH_LBR_OTHER_BRANCH_BIT …
#define ARCH_LBR_KERNEL …
#define ARCH_LBR_USER …
#define ARCH_LBR_CALL_STACK …
#define ARCH_LBR_JCC …
#define ARCH_LBR_REL_JMP …
#define ARCH_LBR_IND_JMP …
#define ARCH_LBR_REL_CALL …
#define ARCH_LBR_IND_CALL …
#define ARCH_LBR_RETURN …
#define ARCH_LBR_OTHER_BRANCH …
#define ARCH_LBR_ANY …
#define ARCH_LBR_CTL_MASK …
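/*
 * __intel_pmu_lbr_enable() programs the cached filter configuration into
 * MSR_LBR_SELECT (or MSR_ARCH_LBR_CTL) and turns on branch recording via
 * IA32_DEBUGCTL / the architectural LBR enable bit.
 */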
static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);
static __always_inline bool is_lbr_call_stack_bit_set(u64 config)
{ … }
static void __intel_pmu_lbr_enable(bool pmi)
{ … }
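/*
 * Reset helpers clear all LBR stack entries (32-bit format, 64-bit format
 * and architectural LBR) so stale records cannot leak into a new user.
 * intel_pmu_lbr_tos() reads the current top-of-stack pointer.
 */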
void intel_pmu_lbr_reset_32(void)
{ … }
void intel_pmu_lbr_reset_64(void)
{ … }
static void intel_pmu_arch_lbr_reset(void)
{ … }
void intel_pmu_lbr_reset(void)
{ … }
static inline u64 intel_pmu_lbr_tos(void)
{ … }
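/*
 * Quirk for CPUs whose LBR FROM format carries TSX flag bits while the CPU
 * itself has no TSX support: wrmsr() then treats bits 61:62 as part of the
 * sign extension, so values written back must be sign-extended and values
 * read out must have the stale TSX bits cleared again.
 */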
enum { … };
static inline bool lbr_from_signext_quirk_needed(void)
{ … }
static DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key);
inline u64 lbr_from_signext_quirk_wr(u64 val)
{ … }
static u64 lbr_from_signext_quirk_rd(u64 val)
{ … }
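/*
 * Accessors for the per-entry FROM/TO/INFO MSRs.  The rdlbr_*() helpers can
 * read either the live MSRs or a previously saved struct lbr_entry, and the
 * FROM accessors apply the sign-extension quirk.
 */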
static __always_inline void wrlbr_from(unsigned int idx, u64 val)
{ … }
static __always_inline void wrlbr_to(unsigned int idx, u64 val)
{ … }
static __always_inline void wrlbr_info(unsigned int idx, u64 val)
{ … }
static __always_inline u64 rdlbr_from(unsigned int idx, struct lbr_entry *lbr)
{ … }
static __always_inline u64 rdlbr_to(unsigned int idx, struct lbr_entry *lbr)
{ … }
static __always_inline u64 rdlbr_info(unsigned int idx, struct lbr_entry *lbr)
{ … }
static inline void
wrlbr_all(struct lbr_entry *lbr, unsigned int idx, bool need_info)
{ … }
static inline bool
rdlbr_all(struct lbr_entry *lbr, unsigned int idx, bool need_info)
{ … }
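/*
 * LBR state save/restore across context switches.  Call-stack mode keeps
 * the LBR contents per task, so the stack is saved on sched-out and
 * restored on sched-in; architectural LBR can use XSAVES/XRSTORS for the
 * whole LBR state component instead of individual MSR accesses.
 */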
void intel_pmu_lbr_restore(void *ctx)
{ … }
static void intel_pmu_arch_lbr_restore(void *ctx)
{ … }
static void intel_pmu_arch_lbr_xrstors(void *ctx)
{ … }
static __always_inline bool lbr_is_reset_in_cstate(void *ctx)
{ … }
static void __intel_pmu_lbr_restore(void *ctx)
{ … }
void intel_pmu_lbr_save(void *ctx)
{ … }
static void intel_pmu_arch_lbr_save(void *ctx)
{ … }
static void intel_pmu_arch_lbr_xsaves(void *ctx)
{ … }
static void __intel_pmu_lbr_save(void *ctx)
{ … }
void intel_pmu_lbr_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
struct perf_event_pmu_context *next_epc)
{ … }
void intel_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
{ … }
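/*
 * Event add/del hooks: account LBR users on this CPU, flag the task context
 * for user call-stack saving, and reserve/release the buffers used to hold
 * saved LBR state.
 */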
static inline bool branch_user_callstack(unsigned br_sel)
{ … }
void intel_pmu_lbr_add(struct perf_event *event)
{ … }
void release_lbr_buffers(void)
{ … }
void reserve_lbr_buffers(void)
{ … }
void intel_pmu_lbr_del(struct perf_event *event)
{ … }
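/*
 * Global enable/disable and the format-specific read routines that drain
 * the LBR stack into cpuc->lbr_entries.  When the LBRs are handed to a KVM
 * guest via the vLBR event, the host side leaves them alone.
 */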
static inline bool vlbr_exclude_host(void)
{ … }
void intel_pmu_lbr_enable_all(bool pmi)
{ … }
void intel_pmu_lbr_disable_all(void)
{ … }
void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
{ … }
void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
{ … }
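/*
 * LBR_INFO decoding: the static keys record whether this CPU reports
 * mispredict, cycle-count and branch-type data, so the store path only
 * reads the fields that exist.
 */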
static DEFINE_STATIC_KEY_FALSE(x86_lbr_mispred);
static DEFINE_STATIC_KEY_FALSE(x86_lbr_cycles);
static DEFINE_STATIC_KEY_FALSE(x86_lbr_type);
static __always_inline int get_lbr_br_type(u64 info)
{ … }
static __always_inline bool get_lbr_mispred(u64 info)
{ … }
static __always_inline u16 get_lbr_cycles(u64 info)
{ … }
static_assert(…);
static void intel_pmu_store_lbr(struct cpu_hw_events *cpuc,
struct lbr_entry *entries)
{ … }
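/*
 * intel_pmu_lbr_save_brstack() hands the captured branch stack (and, where
 * supported, the per-branch event counters) to perf_sample_data for the
 * sampling event.
 */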
static void intel_pmu_lbr_counters_reorder(struct cpu_hw_events *cpuc,
struct perf_event *event)
{ … }
void intel_pmu_lbr_save_brstack(struct perf_sample_data *data,
struct cpu_hw_events *cpuc,
struct perf_event *event)
{ … }
static void intel_pmu_arch_lbr_read(struct cpu_hw_events *cpuc)
{ … }
static void intel_pmu_arch_lbr_read_xsave(struct cpu_hw_events *cpuc)
{ … }
void intel_pmu_lbr_read(void)
{ … }
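/*
 * Branch filter setup: translate the event's PERF_SAMPLE_BRANCH_* request
 * into what the hardware filter can do, and leave the rest to the software
 * filter, which decodes each recorded branch and drops those the event did
 * not ask for.
 */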
static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
{ … }
static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
{ … }
int intel_pmu_setup_lbr_filter(struct perf_event *event)
{ … }
enum { … };
static const int arch_lbr_br_type_map[ARCH_LBR_BR_TYPE_MAP_MAX] = …;
static void
intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
{ … }
void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr)
{ … }
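/*
 * Per-microarchitecture tables mapping PERF_SAMPLE_BRANCH_* bits to the
 * LBR_SELECT (Nehalem, Sandy Bridge, Haswell) or ARCH_LBR_CTL encodings.
 */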
static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = …;
static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = …;
static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = …;
static int arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = …;
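/*
 * Model-specific initialization: each init routine sets the LBR depth, the
 * FROM/TO/TOS MSR layout and the selection map for its microarchitecture;
 * intel_pmu_arch_lbr_init() enumerates the architectural LBR capabilities
 * from CPUID instead.
 */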
void __init intel_pmu_lbr_init_core(void)
{ … }
void __init intel_pmu_lbr_init_nhm(void)
{ … }
void __init intel_pmu_lbr_init_snb(void)
{ … }
static inline struct kmem_cache *
create_lbr_kmem_cache(size_t size, size_t align)
{ … }
void intel_pmu_lbr_init_hsw(void)
{ … }
__init void intel_pmu_lbr_init_skl(void)
{ … }
void __init intel_pmu_lbr_init_atom(void)
{ … }
void __init intel_pmu_lbr_init_slm(void)
{ … }
void intel_pmu_lbr_init_knl(void)
{ … }
void intel_pmu_lbr_init(void)
{ … }
static inline unsigned int get_lbr_state_size(void)
{ … }
static bool is_arch_lbr_xsave_available(void)
{ … }
void __init intel_pmu_arch_lbr_init(void)
{ … }
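/*
 * x86_perf_get_lbr() exposes the LBR capabilities to KVM for guest LBR
 * support, and vlbr_constraint describes the pseudo event used when the
 * LBRs are handed to a guest.
 */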
void x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
{ … }
EXPORT_SYMBOL_GPL(…);
struct event_constraint vlbr_constraint = …;