#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "../perf_event.h"
/*
 * Hardware LBR_SELECT MSR bit definitions (values elided in this view).
 * NOTE(review): the names suggest each bit selects a branch class for the
 * LBR hardware to record — privilege level (kernel/user), conditional
 * jumps, near relative/indirect calls, near returns, near indirect/relative
 * jumps, and far branches. Confirm the exact bit positions against the
 * AMD APM (LBR_SELECT / DbgExtnCfg documentation).
 */
#define LBR_SELECT_MASK …
#define LBR_SELECT_KERNEL …
#define LBR_SELECT_USER …
#define LBR_SELECT_JCC …
#define LBR_SELECT_CALL_NEAR_REL …
#define LBR_SELECT_CALL_NEAR_IND …
#define LBR_SELECT_RET_NEAR …
#define LBR_SELECT_JMP_NEAR_IND …
#define LBR_SELECT_JMP_NEAR_REL …
#define LBR_SELECT_FAR_BRANCH …
/*
 * Software branch-filter flags (values elided in this view).
 * NOTE(review): these appear to be the driver-internal flag encoding used
 * when translating perf's PERF_SAMPLE_BRANCH_* requests into the hardware
 * LBR_SELECT bits above — including LBR_NOT_SUPP / LBR_IGNORE sentinels for
 * branch types the hardware cannot filter, and LBR_ANY as a catch-all.
 * Confirm against the mapping tables (lbr_select_map) below.
 */
#define LBR_KERNEL …
#define LBR_USER …
#define LBR_JCC …
#define LBR_REL_CALL …
#define LBR_IND_CALL …
#define LBR_RETURN …
#define LBR_REL_JMP …
#define LBR_IND_JMP …
#define LBR_FAR …
#define LBR_NOT_SUPP …
#define LBR_IGNORE …
#define LBR_ANY …
struct branch_entry { … };
/*
 * Write the raw "branch from" record for LBR entry @idx.
 * NOTE(review): body elided — presumably a wrmsr to the per-entry
 * LBR_FROM MSR at a fixed stride from a base MSR; confirm.
 */
static __always_inline void amd_pmu_lbr_set_from(unsigned int idx, u64 val)
{ … }
/*
 * Write the raw "branch to" record for LBR entry @idx.
 * NOTE(review): body elided — presumably the LBR_TO MSR counterpart of
 * amd_pmu_lbr_set_from(); confirm.
 */
static __always_inline void amd_pmu_lbr_set_to(unsigned int idx, u64 val)
{ … }
/*
 * Read the raw "branch from" record for LBR entry @idx.
 * NOTE(review): body elided — presumably an rdmsr of the per-entry
 * LBR_FROM MSR; confirm.
 */
static __always_inline u64 amd_pmu_lbr_get_from(unsigned int idx)
{ … }
/*
 * Read the raw "branch to" record for LBR entry @idx.
 * NOTE(review): body elided — presumably the LBR_TO MSR counterpart of
 * amd_pmu_lbr_get_from(); confirm.
 */
static __always_inline u64 amd_pmu_lbr_get_to(unsigned int idx)
{ … }
/*
 * Canonicalize a branch address recorded by the hardware.
 * NOTE(review): body elided — the name suggests sign-extension of the
 * virtual-address bits stored in the LBR record up to a full canonical
 * 64-bit pointer; confirm the shift width against the record layout.
 */
static __always_inline u64 sign_ext_branch_ip(u64 ip)
{ … }
/*
 * Apply the event's software branch filter to the captured LBR stack.
 * NOTE(review): body elided — presumably post-processes the entries the
 * hardware could not filter itself (cf. LBR_NOT_SUPP above), dropping or
 * annotating records before they are handed to perf; confirm. Takes no
 * arguments, so it must operate on per-CPU state.
 */
static void amd_pmu_lbr_filter(void)
{ … }
static const int lbr_spec_map[PERF_BR_SPEC_MAX] = …;
/*
 * Drain the hardware LBR stack into perf's branch-entry buffer.
 * NOTE(review): body elided — presumably iterates the LBR entries via the
 * get_from/get_to accessors, canonicalizes addresses, and runs
 * amd_pmu_lbr_filter(); confirm. Public entry point called from the
 * core AMD PMU code (declared in ../perf_event.h).
 */
void amd_pmu_lbr_read(void)
{ … }
static const int lbr_select_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = …;
/*
 * Build the branch filter configuration for @event.
 * NOTE(review): body elided — presumably validates the event's
 * branch_sample_type against lbr_select_map and derives the LBR_SELECT
 * value / software-filter mask, returning 0 on success or a negative
 * errno for unsupported combinations; confirm the error convention.
 */
static int amd_pmu_lbr_setup_filter(struct perf_event *event)
{ … }
/*
 * Hardware-configuration hook for events requesting branch sampling.
 * NOTE(review): body elided — presumably checks LBR availability and
 * delegates to amd_pmu_lbr_setup_filter(); returns 0 or a negative errno.
 * Public entry point (declared in ../perf_event.h); confirm semantics.
 */
int amd_pmu_lbr_hw_config(struct perf_event *event)
{ … }
/*
 * Clear the hardware LBR stack on this CPU.
 * NOTE(review): body elided — presumably zeroes every entry via the
 * set_from/set_to accessors and resets any top-of-stack state; confirm.
 */
void amd_pmu_lbr_reset(void)
{ … }
/*
 * Account a newly scheduled-in LBR-using @event on this CPU.
 * NOTE(review): body elided — presumably stores the event's LBR_SELECT
 * configuration into per-CPU state and bumps the per-CPU LBR user count,
 * resetting the stack for the first user; confirm pairing with
 * amd_pmu_lbr_del().
 */
void amd_pmu_lbr_add(struct perf_event *event)
{ … }
/*
 * Release per-CPU LBR accounting when @event is scheduled out.
 * NOTE(review): body elided — presumably the inverse of amd_pmu_lbr_add(),
 * dropping the per-CPU user count; confirm.
 */
void amd_pmu_lbr_del(struct perf_event *event)
{ … }
/*
 * Context-switch hook for LBR state.
 * NOTE(review): body elided — presumably resets the LBR stack on
 * sched-in so one task's branch records do not leak into another's
 * samples; confirm whether the @sched_in == false path does anything.
 */
void amd_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
{ … }
/*
 * Turn on LBR recording for this CPU.
 * NOTE(review): body elided — presumably programs the LBR_SELECT MSR from
 * per-CPU state and sets the branch-sampling enable bit(s) in the relevant
 * control MSR (likely DBG_EXTN_CFG); confirm against the APM.
 */
void amd_pmu_lbr_enable_all(void)
{ … }
/*
 * Turn off LBR recording for this CPU.
 * NOTE(review): body elided — presumably clears the enable bit(s) set by
 * amd_pmu_lbr_enable_all(); confirm the two stay symmetric.
 */
void amd_pmu_lbr_disable_all(void)
{ … }
/*
 * Boot-time LBR feature detection and setup.
 * NOTE(review): body elided — presumably probes CPUID for LBR support,
 * records the stack depth, and returns 0 on success / negative errno when
 * the feature is absent; confirm. __init: discarded after boot, so it must
 * not be referenced from any runtime path.
 */
__init int amd_pmu_lbr_init(void)
{ … }