#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <asm/cpu_entry_area.h>
#include <asm/debugreg.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/insn.h>
#include <asm/io.h>
#include <asm/timer.h>
#include "../perf_event.h"
/*
 * Per-CPU Debug Store (DS) save area.  Page aligned because it is mapped
 * into the cpu entry area (see ds_update_cea() below).
 *
 * NOTE(review): all function bodies in this view are elided ("…"); the
 * comments in this file describe intent inferred from the visible
 * signatures and names -- confirm against the full source.
 */
DEFINE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store);
/* Sizes of one BTS record and of the PEBS IP-fixup scratch buffer (values elided). */
#define BTS_RECORD_SIZE …
#define PEBS_FIXUP_SIZE …
/*
 * NOTE(review): the next line looks like an elided declaration -- presumably
 * "union intel_x86_pebs_dse { ... };", the bitfield view of the PEBS
 * data-source encoding.  Confirm against the full source; kept verbatim here.
 */
intel_x86_pebs_dse;
/* Shorthand helpers for building PERF_MEM_* data-source encodings (bodies elided). */
#define P(a, b) …
#define OP_LH …
#define LEVEL(x) …
#define REM …
#define SNOOP_NONE_MISS …
/*
 * Table translating the raw PEBS data-source field into perf_mem_data_src
 * encodings.  The per-model __init functions below patch entries for the
 * microarchitectures they are named after (NHM, SKL, GRT, ADL, CMT, MTL,
 * LNL, ...).  NOTE(review): per-model behaviour inferred from naming --
 * bodies are elided in this view.
 */
static u64 pebs_data_source[PERF_PEBS_DATA_SOURCE_MAX] = …;
/* Adjust the data-source table for Nehalem-class cores. */
void __init intel_pmu_pebs_data_source_nhm(void)
{ … }
/* Common Skylake-style table setup; @pmem selects persistent-memory encodings. */
static void __init __intel_pmu_pebs_data_source_skl(bool pmem, u64 *data_source)
{ … }
void __init intel_pmu_pebs_data_source_skl(bool pmem)
{ … }
/* Gracemont (Atom) table setup, shared by the hybrid variants below. */
static void __init __intel_pmu_pebs_data_source_grt(u64 *data_source)
{ … }
void __init intel_pmu_pebs_data_source_grt(void)
{ … }
/* Alder Lake hybrid: presumably sets up both big-core and Atom tables. */
void __init intel_pmu_pebs_data_source_adl(void)
{ … }
/* Crestmont (Atom) table setup, shared helper for MTL/CMT below. */
static void __init __intel_pmu_pebs_data_source_cmt(u64 *data_source)
{ … }
void __init intel_pmu_pebs_data_source_mtl(void)
{ … }
void __init intel_pmu_pebs_data_source_cmt(void)
{ … }
/* Lion Cove has its own table rather than patching pebs_data_source[]. */
static u64 lnc_pebs_data_source[PERF_PEBS_DATA_SOURCE_MAX] = …;
/* Lunar Lake hybrid table setup. */
void __init intel_pmu_pebs_data_source_lnl(void)
{ … }
/*
 * Helpers decoding the raw PEBS "status"/DSE word of a record into the
 * perf_mem_data_src encoding reported to userspace.  NOTE(review): bodies
 * elided; the per-function purposes below are inferred from names.
 */
/* Decode precise-store data source (pre-Haswell style status word). */
static u64 precise_store_data(u64 status)
{ … }
/* Haswell "datala" decode: uses the event to tell loads from stores. */
static u64 precise_datala_hsw(struct perf_event *event, u64 status)
{ … }
/* Fold the TLB-miss and lock bits into an existing data-source value. */
static inline void pebs_set_tlb_lock(u64 *val, bool tlb, bool lock)
{ … }
/* Common Gracemont-style latency/data-source decode; @dse/@tlb/@lock/@blk are pre-extracted fields. */
static u64 __grt_latency_data(struct perf_event *event, u64 status,
			       u8 dse, bool tlb, bool lock, bool blk)
{ … }
/* Per-model wrappers extracting the fields for their DSE layout. */
u64 grt_latency_data(struct perf_event *event, u64 status)
{ … }
u64 cmt_latency_data(struct perf_event *event, u64 status)
{ … }
static u64 lnc_latency_data(struct perf_event *event, u64 status)
{ … }
u64 lnl_latency_data(struct perf_event *event, u64 status)
{ … }
/* Decode for the load-latency and store-latency PEBS facilities respectively. */
static u64 load_latency_data(struct perf_event *event, u64 status)
{ … }
static u64 store_latency_data(struct perf_event *event, u64 status)
{ … }
/*
 * Hardware layouts of a single PEBS record for successive record formats:
 * original Core, Nehalem, Haswell, Skylake.  Fields elided in this view.
 */
struct pebs_record_core { … };
struct pebs_record_nhm { … };
struct pebs_record_hsw { … };
/*
 * NOTE(review): elided declaration -- presumably "union hsw_tsx_tuning
 * { ... };", the bitfield view of the HSW TSX tuning field.  Kept verbatim.
 */
hsw_tsx_tuning;
#define PEBS_HSW_TSX_FLAGS …
struct pebs_record_skl { … };
/*
 * DS area lifecycle: allocate/free the per-CPU PEBS and BTS buffers, map
 * them into the cpu entry area, and point the DS_AREA MSR at them.
 * NOTE(review): bodies elided; per-function purposes inferred from names.
 */
/* Program (resp. clear) the DS_AREA MSR on @cpu. */
void init_debug_store_on_cpu(int cpu)
{ … }
void fini_debug_store_on_cpu(int cpu)
{ … }
/* Per-CPU scratch buffer used by intel_pmu_pebs_fixup_ip() instruction decoding. */
static DEFINE_PER_CPU(void *, insn_buffer);
/* Map (resp. unmap) a buffer into the cpu entry area with protection @prot. */
static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot)
{ … }
static void ds_clear_cea(void *cea, size_t size)
{ … }
/* NUMA-aware page allocation/free helpers for the DS buffers. */
static void *dsalloc_pages(size_t size, gfp_t flags, int cpu)
{ … }
static void dsfree_pages(const void *buffer, size_t size)
{ … }
/* Allocate/release the PEBS buffer for @cpu (returns 0 or -errno, presumably). */
static int alloc_pebs_buffer(int cpu)
{ … }
static void release_pebs_buffer(int cpu)
{ … }
/* Allocate/release the BTS buffer for @cpu. */
static int alloc_bts_buffer(int cpu)
{ … }
static void release_bts_buffer(int cpu)
{ … }
/* Allocate/release the DS management structure itself for @cpu. */
static int alloc_ds_buffer(int cpu)
{ … }
static void release_ds_buffer(int cpu)
{ … }
/* System-wide reserve/release entry points called from the core PMU code. */
void release_ds_buffers(void)
{ … }
void reserve_ds_buffers(void)
{ … }
/* Scheduling constraint for the BTS pseudo-event. */
struct event_constraint bts_constraint = …;
/* Start/stop Branch Trace Store via the DEBUGCTL MSR (bodies elided). */
void intel_pmu_enable_bts(u64 config)
{ … }
void intel_pmu_disable_bts(void)
{ … }
/* Flush accumulated BTS records into the perf ring buffer; returns a count or status. */
int intel_pmu_drain_bts_buffer(void)
{ … }
/* Force a drain of the PEBS buffer (used before reconfiguration, presumably). */
static inline void intel_pmu_drain_pebs_buffer(void)
{ … }
/*
 * Per-microarchitecture tables of which events may use PEBS and on which
 * counters (initializers elided).  Referenced from the model setup code
 * in core.c, hence non-static.
 */
struct event_constraint intel_core2_pebs_event_constraints[] = …;
struct event_constraint intel_atom_pebs_event_constraints[] = …;
struct event_constraint intel_slm_pebs_event_constraints[] = …;
struct event_constraint intel_glm_pebs_event_constraints[] = …;
struct event_constraint intel_grt_pebs_event_constraints[] = …;
struct event_constraint intel_nehalem_pebs_event_constraints[] = …;
struct event_constraint intel_westmere_pebs_event_constraints[] = …;
struct event_constraint intel_snb_pebs_event_constraints[] = …;
struct event_constraint intel_ivb_pebs_event_constraints[] = …;
struct event_constraint intel_hsw_pebs_event_constraints[] = …;
struct event_constraint intel_bdw_pebs_event_constraints[] = …;
struct event_constraint intel_skl_pebs_event_constraints[] = …;
struct event_constraint intel_icl_pebs_event_constraints[] = …;
struct event_constraint intel_glc_pebs_event_constraints[] = …;
struct event_constraint intel_lnc_pebs_event_constraints[] = …;
/* Look up the PEBS constraint applying to @event, or NULL-equivalent if none (body elided). */
struct event_constraint *intel_pebs_constraints(struct perf_event *event)
{ … }
/*
 * PEBS event scheduling: add/del/enable/disable paths and the context-switch
 * callback.  NOTE(review): bodies elided; purposes inferred from names.
 */
/* Whether the current PEBS configuration needs the sched_task callback. */
static inline bool pebs_needs_sched_cb(struct cpu_hw_events *cpuc)
{ … }
/* Context-switch hook: presumably drains the buffer so records don't cross contexts. */
void intel_pmu_pebs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
{ … }
/* Recompute the PEBS interrupt threshold (single-record vs large-PEBS batching). */
static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
{ … }
/* Recompute the adaptive-PEBS record size from the union of active events' needs. */
static void adaptive_pebs_record_size_update(void)
{ … }
#define PERF_PEBS_MEMINFO_TYPE …
/* Build the adaptive-PEBS (PEBS_DATA_CFG) field mask required by @event. */
static u64 pebs_update_adaptive_cfg(struct perf_event *event)
{ … }
/* Common state update when a PEBS event is added (@add) or removed. */
static void
pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
		  struct perf_event *event, bool add)
{ … }
void intel_pmu_pebs_add(struct perf_event *event)
{ … }
/* Route PEBS output via Intel PT (PEBS-via-PT) off/on for @event. */
static void intel_pmu_pebs_via_pt_disable(struct perf_event *event)
{ … }
static void intel_pmu_pebs_via_pt_enable(struct perf_event *event)
{ … }
/* Drain pending records if large-PEBS batching is active. */
static inline void intel_pmu_drain_large_pebs(struct cpu_hw_events *cpuc)
{ … }
void intel_pmu_pebs_enable(struct perf_event *event)
{ … }
void intel_pmu_pebs_del(struct perf_event *event)
{ … }
void intel_pmu_pebs_disable(struct perf_event *event)
{ … }
/* Write the accumulated enable mask to (presumably) MSR_IA32_PEBS_ENABLE. */
void intel_pmu_pebs_enable_all(void)
{ … }
void intel_pmu_pebs_disable_all(void)
{ … }
/*
 * Converting raw PEBS records into perf_sample_data.  NOTE(review): bodies
 * elided; purposes inferred from names and parameter lists.
 */
/* Rewind regs->ip from "off-by-one-instruction" PEBS IP to the precise IP
 * by decoding instructions (uses the per-cpu insn_buffer). */
static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
{ … }
/* Extract TSX abort weight / transaction flags from the HSW tsx_tuning field. */
static inline u64 intel_get_tsx_weight(u64 tsx_tuning)
{ … }
static inline u64 intel_get_tsx_transaction(u64 tsx_tuning, u64 ax)
{ … }
/* Read the status field out of a raw record pointer @n. */
static inline u64 get_pebs_status(void *n)
{ … }
#define PERF_X86_EVENT_PEBS_HSW_PREC …
/* Dispatch to the right data-source decoder for @event's flags. */
static u64 get_data_src(struct perf_event *event, u64 aux)
{ … }
/* Fill data->time from the record's TSC value when a stable conversion exists. */
static void setup_pebs_time(struct perf_event *event,
			    struct perf_sample_data *data,
			    u64 tsc)
{ … }
#define PERF_SAMPLE_ADDR_TYPE …
/* Populate @data/@regs from one fixed-format (pre-Icelake) PEBS record @__pebs. */
static void setup_pebs_fixed_sample_data(struct perf_event *event,
					 struct pt_regs *iregs, void *__pebs,
					 struct perf_sample_data *data,
					 struct pt_regs *regs)
{ … }
/* Copy the GPR group of an adaptive record into @regs. */
static void adaptive_pebs_save_regs(struct pt_regs *regs,
				    struct pebs_gprs *gprs)
{ … }
#define PEBS_LATENCY_MASK …
#define PEBS_CACHE_LATENCY_OFFSET …
#define PEBS_RETIRE_LATENCY_OFFSET …
/* Populate @data/@regs from one adaptive-format (Icelake+) PEBS record. */
static void setup_pebs_adaptive_sample_data(struct perf_event *event,
					    struct pt_regs *iregs, void *__pebs,
					    struct perf_sample_data *data,
					    struct pt_regs *regs)
{ … }
/* Scan [base, top) for the next record whose status has @bit set. */
static inline void *
get_next_pebs_record_by_bit(void *base, void *top, int bit)
{ … }
/* Fold buffered records into the count of an auto-reload event before read(). */
void intel_pmu_auto_reload_read(struct perf_event *event)
{ … }
/* save_and_restart variant accounting for @count buffered auto-reload periods. */
static int
intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
{ … }
/*
 * Common PEBS record-processing loop: walk records in [base, top) matching
 * counter @bit, build samples via @setup_sample, and emit all @count of
 * them for @event.  NOTE(review): body elided.
 */
static __always_inline void
__intel_pmu_pebs_event(struct perf_event *event,
		       struct pt_regs *iregs,
		       struct perf_sample_data *data,
		       void *base, void *top,
		       int bit, int count,
		       void (*setup_sample)(struct perf_event *,
					    struct pt_regs *,
					    void *,
					    struct perf_sample_data *,
					    struct pt_regs *))
{ … }
/* Drain variant for the original Core PEBS format (single event, single counter). */
static void intel_pmu_drain_pebs_core(struct pt_regs *iregs, struct perf_sample_data *data)
{ … }
/* Update event counts without emitting samples (e.g. when nothing to drain). */
static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int size)
{ … }
/* Drain variants for the Nehalem..Skylake and the adaptive (Icelake+) formats. */
static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_data *data)
{ … }
static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_data *data)
{ … }
/*
 * Boot-time detection of BTS/PEBS capabilities and selection of the
 * format-specific handlers declared above (body elided).
 */
void __init intel_ds_init(void)
{ … }
/* Restore the DS_AREA MSR after suspend/resume (body elided). */
void perf_restore_debug_store(void)
{ … }