#define pr_fmt(fmt) …
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/nmi.h>
#include <linux/kvm_host.h>
#include <asm/cpufeature.h>
#include <asm/debugreg.h>
#include <asm/hardirq.h>
#include <asm/intel-family.h>
#include <asm/intel_pt.h>
#include <asm/apic.h>
#include <asm/cpu_device_id.h>
#include "../perf_event.h"
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly = …;
static struct event_constraint intel_core_event_constraints[] __read_mostly = …;
static struct event_constraint intel_core2_event_constraints[] __read_mostly = …;
static struct event_constraint intel_nehalem_event_constraints[] __read_mostly = …;
static struct extra_reg intel_nehalem_extra_regs[] __read_mostly = …;
static struct event_constraint intel_westmere_event_constraints[] __read_mostly = …;
static struct event_constraint intel_snb_event_constraints[] __read_mostly = …;
static struct event_constraint intel_ivb_event_constraints[] __read_mostly = …;
static struct extra_reg intel_westmere_extra_regs[] __read_mostly = …;
static struct event_constraint intel_v1_event_constraints[] __read_mostly = …;
static struct event_constraint intel_gen_event_constraints[] __read_mostly = …;
static struct event_constraint intel_v5_gen_event_constraints[] __read_mostly = …;
static struct event_constraint intel_slm_event_constraints[] __read_mostly = …;
static struct event_constraint intel_grt_event_constraints[] __read_mostly = …;
static struct event_constraint intel_skt_event_constraints[] __read_mostly = …;
static struct event_constraint intel_skl_event_constraints[] = …;
static struct extra_reg intel_knl_extra_regs[] __read_mostly = …;
static struct extra_reg intel_snb_extra_regs[] __read_mostly = …;
static struct extra_reg intel_snbep_extra_regs[] __read_mostly = …;
static struct extra_reg intel_skl_extra_regs[] __read_mostly = …;
static struct event_constraint intel_icl_event_constraints[] = …;
static struct extra_reg intel_icl_extra_regs[] __read_mostly = …;
static struct extra_reg intel_glc_extra_regs[] __read_mostly = …;
static struct event_constraint intel_glc_event_constraints[] = …;
static struct extra_reg intel_rwc_extra_regs[] __read_mostly = …;
static struct event_constraint intel_lnc_event_constraints[] = …;
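/*
 * Sysfs event aliases (/sys/bus/event_source/devices/cpu/events/).
 * The mem-loads encodings use the PEBS load-latency facility; ldlat=3
 * is the minimum latency threshold, in core cycles.
 */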
EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");
static struct attribute *nhm_mem_events_attrs[] = …;
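/*
 * Top-down level-1 slots events for pre-Icelake cores. The _HT variants
 * carry two encodings: the second (any=1) is used when both SMT siblings
 * are active, together with a halved .scale, because the 4 issue slots
 * per cycle are shared between the two hyperthreads.
 */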
EVENT_ATTR_STR_HT(topdown-total-slots, td_total_slots,
"event=0x3c,umask=0x0",
"event=0x3c,umask=0x0,any=1");
EVENT_ATTR_STR_HT(topdown-total-slots.scale, td_total_slots_scale, "4", "2");
EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued,
"event=0xe,umask=0x1");
EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired,
"event=0xc2,umask=0x2");
EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles,
"event=0x9c,umask=0x1");
EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles,
"event=0xd,umask=0x3,cmask=1",
"event=0xd,umask=0x3,cmask=1,any=1");
EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale,
"4", "2");
EVENT_ATTR_STR(slots, slots, "event=0x00,umask=0x4");
EVENT_ATTR_STR(topdown-retiring, td_retiring, "event=0x00,umask=0x80");
EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec, "event=0x00,umask=0x81");
EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound, "event=0x00,umask=0x82");
EVENT_ATTR_STR(topdown-be-bound, td_be_bound, "event=0x00,umask=0x83");
EVENT_ATTR_STR(topdown-heavy-ops, td_heavy_ops, "event=0x00,umask=0x84");
EVENT_ATTR_STR(topdown-br-mispredict, td_br_mispredict, "event=0x00,umask=0x85");
EVENT_ATTR_STR(topdown-fetch-lat, td_fetch_lat, "event=0x00,umask=0x86");
EVENT_ATTR_STR(topdown-mem-bound, td_mem_bound, "event=0x00,umask=0x87");
static struct attribute *snb_events_attrs[] = …;
static struct attribute *snb_mem_events_attrs[] = …;
static struct event_constraint intel_hsw_event_constraints[] = …;
static struct event_constraint intel_bdw_event_constraints[] = …;
static u64 intel_pmu_event_map(int hw_event)
{ … }
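/*
 * hw_cache_event_ids[cache][op][result] maps the generic cache events to
 * raw encodings; -1 marks combinations the CPU cannot count, and entries
 * in the matching hw_cache_extra_regs table are programmed through the
 * OFFCORE_RESPONSE extra MSRs instead.
 */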
static __initconst const u64 glc_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = …;
static __initconst const u64 glc_hw_cache_extra_regs
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = …;
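/*
 * Bit fields of the Skylake OFFCORE_RESPONSE MSRs (0x1a6/0x1a7):
 * request type, supplier and snoop information, used to build the
 * extra_regs tables above. The SNB_*, HSW_*, NHM_*, SLM_*, GLM_*, TNT_*
 * and KNL_* groups below spell out the same encoding for their
 * respective generations.
 */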
#define SKL_DEMAND_DATA_RD …
#define SKL_DEMAND_RFO …
#define SKL_ANY_RESPONSE …
#define SKL_SUPPLIER_NONE …
#define SKL_L3_MISS_LOCAL_DRAM …
#define SKL_L3_MISS_REMOTE_HOP0_DRAM …
#define SKL_L3_MISS_REMOTE_HOP1_DRAM …
#define SKL_L3_MISS_REMOTE_HOP2P_DRAM …
#define SKL_L3_MISS …
#define SKL_SPL_HIT …
#define SKL_SNOOP_NONE …
#define SKL_SNOOP_NOT_NEEDED …
#define SKL_SNOOP_MISS …
#define SKL_SNOOP_HIT_NO_FWD …
#define SKL_SNOOP_HIT_WITH_FWD …
#define SKL_SNOOP_HITM …
#define SKL_SNOOP_NON_DRAM …
#define SKL_ANY_SNOOP …
#define SKL_DEMAND_READ …
#define SKL_SNOOP_DRAM …
#define SKL_DEMAND_WRITE …
#define SKL_LLC_ACCESS …
#define SKL_L3_MISS_REMOTE …
static __initconst const u64 skl_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = …;
static __initconst const u64 skl_hw_cache_extra_regs
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = …;
#define SNB_DMND_DATA_RD …
#define SNB_DMND_RFO …
#define SNB_DMND_IFETCH …
#define SNB_DMND_WB …
#define SNB_PF_DATA_RD …
#define SNB_PF_RFO …
#define SNB_PF_IFETCH …
#define SNB_LLC_DATA_RD …
#define SNB_LLC_RFO …
#define SNB_LLC_IFETCH …
#define SNB_BUS_LOCKS …
#define SNB_STRM_ST …
#define SNB_OTHER …
#define SNB_RESP_ANY …
#define SNB_NO_SUPP …
#define SNB_LLC_HITM …
#define SNB_LLC_HITE …
#define SNB_LLC_HITS …
#define SNB_LLC_HITF …
#define SNB_LOCAL …
#define SNB_REMOTE …
#define SNB_SNP_NONE …
#define SNB_SNP_NOT_NEEDED …
#define SNB_SNP_MISS …
#define SNB_NO_FWD …
#define SNB_SNP_FWD …
#define SNB_HITM …
#define SNB_NON_DRAM …
#define SNB_DMND_READ …
#define SNB_DMND_WRITE …
#define SNB_DMND_PREFETCH …
#define SNB_SNP_ANY …
#define SNB_DRAM_ANY …
#define SNB_DRAM_REMOTE …
#define SNB_L3_ACCESS …
#define SNB_L3_MISS …
static __initconst const u64 snb_hw_cache_extra_regs
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = …;
static __initconst const u64 snb_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = …;
#define HSW_DEMAND_DATA_RD …
#define HSW_DEMAND_RFO …
#define HSW_ANY_RESPONSE …
#define HSW_SUPPLIER_NONE …
#define HSW_L3_MISS_LOCAL_DRAM …
#define HSW_L3_MISS_REMOTE_HOP0 …
#define HSW_L3_MISS_REMOTE_HOP1 …
#define HSW_L3_MISS_REMOTE_HOP2P …
#define HSW_L3_MISS …
#define HSW_SNOOP_NONE …
#define HSW_SNOOP_NOT_NEEDED …
#define HSW_SNOOP_MISS …
#define HSW_SNOOP_HIT_NO_FWD …
#define HSW_SNOOP_HIT_WITH_FWD …
#define HSW_SNOOP_HITM …
#define HSW_SNOOP_NON_DRAM …
#define HSW_ANY_SNOOP …
#define HSW_SNOOP_DRAM …
#define HSW_DEMAND_READ …
#define HSW_DEMAND_WRITE …
#define HSW_L3_MISS_REMOTE …
#define HSW_LLC_ACCESS …
#define BDW_L3_MISS_LOCAL …
#define BDW_L3_MISS …
static __initconst const u64 hsw_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = …;
static __initconst const u64 hsw_hw_cache_extra_regs
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = …;
static __initconst const u64 westmere_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = …;
#define NHM_DMND_DATA_RD …
#define NHM_DMND_RFO …
#define NHM_DMND_IFETCH …
#define NHM_DMND_WB …
#define NHM_PF_DATA_RD …
#define NHM_PF_DATA_RFO …
#define NHM_PF_IFETCH …
#define NHM_OFFCORE_OTHER …
#define NHM_UNCORE_HIT …
#define NHM_OTHER_CORE_HIT_SNP …
#define NHM_OTHER_CORE_HITM …
#define NHM_REMOTE_CACHE_FWD …
#define NHM_REMOTE_DRAM …
#define NHM_LOCAL_DRAM …
#define NHM_NON_DRAM …
#define NHM_LOCAL …
#define NHM_REMOTE …
#define NHM_DMND_READ …
#define NHM_DMND_WRITE …
#define NHM_DMND_PREFETCH …
#define NHM_L3_HIT …
#define NHM_L3_MISS …
#define NHM_L3_ACCESS …
static __initconst const u64 nehalem_hw_cache_extra_regs
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = …;
static __initconst const u64 nehalem_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = …;
static __initconst const u64 core2_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = …;
static __initconst const u64 atom_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = …;
EVENT_ATTR_STR(topdown-total-slots, td_total_slots_slm, "event=0x3c");
EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_slm, "2");
EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_slm,
"event=0xca,umask=0x50");
EVENT_ATTR_STR(topdown-fetch-bubbles.scale, td_fetch_bubbles_scale_slm, "2");
EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_slm,
"event=0xc2,umask=0x10");
EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_slm,
"event=0xc2,umask=0x10");
static struct attribute *slm_events_attrs[] = …;
static struct extra_reg intel_slm_extra_regs[] __read_mostly = …;
#define SLM_DMND_READ …
#define SLM_DMND_WRITE …
#define SLM_DMND_PREFETCH …
#define SLM_SNP_ANY …
#define SLM_LLC_ACCESS …
#define SLM_LLC_MISS …
static __initconst const u64 slm_hw_cache_extra_regs
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = …;
static __initconst const u64 slm_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = …;
EVENT_ATTR_STR(topdown-total-slots, td_total_slots_glm, "event=0x3c");
EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_glm, "3");
EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_glm, "event=0x9c");
EVENT_ATTR_STR(topdown-recovery-bubbles, td_recovery_bubbles_glm, "event=0xca,umask=0x02");
EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_glm, "event=0xc2");
EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_glm, "event=0x0e");
static struct attribute *glm_events_attrs[] = …;
static struct extra_reg intel_glm_extra_regs[] __read_mostly = …;
#define GLM_DEMAND_DATA_RD …
#define GLM_DEMAND_RFO …
#define GLM_ANY_RESPONSE …
#define GLM_SNP_NONE_OR_MISS …
#define GLM_DEMAND_READ …
#define GLM_DEMAND_WRITE …
#define GLM_DEMAND_PREFETCH …
#define GLM_LLC_ACCESS …
#define GLM_SNP_ANY …
#define GLM_LLC_MISS …
static __initconst const u64 glm_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = …;
static __initconst const u64 glm_hw_cache_extra_regs
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = …;
static __initconst const u64 glp_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = …;
static __initconst const u64 glp_hw_cache_extra_regs
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = …;
#define TNT_LOCAL_DRAM …
#define TNT_DEMAND_READ …
#define TNT_DEMAND_WRITE …
#define TNT_LLC_ACCESS …
#define TNT_SNP_ANY …
#define TNT_LLC_MISS …
static __initconst const u64 tnt_hw_cache_extra_regs
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = …;
EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound_tnt, "event=0x71,umask=0x0");
EVENT_ATTR_STR(topdown-retiring, td_retiring_tnt, "event=0xc2,umask=0x0");
EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec_tnt, "event=0x73,umask=0x6");
EVENT_ATTR_STR(topdown-be-bound, td_be_bound_tnt, "event=0x74,umask=0x0");
static struct attribute *tnt_events_attrs[] = …;
static struct extra_reg intel_tnt_extra_regs[] __read_mostly = …;
EVENT_ATTR_STR(mem-loads, mem_ld_grt, "event=0xd0,umask=0x5,ldlat=3");
EVENT_ATTR_STR(mem-stores, mem_st_grt, "event=0xd0,umask=0x6");
static struct attribute *grt_mem_attrs[] = …;
static struct extra_reg intel_grt_extra_regs[] __read_mostly = …;
EVENT_ATTR_STR(topdown-retiring, td_retiring_cmt, "event=0x72,umask=0x0");
EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec_cmt, "event=0x73,umask=0x0");
static struct attribute *cmt_events_attrs[] = …;
static struct extra_reg intel_cmt_extra_regs[] __read_mostly = …;
#define KNL_OT_L2_HITE …
#define KNL_OT_L2_HITF …
#define KNL_MCDRAM_LOCAL …
#define KNL_MCDRAM_FAR …
#define KNL_DDR_LOCAL …
#define KNL_DDR_FAR …
#define KNL_DRAM_ANY …
#define KNL_L2_READ …
#define KNL_L2_WRITE …
#define KNL_L2_PREFETCH …
#define KNL_L2_ACCESS …
#define KNL_L2_MISS …
static __initconst const u64 knl_hw_cache_extra_regs
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = …;
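/*
 * Global enable/disable toggles MSR_CORE_PERF_GLOBAL_CTRL for all
 * counters at once; the bts argument controls whether the branch trace
 * store is stopped and restarted along with them.
 */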
static __always_inline void __intel_pmu_disable_all(bool bts)
{ … }
static __always_inline void intel_pmu_disable_all(void)
{ … }
static void __intel_pmu_enable_all(int added, bool pmi)
{ … }
static void intel_pmu_enable_all(int added)
{ … }
static noinline int
__intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries,
unsigned int cnt, unsigned long flags)
{ … }
static int
intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries, unsigned int cnt)
{ … }
static int
intel_pmu_snapshot_arch_branch_stack(struct perf_branch_entry *entries, unsigned int cnt)
{ … }
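/*
 * Nehalem erratum workaround: MEM_*_RETIRED style events can get stuck.
 * Per Intel errata (AAK100/AAP53/BD53), the enable path first programs a
 * set of magic non-counting events on counters 0-3 before the real
 * configuration is restored.
 */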
static void intel_pmu_nhm_workaround(void)
{ … }
static void intel_pmu_nhm_enable_all(int added)
{ … }
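/*
 * TSX force abort (TFA): on affected Skylake parts PMC3 only counts
 * correctly when MSR_TSX_FORCE_ABORT makes RTM transactions abort.
 * allow_tsx_force_abort (a sysfs knob) decides whether perf may claim
 * PMC3 at that cost; otherwise PMC3 is kept out of the counter masks.
 */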
static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
{ … }
static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
{ … }
static void intel_tfa_pmu_enable_all(int added)
{ … }
static inline u64 intel_pmu_get_status(void)
{ … }
static inline void intel_pmu_ack_status(u64 ack)
{ … }
static inline bool event_is_checkpointed(struct perf_event *event)
{ … }
static inline void intel_set_masks(struct perf_event *event, int idx)
{ … }
static inline void intel_clear_masks(struct perf_event *event, int idx)
{ … }
static void intel_pmu_disable_fixed(struct perf_event *event)
{ … }
static void intel_pmu_disable_event(struct perf_event *event)
{ … }
static void intel_pmu_assign_event(struct perf_event *event, int idx)
{ … }
static __always_inline bool intel_pmu_needs_branch_stack(struct perf_event *event)
{ … }
static void intel_pmu_del_event(struct perf_event *event)
{ … }
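/*
 * Icelake top-down metrics: PERF_METRICS packs one byte per metric, each
 * the fraction (out of 0xff) of SLOTS attributed to that metric, i.e.
 * count = slots * byte / 0xff. The register saturates, so the update and
 * period helpers reset SLOTS and PERF_METRICS together and accumulate
 * the deltas into the grouped metric events.
 */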
static int icl_set_topdown_event_period(struct perf_event *event)
{ … }
DEFINE_STATIC_CALL(…);
static inline u64 icl_get_metrics_event_value(u64 metric, u64 slots, int idx)
{ … }
static u64 icl_get_topdown_value(struct perf_event *event,
u64 slots, u64 metrics)
{ … }
static void __icl_update_topdown_event(struct perf_event *event,
u64 slots, u64 metrics,
u64 last_slots, u64 last_metrics)
{ … }
static void update_saved_topdown_regs(struct perf_event *event, u64 slots,
u64 metrics, int metric_end)
{ … }
static u64 intel_update_topdown_event(struct perf_event *event, int metric_end)
{ … }
static u64 icl_update_topdown_event(struct perf_event *event)
{ … }
DEFINE_STATIC_CALL(…);
static void intel_pmu_read_topdown_event(struct perf_event *event)
{ … }
static void intel_pmu_read_event(struct perf_event *event)
{ … }
static void intel_pmu_enable_fixed(struct perf_event *event)
{ … }
static void intel_pmu_enable_event(struct perf_event *event)
{ … }
static void intel_pmu_add_event(struct perf_event *event)
{ … }
int intel_pmu_save_and_restart(struct perf_event *event)
{ … }
static int intel_pmu_set_period(struct perf_event *event)
{ … }
static u64 intel_pmu_update(struct perf_event *event)
{ … }
static void intel_pmu_reset(void)
{ … }
static void x86_pmu_handle_guest_pebs(struct pt_regs *regs,
struct perf_sample_data *data)
{ … }
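/*
 * PMI path: intel_pmu_handle_irq() disables the PMU, reads and acks
 * MSR_CORE_PERF_GLOBAL_STATUS, and handle_pmi_common() then services the
 * PEBS, BTS and metrics-overflow bits plus each overflowed counter
 * before counters are re-enabled.
 */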
static int handle_pmi_common(struct pt_regs *regs, u64 status)
{ … }
static int intel_pmu_handle_irq(struct pt_regs *regs)
{ … }
static struct event_constraint *
intel_bts_constraints(struct perf_event *event)
{ … }
static struct event_constraint *
intel_vlbr_constraints(struct perf_event *event)
{ … }
static int intel_alt_er(struct cpu_hw_events *cpuc,
int idx, u64 config)
{ … }
static void intel_fixup_er(struct perf_event *event, int idx)
{ … }
static struct event_constraint *
__intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
struct perf_event *event,
struct hw_perf_event_extra *reg)
{ … }
static void
__intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
struct hw_perf_event_extra *reg)
{ … }
static struct event_constraint *
intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
struct perf_event *event)
{ … }
struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event)
{ … }
static struct event_constraint *
__intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event)
{ … }
static void
intel_start_scheduling(struct cpu_hw_events *cpuc)
{ … }
static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
{ … }
static void
intel_stop_scheduling(struct cpu_hw_events *cpuc)
{ … }
static struct event_constraint *
dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
{ … }
static struct event_constraint *
intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
int idx, struct event_constraint *c)
{ … }
static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event)
{ … }
static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
struct perf_event *event)
{ … }
static void
intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
struct perf_event *event)
{ … }
static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
struct perf_event *event)
{ … }
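/*
 * PEBS aliases: when precise sampling is requested, rewrite the
 * non-PEBS-capable CPU_CLK_UNHALTED encoding to a PEBS-capable stand-in
 * (roughly, INST_RETIRED/UOPS_RETIRED variants with inv+cmask so they
 * still count every cycle); the precdist variants use the dedicated
 * INST_RETIRED.PREC_DIST event for precise_ip == 3.
 */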
static void intel_pebs_aliases_core2(struct perf_event *event)
{ … }
static void intel_pebs_aliases_snb(struct perf_event *event)
{ … }
static void intel_pebs_aliases_precdist(struct perf_event *event)
{ … }
static void intel_pebs_aliases_ivb(struct perf_event *event)
{ … }
static void intel_pebs_aliases_skl(struct perf_event *event)
{ … }
static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
{ … }
static int intel_pmu_bts_config(struct perf_event *event)
{ … }
static int core_pmu_hw_config(struct perf_event *event)
{ … }
#define INTEL_TD_METRIC_AVAILABLE_MAX …
static bool is_available_metric_event(struct perf_event *event)
{ … }
static inline bool is_mem_loads_event(struct perf_event *event)
{ … }
static inline bool is_mem_loads_aux_event(struct perf_event *event)
{ … }
static inline bool require_mem_loads_aux_event(struct perf_event *event)
{ … }
static inline bool intel_pmu_has_cap(struct perf_event *event, int idx)
{ … }
static int intel_pmu_hw_config(struct perf_event *event)
{ … }
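/*
 * Guest switch MSRs: intel_guest_get_msrs() builds the list KVM swaps
 * atomically at VM entry/exit (MSR_CORE_PERF_GLOBAL_CTRL, plus the PEBS
 * enable and DS area MSRs when guest PEBS is in use) so host counters do
 * not run while the guest does, and vice versa.
 */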
static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
{ … }
static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr, void *data)
{ … }
static void core_pmu_enable_event(struct perf_event *event)
{ … }
static void core_pmu_enable_all(int added)
{ … }
static int hsw_hw_config(struct perf_event *event)
{ … }
static struct event_constraint counter0_constraint = …;
static struct event_constraint counter1_constraint = …;
static struct event_constraint counter0_1_constraint = …;
static struct event_constraint counter2_constraint = …;
static struct event_constraint fixed0_constraint = …;
static struct event_constraint fixed0_counter0_constraint = …;
static struct event_constraint fixed0_counter0_1_constraint = …;
static struct event_constraint counters_1_7_constraint = …;
static struct event_constraint *
hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event)
{ … }
static struct event_constraint *
icl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event)
{ … }
static struct event_constraint *
glc_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event)
{ … }
static struct event_constraint *
glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event)
{ … }
static struct event_constraint *
tnt_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event)
{ … }
static bool allow_tsx_force_abort = …;
static struct event_constraint *
tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event)
{ … }
static struct event_constraint *
adl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event)
{ … }
static struct event_constraint *
cmt_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event)
{ … }
static struct event_constraint *
rwc_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event)
{ … }
static struct event_constraint *
mtl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event)
{ … }
static int adl_hw_config(struct perf_event *event)
{ … }
static enum hybrid_cpu_type adl_get_hybrid_cpu_type(void)
{ … }
static void bdw_limit_period(struct perf_event *event, s64 *left)
{ … }
static void nhm_limit_period(struct perf_event *event, s64 *left)
{ … }
static void glc_limit_period(struct perf_event *event, s64 *left)
{ … }
PMU_FORMAT_ATTR(…);
PMU_FORMAT_ATTR(…);
PMU_FORMAT_ATTR(…);
PMU_FORMAT_ATTR(…);
PMU_FORMAT_ATTR(…);
PMU_FORMAT_ATTR(…);
PMU_FORMAT_ATTR(…);
PMU_FORMAT_ATTR(…);
PMU_FORMAT_ATTR(…);
PMU_FORMAT_ATTR(…);
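/*
 * umask2 and eq are extended event-select fields advertised by the
 * archPerfmonExt CPUID leaf; evtsel_ext_is_visible() only exposes them
 * on CPUs that report the corresponding EVENTSEL bits.
 */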
static ssize_t umask2_show(struct device *dev,
struct device_attribute *attr,
char *page)
{ … }
static struct device_attribute format_attr_umask2 = …;
static struct attribute *format_evtsel_ext_attrs[] = …;
static umode_t
evtsel_ext_is_visible(struct kobject *kobj, struct attribute *attr, int i)
{ … }
static struct attribute *intel_arch_formats_attr[] = …;
ssize_t intel_event_sysfs_show(char *page, u64 config)
{ … }
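/*
 * Per-CPU state: intel_shared_regs tracks extra MSRs (OFFCORE_RESPONSE,
 * load latency) that SMT siblings share, and intel_excl_cntrs supports
 * the exclusive counter scheduling needed for the HT erratum; the CPU
 * hotplug callbacks below wire siblings to the same structures.
 */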
static struct intel_shared_regs *allocate_shared_regs(int cpu)
{ … }
static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
{ … }
int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
{ … }
static int intel_pmu_cpu_prepare(int cpu)
{ … }
static void flip_smm_bit(void *data)
{ … }
static void intel_pmu_check_counters_mask(u64 *cntr_mask,
u64 *fixed_cntr_mask,
u64 *intel_ctrl)
{ … }
static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints,
u64 cntr_mask,
u64 fixed_cntr_mask,
u64 intel_ctrl);
static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs);
static inline bool intel_pmu_broken_perf_cap(void)
{ … }
static void update_pmu_cap(struct x86_hybrid_pmu *pmu)
{ … }
static void intel_pmu_check_hybrid_pmus(struct x86_hybrid_pmu *pmu)
{ … }
static struct x86_hybrid_pmu *find_hybrid_pmu_for_cpu(void)
{ … }
static bool init_hybrid_pmu(int cpu)
{ … }
static void intel_pmu_cpu_starting(int cpu)
{ … }
static void free_excl_cntrs(struct cpu_hw_events *cpuc)
{ … }
static void intel_pmu_cpu_dying(int cpu)
{ … }
void intel_cpuc_finish(struct cpu_hw_events *cpuc)
{ … }
static void intel_pmu_cpu_dead(int cpu)
{ … }
static void intel_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
bool sched_in)
{ … }
static void intel_pmu_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
struct perf_event_pmu_context *next_epc)
{ … }
static int intel_pmu_check_period(struct perf_event *event, u64 value)
{ … }
static void intel_aux_output_init(void)
{ … }
static int intel_pmu_aux_output_match(struct perf_event *event)
{ … }
static void intel_pmu_filter(struct pmu *pmu, int cpu, bool *ret)
{ … }
PMU_FORMAT_ATTR(…);
PMU_FORMAT_ATTR(…);
PMU_FORMAT_ATTR(…);
PMU_FORMAT_ATTR(…);
static struct attribute *intel_arch3_formats_attr[] = …;
static struct attribute *hsw_format_attr[] = …;
static struct attribute *nhm_format_attr[] = …;
static struct attribute *slm_format_attr[] = …;
static struct attribute *cmt_format_attr[] = …;
static struct attribute *skl_format_attr[] = …;
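/*
 * Two PMU templates: core_pmu for architectural perfmon v1 CPUs, which
 * lack the global control/status MSRs, and intel_pmu for v2 and later.
 * intel_pmu_init() starts from one of these and patches in per-model
 * tables and callbacks.
 */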
static __initconst const struct x86_pmu core_pmu = …;
static __initconst const struct x86_pmu intel_pmu = …;
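/*
 * Model and microcode specific quirks, run once during init: they adjust
 * PEBS usability, event tables and counter setup for known errata.
 */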
static __init void intel_clovertown_quirk(void)
{ … }
static const struct x86_cpu_desc isolation_ucodes[] = …;
static void intel_check_pebs_isolation(void)
{ … }
static __init void intel_pebs_isolation_quirk(void)
{ … }
static const struct x86_cpu_desc pebs_ucodes[] = …;
static bool intel_snb_pebs_broken(void)
{ … }
static void intel_snb_check_microcode(void)
{ … }
static bool is_lbr_from(unsigned long msr)
{ … }
static bool check_msr(unsigned long msr, u64 mask)
{ … }
static __init void intel_sandybridge_quirk(void)
{ … }
static const struct { … } intel_arch_events_map[] __initconst = …;
static __init void intel_arch_events_quirk(void)
{ … }
static __init void intel_nehalem_quirk(void)
{ … }
static __init void intel_ht_bug(void)
{ … }
EVENT_ATTR_STR(mem-loads, mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores, mem_st_hsw, "event=0xd0,umask=0x82")
EVENT_ATTR_STR(tx-start, tx_start, "event=0xc9,umask=0x1");
EVENT_ATTR_STR(tx-commit, tx_commit, "event=0xc9,umask=0x2");
EVENT_ATTR_STR(tx-abort, tx_abort, "event=0xc9,umask=0x4");
EVENT_ATTR_STR(tx-capacity, tx_capacity, "event=0x54,umask=0x2");
EVENT_ATTR_STR(tx-conflict, tx_conflict, "event=0x54,umask=0x1");
EVENT_ATTR_STR(el-start, el_start, "event=0xc8,umask=0x1");
EVENT_ATTR_STR(el-commit, el_commit, "event=0xc8,umask=0x2");
EVENT_ATTR_STR(el-abort, el_abort, "event=0xc8,umask=0x4");
EVENT_ATTR_STR(el-capacity, el_capacity, "event=0x54,umask=0x2");
EVENT_ATTR_STR(el-conflict, el_conflict, "event=0x54,umask=0x1");
EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1");
EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1");
static struct attribute *hsw_events_attrs[] = …;
static struct attribute *hsw_mem_events_attrs[] = …;
static struct attribute *hsw_tsx_events_attrs[] = …;
EVENT_ATTR_STR(tx-capacity-read, tx_capacity_read, "event=0x54,umask=0x80");
EVENT_ATTR_STR(tx-capacity-write, tx_capacity_write, "event=0x54,umask=0x2");
EVENT_ATTR_STR(el-capacity-read, el_capacity_read, "event=0x54,umask=0x80");
EVENT_ATTR_STR(el-capacity-write, el_capacity_write, "event=0x54,umask=0x2");
static struct attribute *icl_events_attrs[] = …;
static struct attribute *icl_td_events_attrs[] = …;
static struct attribute *icl_tsx_events_attrs[] = …;
EVENT_ATTR_STR(mem-stores, mem_st_spr, "event=0xcd,umask=0x2");
EVENT_ATTR_STR(mem-loads-aux, mem_ld_aux, "event=0x03,umask=0x82");
static struct attribute *glc_events_attrs[] = …;
static struct attribute *glc_td_events_attrs[] = …;
static struct attribute *glc_tsx_events_attrs[] = …;
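/*
 * freeze_on_smi: sysfs knob that sets FREEZE_WHILE_SMM in IA32_DEBUGCTL
 * on every CPU so counters stop while firmware runs in SMM, keeping SMI
 * time out of the profiles.
 */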
static ssize_t freeze_on_smi_show(struct device *cdev,
struct device_attribute *attr,
char *buf)
{ … }
static DEFINE_MUTEX(freeze_on_smi_mutex);
static ssize_t freeze_on_smi_store(struct device *cdev,
struct device_attribute *attr,
const char *buf, size_t count)
{ … }
static void update_tfa_sched(void *ignored)
{ … }
static ssize_t show_sysctl_tfa(struct device *cdev,
struct device_attribute *attr,
char *buf)
{ … }
static ssize_t set_sysctl_tfa(struct device *cdev,
struct device_attribute *attr,
const char *buf, size_t count)
{ … }
static DEVICE_ATTR_RW(freeze_on_smi);
static ssize_t branches_show(struct device *cdev,
struct device_attribute *attr,
char *buf)
{ … }
static DEVICE_ATTR_RO(branches);
static ssize_t branch_counter_nr_show(struct device *cdev,
struct device_attribute *attr,
char *buf)
{ … }
static DEVICE_ATTR_RO(branch_counter_nr);
static ssize_t branch_counter_width_show(struct device *cdev,
struct device_attribute *attr,
char *buf)
{ … }
static DEVICE_ATTR_RO(branch_counter_width);
static struct attribute *lbr_attrs[] = …;
static umode_t
lbr_is_visible(struct kobject *kobj, struct attribute *attr, int i)
{ … }
static char pmu_name_str[30];
static DEVICE_STRING_ATTR_RO(pmu_name, 0444, pmu_name_str);
static struct attribute *intel_pmu_caps_attrs[] = …;
static DEVICE_ATTR(allow_tsx_force_abort, 0644,
show_sysctl_tfa,
set_sysctl_tfa);
static struct attribute *intel_pmu_attrs[] = …;
static umode_t
default_is_visible(struct kobject *kobj, struct attribute *attr, int i)
{ … }
static umode_t
tsx_is_visible(struct kobject *kobj, struct attribute *attr, int i)
{ … }
static umode_t
pebs_is_visible(struct kobject *kobj, struct attribute *attr, int i)
{ … }
static umode_t
mem_is_visible(struct kobject *kobj, struct attribute *attr, int i)
{ … }
static umode_t
exra_is_visible(struct kobject *kobj, struct attribute *attr, int i)
{ … }
static umode_t
td_is_visible(struct kobject *kobj, struct attribute *attr, int i)
{ … }
static struct attribute_group group_events_td = …;
static struct attribute_group group_events_mem = …;
static struct attribute_group group_events_tsx = …;
static struct attribute_group group_caps_gen = …;
static struct attribute_group group_caps_lbr = …;
static struct attribute_group group_format_extra = …;
static struct attribute_group group_format_extra_skl = …;
static struct attribute_group group_format_evtsel_ext = …;
static struct attribute_group group_default = …;
static const struct attribute_group *attr_update[] = …;
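/*
 * Hybrid (Alderlake and later) support: each core type gets its own
 * struct x86_hybrid_pmu, registered as a separate perf PMU (cpu_core,
 * cpu_atom). EVENT_ATTR_STR_HYBRID attaches an event string per PMU
 * type; hybrid_big_small entries carry one ';'-separated encoding per
 * core type.
 */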
EVENT_ATTR_STR_HYBRID(slots, slots_adl, "event=0x00,umask=0x4", hybrid_big);
EVENT_ATTR_STR_HYBRID(topdown-retiring, td_retiring_adl, "event=0xc2,umask=0x0;event=0x00,umask=0x80", hybrid_big_small);
EVENT_ATTR_STR_HYBRID(topdown-bad-spec, td_bad_spec_adl, "event=0x73,umask=0x0;event=0x00,umask=0x81", hybrid_big_small);
EVENT_ATTR_STR_HYBRID(topdown-fe-bound, td_fe_bound_adl, "event=0x71,umask=0x0;event=0x00,umask=0x82", hybrid_big_small);
EVENT_ATTR_STR_HYBRID(topdown-be-bound, td_be_bound_adl, "event=0x74,umask=0x0;event=0x00,umask=0x83", hybrid_big_small);
EVENT_ATTR_STR_HYBRID(topdown-heavy-ops, td_heavy_ops_adl, "event=0x00,umask=0x84", hybrid_big);
EVENT_ATTR_STR_HYBRID(topdown-br-mispredict, td_br_mis_adl, "event=0x00,umask=0x85", hybrid_big);
EVENT_ATTR_STR_HYBRID(topdown-fetch-lat, td_fetch_lat_adl, "event=0x00,umask=0x86", hybrid_big);
EVENT_ATTR_STR_HYBRID(topdown-mem-bound, td_mem_bound_adl, "event=0x00,umask=0x87", hybrid_big);
static struct attribute *adl_hybrid_events_attrs[] = …;
EVENT_ATTR_STR_HYBRID(topdown-retiring, td_retiring_lnl, "event=0xc2,umask=0x02;event=0x00,umask=0x80", hybrid_big_small);
EVENT_ATTR_STR_HYBRID(topdown-fe-bound, td_fe_bound_lnl, "event=0x9c,umask=0x01;event=0x00,umask=0x82", hybrid_big_small);
EVENT_ATTR_STR_HYBRID(topdown-be-bound, td_be_bound_lnl, "event=0xa4,umask=0x02;event=0x00,umask=0x83", hybrid_big_small);
static struct attribute *lnl_hybrid_events_attrs[] = …;
EVENT_ATTR_STR_HYBRID(mem-loads, mem_ld_adl, "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3", hybrid_big_small);
EVENT_ATTR_STR_HYBRID(mem-stores, mem_st_adl, "event=0xd0,umask=0x6;event=0xcd,umask=0x2", hybrid_big_small);
EVENT_ATTR_STR_HYBRID(mem-loads-aux, mem_ld_aux_adl, "event=0x03,umask=0x82", hybrid_big);
static struct attribute *adl_hybrid_mem_attrs[] = …;
static struct attribute *mtl_hybrid_mem_attrs[] = …;
EVENT_ATTR_STR_HYBRID(tx-start, tx_start_adl, "event=0xc9,umask=0x1", hybrid_big);
EVENT_ATTR_STR_HYBRID(tx-commit, tx_commit_adl, "event=0xc9,umask=0x2", hybrid_big);
EVENT_ATTR_STR_HYBRID(tx-abort, tx_abort_adl, "event=0xc9,umask=0x4", hybrid_big);
EVENT_ATTR_STR_HYBRID(tx-conflict, tx_conflict_adl, "event=0x54,umask=0x1", hybrid_big);
EVENT_ATTR_STR_HYBRID(cycles-t, cycles_t_adl, "event=0x3c,in_tx=1", hybrid_big);
EVENT_ATTR_STR_HYBRID(cycles-ct, cycles_ct_adl, "event=0x3c,in_tx=1,in_tx_cp=1", hybrid_big);
EVENT_ATTR_STR_HYBRID(tx-capacity-read, tx_capacity_read_adl, "event=0x54,umask=0x80", hybrid_big);
EVENT_ATTR_STR_HYBRID(tx-capacity-write, tx_capacity_write_adl, "event=0x54,umask=0x2", hybrid_big);
static struct attribute *adl_hybrid_tsx_attrs[] = …;
FORMAT_ATTR_HYBRID(…);
FORMAT_ATTR_HYBRID(…);
FORMAT_ATTR_HYBRID(…);
FORMAT_ATTR_HYBRID(…);
FORMAT_ATTR_HYBRID(…);
#define ADL_HYBRID_RTM_FORMAT_ATTR …
#define ADL_HYBRID_FORMAT_ATTR …
static struct attribute *adl_hybrid_extra_attr_rtm[] = …;
static struct attribute *adl_hybrid_extra_attr[] = …;
FORMAT_ATTR_HYBRID(…);
static struct attribute *mtl_hybrid_extra_attr_rtm[] = …;
static struct attribute *mtl_hybrid_extra_attr[] = …;
static bool is_attr_for_this_pmu(struct kobject *kobj, struct attribute *attr)
{ … }
static umode_t hybrid_events_is_visible(struct kobject *kobj,
struct attribute *attr, int i)
{ … }
static inline int hybrid_find_supported_cpu(struct x86_hybrid_pmu *pmu)
{ … }
static umode_t hybrid_tsx_is_visible(struct kobject *kobj,
struct attribute *attr, int i)
{ … }
static umode_t hybrid_format_is_visible(struct kobject *kobj,
struct attribute *attr, int i)
{ … }
static umode_t hybrid_td_is_visible(struct kobject *kobj,
struct attribute *attr, int i)
{ … }
static struct attribute_group hybrid_group_events_td = …;
static struct attribute_group hybrid_group_events_mem = …;
static struct attribute_group hybrid_group_events_tsx = …;
static struct attribute_group hybrid_group_format_extra = …;
static ssize_t intel_hybrid_get_attr_cpus(struct device *dev,
struct device_attribute *attr,
char *buf)
{ … }
static DEVICE_ATTR(cpus, S_IRUGO, intel_hybrid_get_attr_cpus, NULL);
static struct attribute *intel_hybrid_cpus_attrs[] = …;
static struct attribute_group hybrid_group_cpus = …;
static const struct attribute_group *hybrid_attr_update[] = …;
static struct attribute *empty_attrs;
static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints,
u64 cntr_mask,
u64 fixed_cntr_mask,
u64 intel_ctrl)
{ … }
static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs)
{ … }
static inline int intel_pmu_v6_addr_offset(int index, bool eventsel)
{ … }
static const struct { … } intel_hybrid_pmu_type_map[] __initconst = …;
static __always_inline int intel_pmu_init_hybrid(enum hybrid_pmu_type pmus)
{ … }
static __always_inline void intel_pmu_ref_cycles_ext(void)
{ … }
static __always_inline void intel_pmu_init_glc(struct pmu *pmu)
{ … }
static __always_inline void intel_pmu_init_grt(struct pmu *pmu)
{ … }
static __always_inline void intel_pmu_init_lnc(struct pmu *pmu)
{ … }
static __always_inline void intel_pmu_init_skt(struct pmu *pmu)
{ … }
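/*
 * intel_pmu_init(): reads the architectural perfmon CPUID leaf (0xa),
 * selects event tables, constraints and quirks in a large per-model
 * switch, validates the counter masks, and registers the PMU(s).
 * fixup_ht_bug() runs later, once sibling topology is known, to relax
 * the HT erratum workaround when it is not needed.
 */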
__init int intel_pmu_init(void)
{ … }
static __init int fixup_ht_bug(void)
{ … }
subsys_initcall(…);