linux/arch/x86/include/asm/perf_event.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

#include <linux/static_call.h>

/*
 * Performance event hw details:
 */

#define INTEL_PMC_MAX_GENERIC
#define INTEL_PMC_MAX_FIXED
#define INTEL_PMC_IDX_FIXED

#define X86_PMC_IDX_MAX

#define MSR_ARCH_PERFMON_PERFCTR0
#define MSR_ARCH_PERFMON_PERFCTR1

#define MSR_ARCH_PERFMON_EVENTSEL0
#define MSR_ARCH_PERFMON_EVENTSEL1

#define ARCH_PERFMON_EVENTSEL_EVENT
#define ARCH_PERFMON_EVENTSEL_UMASK
#define ARCH_PERFMON_EVENTSEL_USR
#define ARCH_PERFMON_EVENTSEL_OS
#define ARCH_PERFMON_EVENTSEL_EDGE
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL
#define ARCH_PERFMON_EVENTSEL_INT
#define ARCH_PERFMON_EVENTSEL_ANY
#define ARCH_PERFMON_EVENTSEL_ENABLE
#define ARCH_PERFMON_EVENTSEL_INV
#define ARCH_PERFMON_EVENTSEL_CMASK
#define ARCH_PERFMON_EVENTSEL_BR_CNTR
#define ARCH_PERFMON_EVENTSEL_EQ
#define ARCH_PERFMON_EVENTSEL_UMASK2
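
/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * composing a general-purpose event-select value from the field masks above.
 * The helper name is hypothetical, and it assumes the usual PERFEVTSEL layout
 * behind the masks (event code in bits 7:0, unit mask in bits 15:8).
 */
static inline u64 example_raw_eventsel(u8 event, u8 umask)
{
	u64 config = 0;

	config |= event & ARCH_PERFMON_EVENTSEL_EVENT;		   /* event code   */
	config |= ((u64)umask << 8) & ARCH_PERFMON_EVENTSEL_UMASK; /* unit mask    */
	config |= ARCH_PERFMON_EVENTSEL_USR;			   /* count user   */
	config |= ARCH_PERFMON_EVENTSEL_OS;			   /* count kernel */
	config |= ARCH_PERFMON_EVENTSEL_ENABLE;			   /* enable PMC   */

	return config;
}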

#define INTEL_FIXED_BITS_MASK
#define INTEL_FIXED_BITS_STRIDE
#define INTEL_FIXED_0_KERNEL
#define INTEL_FIXED_0_USER
#define INTEL_FIXED_0_ANYTHREAD
#define INTEL_FIXED_0_ENABLE_PMI

#define HSW_IN_TX
#define HSW_IN_TX_CHECKPOINTED
#define ICL_EVENTSEL_ADAPTIVE
#define ICL_FIXED_0_ADAPTIVE

#define intel_fixed_bits_by_idx(_idx, _bits)
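
/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * building one fixed counter's contribution to MSR_ARCH_PERFMON_FIXED_CTR_CTRL.
 * It assumes intel_fixed_bits_by_idx() shifts the per-counter control bits
 * into that counter's field (a stride of INTEL_FIXED_BITS_STRIDE bits per
 * counter); the helper name is hypothetical.
 */
static inline u64 example_fixed_ctrl_bits(int idx, bool user, bool kernel)
{
	u64 bits = INTEL_FIXED_0_ENABLE_PMI;	/* raise a PMI on overflow */

	if (user)
		bits |= INTEL_FIXED_0_USER;	/* count at CPL > 0 */
	if (kernel)
		bits |= INTEL_FIXED_0_KERNEL;	/* count at CPL == 0 */

	return intel_fixed_bits_by_idx(idx, bits);
}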

#define AMD64_EVENTSEL_INT_CORE_ENABLE
#define AMD64_EVENTSEL_GUESTONLY
#define AMD64_EVENTSEL_HOSTONLY

#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT
#define AMD64_EVENTSEL_INT_CORE_SEL_MASK

#define AMD64_EVENTSEL_EVENT
#define INTEL_ARCH_EVENT_MASK

#define AMD64_L3_SLICE_SHIFT
#define AMD64_L3_SLICE_MASK
#define AMD64_L3_SLICEID_MASK

#define AMD64_L3_THREAD_SHIFT
#define AMD64_L3_THREAD_MASK
#define AMD64_L3_F19H_THREAD_MASK

#define AMD64_L3_EN_ALL_CORES
#define AMD64_L3_EN_ALL_SLICES

#define AMD64_L3_COREID_SHIFT
#define AMD64_L3_COREID_MASK

#define X86_RAW_EVENT_MASK
#define X86_ALL_EVENT_FLAGS
#define AMD64_RAW_EVENT_MASK
#define AMD64_RAW_EVENT_MASK_NB

#define AMD64_PERFMON_V2_EVENTSEL_EVENT_NB

#define AMD64_PERFMON_V2_EVENTSEL_UMASK_NB

#define AMD64_PERFMON_V2_RAW_EVENT_MASK_NB

#define AMD64_PERFMON_V2_ENABLE_UMC
#define AMD64_PERFMON_V2_EVENTSEL_EVENT_UMC
#define AMD64_PERFMON_V2_EVENTSEL_RDWRMASK_UMC
#define AMD64_PERFMON_V2_RAW_EVENT_MASK_UMC

#define AMD64_NUM_COUNTERS
#define AMD64_NUM_COUNTERS_CORE
#define AMD64_NUM_COUNTERS_NB

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED
#define ARCH_PERFMON_EVENTS_COUNT

#define PEBS_DATACFG_MEMINFO
#define PEBS_DATACFG_GP
#define PEBS_DATACFG_XMMS
#define PEBS_DATACFG_LBRS
#define PEBS_DATACFG_LBR_SHIFT

/* Steal the highest bit of pebs_data_cfg for SW usage */
#define PEBS_UPDATE_DS_SW

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {};

union cpuid10_ebx {};

union cpuid10_edx {};
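
/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * enumerating the architectural PMU via CPUID leaf 0xa. It assumes the
 * unions above split the registers into the SDM-described fields
 * (version_id, num_counters, bit_width, ...) and uses the kernel's cpuid()
 * helper; the function name is hypothetical.
 */
static inline unsigned int example_arch_pmu_num_gp_counters(void)
{
	union cpuid10_eax eax;
	unsigned int ebx, ecx, edx;

	cpuid(0xa, &eax.full, &ebx, &ecx, &edx);

	if (!eax.split.version_id)		/* no architectural PMU */
		return 0;

	return eax.split.num_counters;		/* general-purpose counters */
}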

/*
 * Intel "Architectural Performance Monitoring extension" CPUID
 * detection/enumeration details:
 */
#define ARCH_PERFMON_EXT_LEAF
#define ARCH_PERFMON_EXT_UMASK2
#define ARCH_PERFMON_EXT_EQ
#define ARCH_PERFMON_NUM_COUNTER_LEAF_BIT
#define ARCH_PERFMON_NUM_COUNTER_LEAF

/*
 * Intel Architectural LBR CPUID detection/enumeration details:
 */
union cpuid28_eax {};

union cpuid28_ebx {};

union cpuid28_ecx {};

/*
 * AMD "Extended Performance Monitoring and Debug" CPUID
 * detection/enumeration details:
 */
union cpuid_0x80000022_ebx {};

struct x86_pmu_capability {};

/*
 * Fixed-purpose performance events:
 */

/* RDPMC offset for Fixed PMCs */
#define INTEL_PMC_FIXED_RDPMC_BASE
#define INTEL_PMC_FIXED_RDPMC_METRICS

/*
 * All the fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL

/*
 * There is no event-code assigned to the fixed-mode PMCs.
 *
 * For a fixed-mode PMC that has an equivalent event on a general-purpose
 * PMC, the event-code of the equivalent event is used for the fixed-mode
 * PMC, e.g., Instr_Retired.Any and CPU_CLK_Unhalted.Core.
 *
 * For a fixed-mode PMC that doesn't have an equivalent event, a
 * pseudo-encoding is used, e.g., CPU_CLK_Unhalted.Ref and TOPDOWN.SLOTS.
 * The pseudo event-code for a fixed-mode PMC must be 0x00.
 * The pseudo umask-code is 0xX, where X equals the index of the fixed
 * counter + 1, e.g., fixed counter 2 has the pseudo-encoding 0x0300.
 *
 * The counts are available in separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES

/* CPU_CLK_Unhalted.Ref: event=0x00,umask=0x3 (pseudo-encoding) */
#define MSR_ARCH_PERFMON_FIXED_CTR2
#define INTEL_PMC_IDX_FIXED_REF_CYCLES
#define INTEL_PMC_MSK_FIXED_REF_CYCLES

/* TOPDOWN.SLOTS: event=0x00,umask=0x4 (pseudo-encoding) */
#define MSR_ARCH_PERFMON_FIXED_CTR3
#define INTEL_PMC_IDX_FIXED_SLOTS
#define INTEL_PMC_MSK_FIXED_SLOTS

/* TOPDOWN_BAD_SPECULATION.ALL: fixed counter 4 (Atom only) */
/* TOPDOWN_FE_BOUND.ALL: fixed counter 5 (Atom only) */
/* TOPDOWN_RETIRING.ALL: fixed counter 6 (Atom only) */

static inline bool use_fixed_pseudo_encoding(u64 code)
{
	/* Pseudo-encoded fixed events have event code 0x00 (low byte clear) */
	return !(code & 0xff);
}
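
/*
 * Worked example (editorial addition, not part of the original header):
 * constructing the pseudo-encoding described above - event code 0x00 and
 * umask equal to the fixed counter index + 1 - so fixed counter 2 yields
 * 0x0300. The helper name is hypothetical.
 */
static inline u64 example_fixed_pseudo_encoding(int fixed_idx)
{
	/* umask lives in bits 15:8; the event code stays 0x00 */
	u64 code = (u64)(fixed_idx + 1) << 8;

	/* such a config satisfies use_fixed_pseudo_encoding() above */
	return code;
}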

/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose the value 47 for the fixed index of BTS, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define INTEL_PMC_IDX_FIXED_BTS

/*
 * The PERF_METRICS MSR is modeled as several magic fixed-mode PMCs, one for
 * each TopDown metric event.
 *
 * Internally the TopDown metric events are mapped to the FxCtr 3 (SLOTS).
 */
#define INTEL_PMC_IDX_METRIC_BASE
#define INTEL_PMC_IDX_TD_RETIRING
#define INTEL_PMC_IDX_TD_BAD_SPEC
#define INTEL_PMC_IDX_TD_FE_BOUND
#define INTEL_PMC_IDX_TD_BE_BOUND
#define INTEL_PMC_IDX_TD_HEAVY_OPS
#define INTEL_PMC_IDX_TD_BR_MISPREDICT
#define INTEL_PMC_IDX_TD_FETCH_LAT
#define INTEL_PMC_IDX_TD_MEM_BOUND
#define INTEL_PMC_IDX_METRIC_END
#define INTEL_PMC_MSK_TOPDOWN

/*
 * There is no event-code assigned to the TopDown events.
 *
 * For the slots event, use the pseudo code of the fixed counter 3.
 *
 * For the metric events, the pseudo event-code is 0x00.
 * The pseudo umask-code starts from the middle of the pseudo event
 * space, 0x80.
 */
#define INTEL_TD_SLOTS
/* Level 1 metrics */
#define INTEL_TD_METRIC_RETIRING
#define INTEL_TD_METRIC_BAD_SPEC
#define INTEL_TD_METRIC_FE_BOUND
#define INTEL_TD_METRIC_BE_BOUND
/* Level 2 metrics */
#define INTEL_TD_METRIC_HEAVY_OPS
#define INTEL_TD_METRIC_BR_MISPREDICT
#define INTEL_TD_METRIC_FETCH_LAT
#define INTEL_TD_METRIC_MEM_BOUND

#define INTEL_TD_METRIC_MAX
#define INTEL_TD_METRIC_NUM

static inline bool is_metric_idx(int idx)
{
	return (unsigned)(idx - INTEL_PMC_IDX_METRIC_BASE) < INTEL_TD_METRIC_NUM;
}

static inline bool is_topdown_idx(int idx)
{
	return is_metric_idx(idx) || idx == INTEL_PMC_IDX_FIXED_SLOTS;
}
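
/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * mapping a TopDown metric pseudo-encoding to its magic fixed-mode index,
 * assuming the metric umasks start at 0x80 and the INTEL_TD_METRIC_* values
 * are spaced 0x100 apart in config space; the helper name is hypothetical.
 */
static inline int example_metric_config_to_idx(u64 config)
{
	/* e.g. INTEL_TD_METRIC_RETIRING (umask 0x80) -> INTEL_PMC_IDX_TD_RETIRING */
	return INTEL_PMC_IDX_METRIC_BASE +
	       (int)((config - INTEL_TD_METRIC_RETIRING) >> 8);
}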

#define INTEL_PMC_OTHER_TOPDOWN_BITS(bit)

#define GLOBAL_STATUS_COND_CHG
#define GLOBAL_STATUS_BUFFER_OVF_BIT
#define GLOBAL_STATUS_BUFFER_OVF
#define GLOBAL_STATUS_UNC_OVF
#define GLOBAL_STATUS_ASIF
#define GLOBAL_STATUS_COUNTERS_FROZEN
#define GLOBAL_STATUS_LBRS_FROZEN_BIT
#define GLOBAL_STATUS_LBRS_FROZEN
#define GLOBAL_STATUS_TRACE_TOPAPMI_BIT
#define GLOBAL_STATUS_TRACE_TOPAPMI
#define GLOBAL_STATUS_PERF_METRICS_OVF_BIT

#define GLOBAL_CTRL_EN_PERF_METRICS
/*
 * We model guest LBR event tracing as another fixed-mode PMC like BTS.
 *
 * We choose bit 58 because it's used to indicate the LBR stack frozen state
 * for architectural perfmon v4, and we unconditionally mask that bit in
 * handle_pmi_common(), so it'll never be set in the overflow handling.
 *
 * With this fake counter assigned, the guest LBR event user (such as KVM)
 * can program the LBR registers on its own, and we don't actually do anything
 * with them in the host context.
 */
#define INTEL_PMC_IDX_FIXED_VLBR

/*
 * Pseudo-encoding the guest LBR event as event=0x00,umask=0x1b,
 * since it would claim bit 58 which is effectively Fixed26.
 */
#define INTEL_FIXED_VLBR_EVENT
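
/*
 * Worked example (editorial addition, not part of the original header):
 * assuming the fixed counters start at index 32, bit 58 corresponds to
 * fixed counter 58 - 32 = 26 ("Fixed26" above), and the fixed-counter
 * pseudo umask is index + 1 = 27 = 0x1b - hence event=0x00,umask=0x1b, i.e.
 *
 *	INTEL_FIXED_VLBR_EVENT ==
 *		((INTEL_PMC_IDX_FIXED_VLBR - INTEL_PMC_IDX_FIXED + 1) << 8)
 */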

/*
 * Adaptive PEBS v4
 */

struct pebs_basic {};

struct pebs_meminfo {};

struct pebs_gprs {};

struct pebs_xmm {};

/*
 * AMD Extended Performance Monitoring and Debug cpuid feature detection
 */
#define EXT_PERFMON_DEBUG_FEATURES

/*
 * IBS cpuid feature detection
 */

#define IBS_CPUID_FEATURES

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL
#define IBS_CAPS_FETCHSAM
#define IBS_CAPS_OPSAM
#define IBS_CAPS_RDWROPCNT
#define IBS_CAPS_OPCNT
#define IBS_CAPS_BRNTRGT
#define IBS_CAPS_OPCNTEXT
#define IBS_CAPS_RIPINVALIDCHK
#define IBS_CAPS_OPBRNFUSE
#define IBS_CAPS_FETCHCTLEXTD
#define IBS_CAPS_OPDATA4
#define IBS_CAPS_ZEN4

#define IBS_CAPS_DEFAULT
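
/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * testing IBS capability bits. Per the comment above, bit 0 (IBS_CAPS_AVAIL)
 * only says that IBS exists; individual features are separate bits. The
 * helper name is hypothetical.
 */
static inline bool example_ibs_op_cnt_supported(u32 caps)
{
	if (!(caps & IBS_CAPS_AVAIL))
		return false;			/* no IBS at all */

	return caps & IBS_CAPS_OPCNT;		/* op-count (dispatched ops) mode */
}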

/*
 * IBS APIC setup
 */
#define IBSCTL
#define IBSCTL_LVT_OFFSET_VALID
#define IBSCTL_LVT_OFFSET_MASK

/* IBS fetch bits/masks */
#define IBS_FETCH_L3MISSONLY
#define IBS_FETCH_RAND_EN
#define IBS_FETCH_VAL
#define IBS_FETCH_ENABLE
#define IBS_FETCH_CNT
#define IBS_FETCH_MAX_CNT

/*
 * IBS op bits/masks
 * The lower 7 bits of the current count are random bits
 * preloaded by hardware and ignored in software
 */
#define IBS_OP_CUR_CNT
#define IBS_OP_CUR_CNT_RAND
#define IBS_OP_CNT_CTL
#define IBS_OP_VAL
#define IBS_OP_ENABLE
#define IBS_OP_L3MISSONLY
#define IBS_OP_MAX_CNT
#define IBS_OP_MAX_CNT_EXT
#define IBS_OP_MAX_CNT_EXT_MASK
#define IBS_RIP_INVALID
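
/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * extracting the current op count while ignoring the hardware-randomized low
 * bits, as the comment above describes. Masking with both IBS_OP_CUR_CNT and
 * ~IBS_OP_CUR_CNT_RAND is harmless whichever bits each mask covers; the
 * helper name is hypothetical.
 */
static inline u64 example_ibs_op_cur_cnt(u64 op_ctl)
{
	return op_ctl & IBS_OP_CUR_CNT & ~IBS_OP_CUR_CNT_RAND;
}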

#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
extern int forward_event_to_ibs(struct perf_event *event);
#else
static inline u32 get_ibs_caps(void) { return 0; }
static inline int forward_event_to_ibs(struct perf_event *event) { return -ENOENT; }
#endif

#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bits {3,5} of the CPU eflags register. These flags are otherwise
 * unused and ABI-specified to be 0, so nobody should care what we do with
 * them.
 *
 * EXACT - the IP points to the exact instruction that triggered the
 *         event (HW bugs exempt).
 * VM    - original X86_VM_MASK; see set_linear_ip().
 */
#define PERF_EFLAGS_EXACT
#define PERF_EFLAGS_VM

struct pt_regs;
struct x86_perf_regs {};
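
/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * how the stolen eflags bits above are meant to be used - a precise-sampling
 * driver tags the saved regs with PERF_EFLAGS_EXACT, and perf_misc_flags()
 * later reports the sample as exact. It assumes the full struct pt_regs
 * definition is available; the helper names are hypothetical.
 */
static inline void example_mark_exact_ip(struct pt_regs *regs)
{
	regs->flags |= PERF_EFLAGS_EXACT;	/* IP is the precise instruction */
}

static inline bool example_ip_is_exact(struct pt_regs *regs)
{
	return regs->flags & PERF_EFLAGS_EXACT;
}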

extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs)

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 of the flags word to pass exact information; see
 * perf_misc_flags() and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip)

struct perf_guest_switch_msr {};

struct x86_pmu_lbr {};

extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern u64 perf_get_hw_event_config(int hw_event);
extern void perf_check_microcode(void);
extern void perf_clear_dirty_counters(void);
extern int x86_perf_rdpmc_index(struct perf_event *event);
#else
static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	memset(cap, 0, sizeof(*cap));
}

static inline u64 perf_get_hw_event_config(int hw_event)
{
	return 0;
}

static inline void perf_events_lapic_init(void)	{ }
static inline void perf_check_microcode(void) { }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data);
extern void x86_perf_get_lbr(struct x86_pmu_lbr *lbr);
#else
struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data);
static inline void x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
{
	memset(lbr, 0, sizeof(*lbr));
}
#endif

#ifdef CONFIG_CPU_SUP_INTEL
extern void intel_pt_handle_vmx(int on);
#else
static inline void intel_pt_handle_vmx(int on)
{
}
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
extern void amd_pmu_enable_virt(void);
extern void amd_pmu_disable_virt(void);

#if defined(CONFIG_PERF_EVENTS_AMD_BRS)

#define PERF_NEEDS_LOPWR_CB

/*
 * The architectural low-power callback impacts
 * drivers/acpi/processor_idle.c and
 * drivers/acpi/acpi_pad.c.
 */
extern void perf_amd_brs_lopwr_cb(bool lopwr_in);

DECLARE_STATIC_CALL(perf_lopwr_cb, perf_amd_brs_lopwr_cb);

static __always_inline void perf_lopwr_cb(bool lopwr_in)
{
	static_call_mod(perf_lopwr_cb)(lopwr_in);
}
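
/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * the intended call pattern from the low-power paths mentioned above (the
 * ACPI idle drivers) - bracket the low-power entry with the callback. The
 * function names are hypothetical.
 */
static inline void example_low_power_idle(void (*do_idle)(void))
{
	perf_lopwr_cb(true);	/* entering a low-power state */
	do_idle();
	perf_lopwr_cb(false);	/* leaving it again */
}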

#endif /* CONFIG_PERF_EVENTS_AMD_BRS */

#else
static inline void amd_pmu_enable_virt(void) { }
static inline void amd_pmu_disable_virt(void) { }
#endif

#define arch_perf_out_copy_user

#endif /* _ASM_X86_PERF_EVENT_H */