/* linux/arch/x86/events/intel/ds.c */

// SPDX-License-Identifier: GPL-2.0
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>

#include <asm/cpu_entry_area.h>
#include <asm/debugreg.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/insn.h>
#include <asm/io.h>
#include <asm/timer.h>

#include "../perf_event.h"

/* Waste a full page so it can be mapped into the cpu_entry_area */
DEFINE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store);

/* The size of a BTS record in bytes: */
/* NOTE(review): macro value elided by extraction — upstream defines a real size; restore before use. */
#define BTS_RECORD_SIZE

/* NOTE(review): macro value elided by extraction. */
#define PEBS_FIXUP_SIZE

/*
 * pebs_record_32 for p4 and core not supported

struct pebs_record_32 {
	u32 flags, ip;
	u32 ax, bc, cx, dx;
	u32 si, di, bp, sp;
};

 */

/*
 * NOTE(review): this declaration was truncated by the extraction and is not
 * valid C as written. Upstream this is presumably a union describing the
 * PEBS Load Latency data-source encoding — confirm against the original
 * ds.c and restore the full declaration.
 */
intel_x86_pebs_dse;


/*
 * Map PEBS Load Latency Data Source encodings to generic
 * memory data source information
 */
/*
 * NOTE(review): the expansions of the helper macros below were elided by
 * the extraction; as written they all expand to nothing. Restore the
 * upstream definitions before relying on any table built from them.
 */
#define P(a, b)
#define OP_LH
#define LEVEL(x)
#define REM
#define SNOOP_NONE_MISS

/* Version for Sandy Bridge and later */
/*
 * NOTE(review): initializer elided by extraction — "=;" is not valid C.
 * Upstream populates this table with generic memory data-source encodings;
 * restore it from the original file.
 */
static u64 pebs_data_source[PERF_PEBS_DATA_SOURCE_MAX] =;

/* Patch up minor differences in the bits */
/*
 * NOTE(review): every function body in this section was elided by the
 * extraction ("{}"). The one-line purposes below are inferred only from
 * the function names — confirm against the upstream file.
 */
/* Nehalem: adjust the shared pebs_data_source[] table for this uarch (presumed). */
void __init intel_pmu_pebs_data_source_nhm(void)
{}

/* Skylake helper: fill @data_source, varying with persistent-memory presence (@pmem). */
static void __init __intel_pmu_pebs_data_source_skl(bool pmem, u64 *data_source)
{}

/* Skylake and later: public wrapper around the SKL table setup. */
void __init intel_pmu_pebs_data_source_skl(bool pmem)
{}

/* Gracemont (e-core) helper: fill @data_source for the GRT encoding. */
static void __init __intel_pmu_pebs_data_source_grt(u64 *data_source)
{}

/* Gracemont: public wrapper. */
void __init intel_pmu_pebs_data_source_grt(void)
{}

/* Alder Lake hybrid: presumably sets up tables for both core types — confirm upstream. */
void __init intel_pmu_pebs_data_source_adl(void)
{}

/* Crestmont (e-core) helper: fill @data_source for the CMT encoding. */
static void __init __intel_pmu_pebs_data_source_cmt(u64 *data_source)
{}

/* Meteor Lake hybrid: data-source table setup. */
void __init intel_pmu_pebs_data_source_mtl(void)
{}

/* Crestmont: public wrapper. */
void __init intel_pmu_pebs_data_source_cmt(void)
{}

/* Version for Lion Cove and later */
/* NOTE(review): initializer elided by extraction — "=;" is not valid C; restore from upstream. */
static u64 lnc_pebs_data_source[PERF_PEBS_DATA_SOURCE_MAX] =;

/* Lunar Lake: presumably installs the Lion Cove data-source table — body elided, confirm upstream. */
void __init intel_pmu_pebs_data_source_lnl(void)
{}

/*
 * NOTE(review): all bodies in this section were elided by the extraction;
 * purposes below are inferred from names and the surviving comments only.
 */
/* Decode a precise-store PEBS @status word into a perf mem data-source value (presumed). */
static u64 precise_store_data(u64 status)
{}

/* Haswell data-linear-address variant of the status decode (presumed). */
static u64 precise_datala_hsw(struct perf_event *event, u64 status)
{}

/* Fold TLB-miss and lock-prefix flags into the data-source value in *@val (presumed). */
static inline void pebs_set_tlb_lock(u64 *val, bool tlb, bool lock)
{}

/* Retrieve the latency data for e-core of ADL */
static u64 __grt_latency_data(struct perf_event *event, u64 status,
			       u8 dse, bool tlb, bool lock, bool blk)
{}

/* Gracemont wrapper over __grt_latency_data() (presumed — body elided). */
u64 grt_latency_data(struct perf_event *event, u64 status)
{}

/* Retrieve the latency data for e-core of MTL */
u64 cmt_latency_data(struct perf_event *event, u64 status)
{}

/* Lion Cove latency-data decode (body elided). */
static u64 lnc_latency_data(struct perf_event *event, u64 status)
{}

/* Lunar Lake latency-data decode (body elided). */
u64 lnl_latency_data(struct perf_event *event, u64 status)
{}

/* Decode load-latency PEBS @status via the data-source table (presumed). */
static u64 load_latency_data(struct perf_event *event, u64 status)
{}

/* Decode store-latency PEBS @status (presumed). */
static u64 store_latency_data(struct perf_event *event, u64 status)
{}

/*
 * NOTE(review): the member lists of the PEBS record layouts below were
 * elided by the extraction; upstream defines one layout per hardware
 * record format. Restore them — empty structs here are meaningless.
 */
struct pebs_record_core {};

struct pebs_record_nhm {};

/*
 * Same as pebs_record_nhm, with two additional fields.
 */
struct pebs_record_hsw {};

/* NOTE(review): truncated by extraction — upstream presumably a union; not valid C as written. */
hsw_tsx_tuning;

/* NOTE(review): macro value elided by extraction. */
#define PEBS_HSW_TSX_FLAGS

/* Same as HSW, plus TSC */

struct pebs_record_skl {};

/*
 * NOTE(review): all bodies in this DS-buffer-management section were
 * elided by the extraction; comments are inferred from names only.
 */
/* Per-CPU: bring up the debug store on @cpu (body elided). */
void init_debug_store_on_cpu(int cpu)
{}

/* Per-CPU: tear down the debug store on @cpu (body elided). */
void fini_debug_store_on_cpu(int cpu)
{}

/* Per-CPU scratch pointer — presumably used by the PEBS IP fixup for insn decode; confirm upstream. */
static DEFINE_PER_CPU(void *, insn_buffer);

/* Map @addr (@size bytes) into the cpu_entry_area at @cea with @prot (presumed). */
static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot)
{}

/* Unmap @size bytes of cpu_entry_area at @cea (presumed). */
static void ds_clear_cea(void *cea, size_t size)
{}

/* Allocate page-backed DS memory for @cpu (body elided). */
static void *dsalloc_pages(size_t size, gfp_t flags, int cpu)
{}

/* Free memory obtained from dsalloc_pages() (body elided). */
static void dsfree_pages(const void *buffer, size_t size)
{}

/* Allocate the per-CPU PEBS buffer — presumably returns 0 on success; confirm upstream. */
static int alloc_pebs_buffer(int cpu)
{}

static void release_pebs_buffer(int cpu)
{}

/* Allocate the per-CPU BTS buffer — presumably returns 0 on success; confirm upstream. */
static int alloc_bts_buffer(int cpu)
{}

static void release_bts_buffer(int cpu)
{}

/* Allocate the per-CPU debug_store control area (body elided). */
static int alloc_ds_buffer(int cpu)
{}

static void release_ds_buffer(int cpu)
{}

/* Global teardown of all DS buffers (body elided). */
void release_ds_buffers(void)
{}

/* Global reservation of DS buffers at PMU init (body elided). */
void reserve_ds_buffers(void)
{}

/*
 * BTS
 */

/* NOTE(review): initializer elided by extraction — "=;" is not valid C; restore from upstream. */
struct event_constraint bts_constraint =;

/* Enable branch trace store with the given @config (body elided). */
void intel_pmu_enable_bts(u64 config)
{}

/* Disable branch trace store (body elided). */
void intel_pmu_disable_bts(void)
{}

/* Drain pending BTS records into perf samples (body elided; return semantics unknown — confirm upstream). */
int intel_pmu_drain_bts_buffer(void)
{}

/* Drain pending PEBS records (body elided). */
static inline void intel_pmu_drain_pebs_buffer(void)
{}

/*
 * PEBS
 */
/*
 * NOTE(review): every constraint-table initializer in this section was
 * elided by the extraction ("=;" is not valid C). Upstream fills each
 * array with per-microarchitecture event-constraint entries — restore
 * them from the original file.
 */
struct event_constraint intel_core2_pebs_event_constraints[] =;

struct event_constraint intel_atom_pebs_event_constraints[] =;

struct event_constraint intel_slm_pebs_event_constraints[] =;

struct event_constraint intel_glm_pebs_event_constraints[] =;

struct event_constraint intel_grt_pebs_event_constraints[] =;

struct event_constraint intel_nehalem_pebs_event_constraints[] =;

struct event_constraint intel_westmere_pebs_event_constraints[] =;

struct event_constraint intel_snb_pebs_event_constraints[] =;

struct event_constraint intel_ivb_pebs_event_constraints[] =;

struct event_constraint intel_hsw_pebs_event_constraints[] =;

struct event_constraint intel_bdw_pebs_event_constraints[] =;


struct event_constraint intel_skl_pebs_event_constraints[] =;

struct event_constraint intel_icl_pebs_event_constraints[] =;

struct event_constraint intel_glc_pebs_event_constraints[] =;

struct event_constraint intel_lnc_pebs_event_constraints[] =;

/* Look up the PEBS constraint that applies to @event, if any (body elided). */
struct event_constraint *intel_pebs_constraints(struct perf_event *event)
{}

/*
 * We need the sched_task callback even for per-cpu events when we use
 * the large interrupt threshold, such that we can provide PID and TID
 * to PEBS samples.
 */
/*
 * NOTE(review): all bodies in this PEBS state-management section were
 * elided by the extraction; comments are inferred from names only.
 */
static inline bool pebs_needs_sched_cb(struct cpu_hw_events *cpuc)
{}

/* sched_task callback: presumably drains/updates PEBS state across context switch. */
void intel_pmu_pebs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
{}

/* Recompute the PEBS interrupt threshold for @cpuc (body elided). */
static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
{}

/* Recompute the adaptive-PEBS record size from the active config (body elided). */
static void adaptive_pebs_record_size_update(void)
{}

/* NOTE(review): macro value elided by extraction. */
#define PERF_PEBS_MEMINFO_TYPE

/* Derive the adaptive-PEBS configuration word for @event (body elided). */
static u64 pebs_update_adaptive_cfg(struct perf_event *event)
{}

/* Update per-CPU PEBS bookkeeping when @event is added/removed (@add) (presumed). */
static void
pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
		  struct perf_event *event, bool add)
{}

void intel_pmu_pebs_add(struct perf_event *event)
{}

/* Disable PEBS-output-to-PT for @event (body elided). */
static void intel_pmu_pebs_via_pt_disable(struct perf_event *event)
{}

/* Enable PEBS-output-to-PT for @event (body elided). */
static void intel_pmu_pebs_via_pt_enable(struct perf_event *event)
{}

/* Drain when using the large (multi-record) PEBS threshold (presumed). */
static inline void intel_pmu_drain_large_pebs(struct cpu_hw_events *cpuc)
{}

void intel_pmu_pebs_enable(struct perf_event *event)
{}

void intel_pmu_pebs_del(struct perf_event *event)
{}

void intel_pmu_pebs_disable(struct perf_event *event)
{}

void intel_pmu_pebs_enable_all(void)
{}

void intel_pmu_pebs_disable_all(void)
{}

/* Walk back from the PEBS-reported IP to the precise instruction (presumed — body elided). */
static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
{}

/*
 * NOTE(review): all bodies in this sample-construction section were
 * elided by the extraction; comments are inferred from names only.
 */
/* Extract the TSX weight from @tsx_tuning (presumed). */
static inline u64 intel_get_tsx_weight(u64 tsx_tuning)
{}

/* Build the perf transaction flags from @tsx_tuning and @ax (presumed). */
static inline u64 intel_get_tsx_transaction(u64 tsx_tuning, u64 ax)
{}

/* Read the status field out of a raw PEBS record at @n (presumed). */
static inline u64 get_pebs_status(void *n)
{}

/* NOTE(review): macro value elided by extraction. */
#define PERF_X86_EVENT_PEBS_HSW_PREC

/* Resolve the memory data source for @event from auxiliary PEBS data @aux (presumed). */
static u64 get_data_src(struct perf_event *event, u64 aux)
{}

/* Convert a PEBS @tsc into the sample timestamp (body elided). */
static void setup_pebs_time(struct perf_event *event,
			    struct perf_sample_data *data,
			    u64 tsc)
{}

/* NOTE(review): macro value elided by extraction. */
#define PERF_SAMPLE_ADDR_TYPE

/* Fill @data/@regs from a fixed-format PEBS record at @__pebs (body elided). */
static void setup_pebs_fixed_sample_data(struct perf_event *event,
				   struct pt_regs *iregs, void *__pebs,
				   struct perf_sample_data *data,
				   struct pt_regs *regs)
{}

/* Copy adaptive-PEBS GPR group @gprs into @regs (presumed). */
static void adaptive_pebs_save_regs(struct pt_regs *regs,
				    struct pebs_gprs *gprs)
{}

/* NOTE(review): macro values below elided by extraction. */
#define PEBS_LATENCY_MASK
#define PEBS_CACHE_LATENCY_OFFSET
#define PEBS_RETIRE_LATENCY_OFFSET

/*
 * With adaptive PEBS the layout depends on what fields are configured.
 */

static void setup_pebs_adaptive_sample_data(struct perf_event *event,
					    struct pt_regs *iregs, void *__pebs,
					    struct perf_sample_data *data,
					    struct pt_regs *regs)
{}

/* Find the next record between @base and @top whose status has @bit set (presumed). */
static inline void *
get_next_pebs_record_by_bit(void *base, void *top, int bit)
{}

/* Read the current count for an auto-reload PEBS event (body elided). */
void intel_pmu_auto_reload_read(struct perf_event *event)
{}

/*
 * Special variant of intel_pmu_save_and_restart() for auto-reload.
 */
static int
intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
{}

/*
 * NOTE(review): bodies below elided by extraction. This is presumably the
 * common per-event drain path plus one drain handler per record format —
 * confirm against upstream.
 */
static __always_inline void
__intel_pmu_pebs_event(struct perf_event *event,
		       struct pt_regs *iregs,
		       struct perf_sample_data *data,
		       void *base, void *top,
		       int bit, int count,
		       void (*setup_sample)(struct perf_event *,
					    struct pt_regs *,
					    void *,
					    struct perf_sample_data *,
					    struct pt_regs *))
{}

/* Drain handler for the Core-format PEBS buffer (body elided). */
static void intel_pmu_drain_pebs_core(struct pt_regs *iregs, struct perf_sample_data *data)
{}

/* Update event counts without emitting samples (body elided). */
static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int size)
{}

/* Drain handler for the Nehalem-format PEBS buffer (body elided). */
static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_data *data)
{}

/* Drain handler for the Ice Lake adaptive-format PEBS buffer (body elided). */
static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_data *data)
{}

/*
 * BTS, PEBS probe and setup
 */

/* Probe hardware capabilities and wire up the BTS/PEBS machinery (body elided). */
void __init intel_ds_init(void)
{}

/* Re-arm the debug store after resume — presumably restores IA32_DS_AREA; confirm upstream. */
void perf_restore_debug_store(void)
{}