linux/arch/x86/events/intel/p4.c

/*
 * Netburst Performance Events (P4, old Xeon)
 *
 *  Copyright (C) 2010 Parallels, Inc., Cyrill Gorcunov <[email protected]>
 *  Copyright (C) 2010 Intel Corporation, Lin Ming <[email protected]>
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>

#include <asm/perf_event_p4.h>
#include <asm/hardirq.h>
#include <asm/apic.h>

#include "../perf_event.h"

#define P4_CNTR_LIMIT
/*
 * array indices 0 and 1 are the two HT threads, used on an HT-enabled CPU
 */
struct p4_event_bind {};

struct p4_pebs_bind {};

/* it sets P4_PEBS_ENABLE_UOP_TAG as well */
#define P4_GEN_PEBS_BIND(name, pebs, vert)

/*
 * note that P4_PEBS_ENABLE_UOP_TAG is always set here
 *
 * this table maps the P4_PEBS_CONFIG_METRIC_MASK bits of an event
 * configuration to the values that have to be written into the
 * MSR_IA32_PEBS_ENABLE and MSR_P4_PEBS_MATRIX_VERT registers
 * (see the illustrative sketch below the table)
 */
static struct p4_pebs_bind p4_pebs_bind_map[] =;
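
/*
 * A minimal sketch (not necessarily the code used further below) of how
 * the table above is meant to be consumed: the metric index is taken
 * from the P4_PEBS_CONFIG_METRIC_MASK bits of the event config and both
 * MSRs are programmed from the matching entry.  The helper name and the
 * metric_pebs/metric_vert field names are assumptions about the
 * (elided) struct p4_pebs_bind layout.
 */
static void __maybe_unused p4_pebs_program_sketch(u64 config)
{
	unsigned int idx = (unsigned int)(config & P4_PEBS_CONFIG_METRIC_MASK);
	struct p4_pebs_bind *bind;

	/* metric 0 means "no PEBS metric"; ignore out-of-range values too */
	if (!idx || idx >= ARRAY_SIZE(p4_pebs_bind_map))
		return;

	bind = &p4_pebs_bind_map[idx];

	/* both MSRs have to be programmed consistently */
	wrmsrl(MSR_IA32_PEBS_ENABLE, (u64)bind->metric_pebs);
	wrmsrl(MSR_P4_PEBS_MATRIX_VERT, (u64)bind->metric_vert);
}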

/*
 * Note that we don't use CCCR1 here; P4_BSQ_ALLOCATION is an
 * exception to this, but we simply have no workaround for it.
 *
 * Consider each binding as the set of resources a particular
 * event may borrow; it doesn't carry the EventMask, Tags and
 * friends -- those are left to the caller (see the borrowing
 * sketch below the table).
 */
static struct p4_event_bind p4_event_bind_map[] =;
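
/*
 * A rough sketch of the "borrowing" idea above -- not the in-tree
 * scheduling code.  Each bind entry is treated as a small pool of
 * counter slots an event may claim for a given HT thread; everything
 * event specific stays in the caller's config.  The cntr[][] field name
 * is an assumption about the (elided) struct p4_event_bind layout, and
 * the helper itself is hypothetical.
 */
static int __maybe_unused
p4_borrow_cntr_sketch(struct p4_event_bind *bind, int thread,
		      unsigned long *used_mask)
{
	int i;

	for (i = 0; i < P4_CNTR_LIMIT; i++) {
		int cntr = bind->cntr[thread][i];

		if (cntr == -1)			/* no more candidates */
			break;
		if (!test_bit(cntr, used_mask))	/* free slot: borrow it */
			return cntr;
	}

	return -1;				/* all candidates are busy */
}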

#define P4_GEN_CACHE_EVENT(event, bit, metric)

static __initconst const u64 p4_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =;

/*
 * Because Netburst is quite restricted in how many identical
 * events may run simultaneously, we introduce event aliases,
 * i.e. different events which have the same functionality but
 * use non-intersecting resources (ESCR/CCCR/counter registers).
 *
 * This allows us to relax the restrictions a bit and run two or
 * more identical events together (see the lookup sketch below
 * p4_get_alias_event).
 *
 * Never set any custom internal bits such as P4_CONFIG_HT,
 * P4_CONFIG_ALIASABLE or bits for P4_PEBS_METRIC; they are
 * either kept up to date automatically or not applicable at all.
 */
static struct p4_event_alias {} p4_event_aliases[] =;

static u64 p4_get_alias_event(u64 config)
{}
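
/*
 * A hedged sketch of the alias lookup described above -- not
 * necessarily what p4_get_alias_event() actually does.  The idea: only
 * configs explicitly marked P4_CONFIG_ALIASABLE may be swapped, and the
 * swap is a straight table scan over p4_event_aliases[].  The
 * original/alternative field names are assumptions about the (elided)
 * struct p4_event_alias layout.
 */
static u64 __maybe_unused p4_get_alias_event_sketch(u64 config)
{
	unsigned int i;

	if (!(config & P4_CONFIG_ALIASABLE))
		return 0;

	for (i = 0; i < ARRAY_SIZE(p4_event_aliases); i++) {
		if (p4_event_aliases[i].original ==
		    (config & ~P4_CONFIG_ALIASABLE))
			return p4_event_aliases[i].alternative;
	}

	return 0;	/* no alias known for this config */
}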

static u64 p4_general_events[PERF_COUNT_HW_MAX] =;

static struct p4_event_bind *p4_config_get_bind(u64 config)
{}

static u64 p4_pmu_event_map(int hw_event)
{}

/* check cpu model specifics */
static bool p4_event_match_cpu_model(unsigned int event_idx)
{}

static int p4_validate_raw_event(struct perf_event *event)
{}

static int p4_hw_config(struct perf_event *event)
{}

static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
{}

static void p4_pmu_disable_pebs(void)
{}

static inline void p4_pmu_disable_event(struct perf_event *event)
{}

static void p4_pmu_disable_all(void)
{}

/* configuration must be valid */
static void p4_pmu_enable_pebs(u64 config)
{}

static void __p4_pmu_enable_event(struct perf_event *event)
{}

static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(X86_PMC_IDX_MAX)], p4_running);

static void p4_pmu_enable_event(struct perf_event *event)
{}

static void p4_pmu_enable_all(int added)
{}

static int p4_pmu_set_period(struct perf_event *event)
{}

static int p4_pmu_handle_irq(struct pt_regs *regs)
{}

/*
 * swap the thread-specific fields according to the thread
 * we are going to run on (see the sketch below)
 */
static void p4_pmu_swap_config_ts(struct hw_perf_event *hwc, int cpu)
{}
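
/*
 * A sketch of the swap described above -- an illustration under
 * assumptions, not the exact in-tree code.  On HT the CCCR/ESCR
 * thread-specific bits (which logical CPU raises the PMI, which
 * thread's OS/USR state is counted) must match the thread the event
 * will actually run on, so migrating an event between siblings means
 * moving the T0 bits to T1 (or back).
 */
static void __maybe_unused
p4_swap_ts_sketch(struct hw_perf_event *hwc, int cpu)
{
	u32 escr = p4_config_unpack_escr(hwc->config);
	u32 cccr = p4_config_unpack_cccr(hwc->config);

	if (p4_ht_thread(cpu)) {		/* moving onto thread 1 */
		cccr &= ~P4_CCCR_OVF_PMI_T0;
		cccr |= P4_CCCR_OVF_PMI_T1;
		if (escr & P4_ESCR_T0_OS) {
			escr &= ~P4_ESCR_T0_OS;
			escr |= P4_ESCR_T1_OS;
		}
		if (escr & P4_ESCR_T0_USR) {
			escr &= ~P4_ESCR_T0_USR;
			escr |= P4_ESCR_T1_USR;
		}
	} else {				/* moving onto thread 0 */
		cccr &= ~P4_CCCR_OVF_PMI_T1;
		cccr |= P4_CCCR_OVF_PMI_T0;
		if (escr & P4_ESCR_T1_OS) {
			escr &= ~P4_ESCR_T1_OS;
			escr |= P4_ESCR_T0_OS;
		}
		if (escr & P4_ESCR_T1_USR) {
			escr &= ~P4_ESCR_T1_USR;
			escr |= P4_ESCR_T0_USR;
		}
	}

	hwc->config = p4_config_pack_escr(escr) | p4_config_pack_cccr(cccr);
}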

/*
 * ESCR address hashing is tricky: the ESCRs are not sequential
 * in memory, but they all start from MSR_P4_BSU_ESCR0 (0x03a0)
 * and the low byte of any ESCR address lies in the range
 * [0xa0, 0xe1], so we end up with a hashtable that is roughly
 * 70% filled (see the indexing sketch after p4_get_escr_idx)
 */

#define P4_ESCR_MSR_BASE
#define P4_ESCR_MSR_MAX
#define P4_ESCR_MSR_TABLE_SIZE
#define P4_ESCR_MSR_IDX(msr)
#define P4_ESCR_MSR_TABLE_ENTRY(msr)

static const unsigned int p4_escr_table[P4_ESCR_MSR_TABLE_SIZE] =;

static int p4_get_escr_idx(unsigned int addr)
{}
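
/*
 * An indexing sketch for the comment above -- an illustration under
 * assumptions, not necessarily the exact code.  With MSR_P4_BSU_ESCR0
 * (0x3a0) as the base, the "hash" is simply the offset of the ESCR MSR
 * address from that base, validated against the sparse lookup table
 * (unused slots stay zero, so a zero entry means "not an ESCR we know
 * about").
 */
static int __maybe_unused p4_escr_hash_sketch(unsigned int addr)
{
	unsigned int idx = addr - 0x3a0;	/* offset from MSR_P4_BSU_ESCR0 */

	if (idx >= P4_ESCR_MSR_TABLE_SIZE || !p4_escr_table[idx])
		return -1;			/* unknown ESCR address */

	return idx;
}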

static int p4_next_cntr(int thread, unsigned long *used_mask,
			struct p4_event_bind *bind)
{}

static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{}

PMU_FORMAT_ATTR();
PMU_FORMAT_ATTR();
PMU_FORMAT_ATTR();

static struct attribute *intel_p4_formats_attr[] =;

static __initconst const struct x86_pmu p4_pmu =;

__init int p4_pmu_init(void)
{}