/* linux/arch/x86/include/asm/perf_event_p4.h */

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Netburst Performance Events (P4, old Xeon)
 */

#ifndef PERF_EVENT_P4_H
#define PERF_EVENT_P4_H

#include <linux/cpu.h>
#include <linux/bitops.h>

/*
 * NetBurst has performance MSRs shared between
 * threads if HT is turned on, ie for both logical
 * processors (mem: in turn in Atom with HT support
 * perf-MSRs are not shared and every thread has its
 * own perf-MSRs set)
 */
/* number of ESCR registers; IQ_ESCR0/1 are reserved (not usable on
 * later steppings), leaving ARCH_P4_MAX_ESCR usable ones */
#define ARCH_P4_TOTAL_ESCR	(46)
#define ARCH_P4_RESERVED_ESCR	(2) /* IQ_ESCR(0,1) not always valid */
#define ARCH_P4_MAX_ESCR	(ARCH_P4_TOTAL_ESCR - ARCH_P4_RESERVED_ESCR)
#define ARCH_P4_MAX_CCCR	(18)

/* counters are 40 bits wide; the top (sign) bit flags an overflowed
 * counter, so ARCH_P4_UNFLAGGED_BIT is the highest "clean" value bit */
#define ARCH_P4_CNTRVAL_BITS	(40)
#define ARCH_P4_CNTRVAL_MASK	((1ULL << ARCH_P4_CNTRVAL_BITS) - 1)
#define ARCH_P4_UNFLAGGED_BIT	((1ULL) << (ARCH_P4_CNTRVAL_BITS - 1))

/*
 * ESCR register layout (low 32 bits): event select in bits 25-30,
 * event mask in bits 9-24, tag value in bits 5-8, tag enable in
 * bit 4, and per-logical-cpu OS/USR qualifiers in bits 0-3
 * (T0 - first thread, T1 - second thread).
 */
#define P4_ESCR_EVENT_MASK	0x7e000000ULL
#define P4_ESCR_EVENT_SHIFT	25
#define P4_ESCR_EVENTMASK_MASK	0x01fffe00ULL
#define P4_ESCR_EVENTMASK_SHIFT	9
#define P4_ESCR_TAG_MASK	0x000001e0ULL
#define P4_ESCR_TAG_SHIFT	5
#define P4_ESCR_TAG_ENABLE	0x00000010ULL
#define P4_ESCR_T0_OS		0x00000008ULL
#define P4_ESCR_T0_USR		0x00000004ULL
#define P4_ESCR_T1_OS		0x00000002ULL
#define P4_ESCR_T1_USR		0x00000001ULL

/* helpers to place a raw value into its ESCR field */
#define P4_ESCR_EVENT(v)	((v) << P4_ESCR_EVENT_SHIFT)
#define P4_ESCR_EMASK(v)	((v) << P4_ESCR_EVENTMASK_SHIFT)
#define P4_ESCR_TAG(v)		((v) << P4_ESCR_TAG_SHIFT)

/*
 * CCCR register layout (low 32 bits): enable bit 12, ESCR select
 * bits 13-15, active-thread bits 16-17, compare bit 18, complement
 * bit 19, threshold bits 20-23, edge bit 24, FORCE_OVF bit 25,
 * OVF_PMI for either thread bits 26-27, cascade bit 30, overflow
 * bit 31.  P4_CCCR_RESERVED covers bits the kernel keeps for its
 * own use (including OVF_PMI_T1, swapped at write time on HT).
 */
#define P4_CCCR_OVF			0x80000000ULL
#define P4_CCCR_CASCADE			0x40000000ULL
#define P4_CCCR_OVF_PMI_T0		0x04000000ULL
#define P4_CCCR_OVF_PMI_T1		0x08000000ULL
#define P4_CCCR_FORCE_OVF		0x02000000ULL
#define P4_CCCR_EDGE			0x01000000ULL
#define P4_CCCR_THRESHOLD_MASK		0x00f00000ULL
#define P4_CCCR_THRESHOLD_SHIFT		20
#define P4_CCCR_COMPLEMENT		0x00080000ULL
#define P4_CCCR_COMPARE			0x00040000ULL
#define P4_CCCR_ESCR_SELECT_MASK	0x0000e000ULL
#define P4_CCCR_ESCR_SELECT_SHIFT	13
#define P4_CCCR_ENABLE			0x00001000ULL
#define P4_CCCR_THREAD_SINGLE		0x00010000ULL
#define P4_CCCR_THREAD_BOTH		0x00020000ULL
#define P4_CCCR_THREAD_ANY		0x00030000ULL
#define P4_CCCR_RESERVED		(0x30000000ULL | P4_CCCR_OVF_PMI_T1)

/* helpers to place a raw value into its CCCR field */
#define P4_CCCR_THRESHOLD(v)		((v) << P4_CCCR_THRESHOLD_SHIFT)
#define P4_CCCR_ESEL(v)			((v) << P4_CCCR_ESCR_SELECT_SHIFT)

/*
 * P4_GEN_ESCR_EMASK builds an enumerator for one event-mask bit of
 * the given event class, pre-shifted into its ESCR field position;
 * P4_ESCR_EMASK_BIT retrieves such an enumerator by class and name.
 */
#define P4_GEN_ESCR_EMASK(class, name, bit)	\
	class##__##name = ((1ULL << (bit)) << P4_ESCR_EVENTMASK_SHIFT)
#define P4_ESCR_EMASK_BIT(class, name)		class##__##name

/*
 * config field is 64bit width and consists of
 * HT << 63 | ESCR << 32 | CCCR
 * where HT is HyperThreading bit (since ESCR
 * has it reserved we may use it for own purpose)
 *
 * note that this is NOT the addresses of respective
 * ESCR and CCCR but rather an only packed value should
 * be unpacked and written to a proper addresses
 *
 * the base idea is to pack as much info as possible
 */
/* pack/unpack between the 64 bit config and the two 32 bit halves:
 * high half = ESCR image, low half = CCCR image (see comment above) */
#define p4_config_pack_escr(v)		(((u64)(v)) << 32)
#define p4_config_pack_cccr(v)		(((u64)(v)) & 0xffffffffULL)
#define p4_config_unpack_escr(v)	(((u64)(v)) >> 32)
#define p4_config_unpack_cccr(v)	(((u64)(v)) & 0xffffffffULL)

/* extract the event-mask field from the ESCR half of @v */
#define p4_config_unpack_emask(v)			\
	({						\
		u32 t = p4_config_unpack_escr((v));	\
		t = t &  P4_ESCR_EVENTMASK_MASK;	\
		t = t >> P4_ESCR_EVENTMASK_SHIFT;	\
		t;					\
	})

/* extract the event-select field from the ESCR half of @v */
#define p4_config_unpack_event(v)			\
	({						\
		u32 t = p4_config_unpack_escr((v));	\
		t = t &  P4_ESCR_EVENT_MASK;		\
		t = t >> P4_ESCR_EVENT_SHIFT;		\
		t;					\
	})

/* bit 63 of config marks which HT thread the event is bound to */
#define P4_CONFIG_HT_SHIFT		63
#define P4_CONFIG_HT			(1ULL << P4_CONFIG_HT_SHIFT)

/*
 * If an event has alias it should be marked
 * with a special bit. (Don't forget to check
 * P4_PEBS_CONFIG_MASK and related bits on
 * modification.)
 */
#define P4_CONFIG_ALIASABLE

/*
 * The bits we allow to pass for RAW events
 */
#define P4_CONFIG_MASK_ESCR		\
	(P4_ESCR_EVENT_MASK	|	\
	 P4_ESCR_EVENTMASK_MASK	|	\
	 P4_ESCR_TAG_MASK	|	\
	 P4_ESCR_TAG_ENABLE)

#define P4_CONFIG_MASK_CCCR		\
	(P4_CCCR_EDGE		|	\
	 P4_CCCR_THRESHOLD_MASK	|	\
	 P4_CCCR_COMPLEMENT	|	\
	 P4_CCCR_COMPARE	|	\
	 P4_CCCR_THREAD_ANY	|	\
	 P4_CCCR_RESERVED)

/* some dangerous bits are reserved for kernel internals */
#define P4_CONFIG_MASK					\
	(p4_config_pack_escr(P4_CONFIG_MASK_ESCR) |	\
	 p4_config_pack_cccr(P4_CONFIG_MASK_CCCR))

/*
 * In case of event aliasing we need to preserve some
 * caller bits, otherwise the mapping won't be complete.
 */
#define P4_CONFIG_EVENT_ALIAS_MASK			  \
	(p4_config_pack_escr(P4_CONFIG_MASK_ESCR)	| \
	 p4_config_pack_cccr(P4_CCCR_EDGE		| \
			     P4_CCCR_THRESHOLD_MASK	| \
			     P4_CCCR_COMPLEMENT		| \
			     P4_CCCR_COMPARE))

/* bits an alias mapping must never flip for the caller */
#define P4_CONFIG_EVENT_ALIAS_IMMUTABLE_BITS	  \
	((P4_CONFIG_HT)				| \
	 p4_config_pack_escr(P4_ESCR_T0_OS	| \
			     P4_ESCR_T0_USR	| \
			     P4_ESCR_T1_OS	| \
			     P4_ESCR_T1_USR)	| \
	 p4_config_pack_cccr(P4_CCCR_OVF	| \
			     P4_CCCR_CASCADE	| \
			     P4_CCCR_FORCE_OVF	| \
			     P4_CCCR_THREAD_ANY	| \
			     P4_CCCR_OVF_PMI_T0	| \
			     P4_CCCR_OVF_PMI_T1	| \
			     P4_CONFIG_ALIASABLE))

/* true when the CCCR half of @config has the cascade bit set */
static inline bool p4_is_event_cascaded(u64 config)
{
	u32 cccr = p4_config_unpack_cccr(config);
	return !!(cccr & P4_CCCR_CASCADE);
}

/* which HT thread (0 or 1) the event config is bound to */
static inline int p4_ht_config_thread(u64 config)
{
	return !!(config & P4_CONFIG_HT);
}

/* bind @config to the second HT thread */
static inline u64 p4_set_ht_bit(u64 config)
{
	return config | P4_CONFIG_HT;
}

/* bind @config to the first HT thread */
static inline u64 p4_clear_ht_bit(u64 config)
{
	return config & ~P4_CONFIG_HT;
}

/* nonzero when HyperThreading is up (more than one sibling) */
static inline int p4_ht_active(void)
{
#ifdef CONFIG_SMP
	return smp_num_siblings > 1;
#endif
	return 0;
}

/* logical thread index (0 or 1) of @cpu within its HT sibling pair */
static inline int p4_ht_thread(int cpu)
{
#ifdef CONFIG_SMP
	if (smp_num_siblings == 2)
		return cpu != cpumask_first(this_cpu_cpumask_var_ptr(cpu_sibling_map));
#endif
	return 0;
}

/*
 * nonzero when the thread the config was packed for differs from
 * the thread @cpu runs on, so T0/T1 bits must be swapped on write
 */
static inline int p4_should_swap_ts(u64 config, int cpu)
{
	return p4_ht_config_thread(config) ^ p4_ht_thread(cpu);
}

static inline u32 p4_default_cccr_conf(int cpu)
{}

static inline u32 p4_default_escr_conf(int cpu, int exclude_os, int exclude_usr)
{}

/*
 * These are the events which should be used in the "Event Select"
 * field of the ESCR register; they are like unique keys which allow
 * the kernel to determine which CCCR and COUNTER should be
 * used to track an event
 */
enum P4_EVENTS {};

/*
 * an opcode packs an event-select value (high byte) together with
 * the ESCR select (low byte); P4_OPCODE maps an event name to its
 * <event>_OPCODE enumerator
 */
#define P4_OPCODE(event)		event##_OPCODE
#define P4_OPCODE_ESEL(opcode)		(((opcode) & 0x00ff) >> 0)
#define P4_OPCODE_EVNT(opcode)		(((opcode) & 0xff00) >> 8)
#define P4_OPCODE_PACK(event, sel)	(((event) << 8) | (sel))

/*
 * Comments below the event represent ESCR restriction
 * for this event and counter index per ESCR
 *
 * MSR_P4_IQ_ESCR0 and MSR_P4_IQ_ESCR1 are available only on early
 * processor builds (family 0FH, models 01H-02H). These MSRs
 * are not available on later versions, so that we don't use
 * them completely
 *
 * Also note that CCCR1 does not have the P4_CCCR_ENABLE bit working
 * properly, so we should not use this CCCR and the respective
 * counter as a result
 */
enum P4_EVENT_OPCODES {};

/*
 * a caller should use P4_ESCR_EMASK_NAME helper to
 * pick the EventMask needed, for example
 *
 *	P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DD)
 */
enum P4_ESCR_EMASKS {};

/*
 * Note we have UOP and PEBS bits reserved for now
 * just in case if we will need them once
 */
/* PEBS bits live in the low byte(s) of the CCCR half of config:
 * metric index in the low 6 bits, enable at bit 7, uop-tag at bit 8 */
#define P4_PEBS_CONFIG_ENABLE		(1ULL << 7)
#define P4_PEBS_CONFIG_UOP_TAG		(1ULL << 8)
#define P4_PEBS_CONFIG_METRIC_MASK	0x3FLL
#define P4_PEBS_CONFIG_MASK		0xFFLL

/*
 * mem: Only counters MSR_IQ_COUNTER4 (16) and
 * MSR_IQ_COUNTER5 (17) are allowed for PEBS sampling
 */
#define P4_PEBS_ENABLE			0x02000000U
#define P4_PEBS_ENABLE_UOP_TAG		0x01000000U

#define p4_config_unpack_metric(v)	(((u64)(v)) & P4_PEBS_CONFIG_METRIC_MASK)
#define p4_config_unpack_pebs(v)	(((u64)(v)) & P4_PEBS_CONFIG_MASK)

#define p4_config_pebs_has(v, mask)	(p4_config_unpack_pebs(v) & (mask))

enum P4_PEBS_METRIC {};

/*
 * Notes on internal configuration of ESCR+CCCR tuples
 *
 * Since P4 has quite a different architecture of
 * performance registers compared with the "architectural"
 * ones, and we have only 64 bits to keep the configuration
 * of a performance event, the following trick is used.
 *
 * 1) Since both ESCR and CCCR registers have only low
 *    32 bits valuable, we pack them into a single 64 bit
 *    configuration. Low 32 bits of such config correspond
 *    to low 32 bits of CCCR register and high 32 bits
 *    correspond to low 32 bits of ESCR register.
 *
 * 2) The meaning of every bit of such config field can
 *    be found in Intel SDM but it should be noted that
 *    we "borrow" some reserved bits for own usage and
 *    clean them or set to a proper value when we do
 *    a real write to hardware registers.
 *
 * 3) The format of bits of config is the following
 *    and should be either 0 or set to some predefined
 *    values:
 *
 *    Low 32 bits
 *    -----------
 *      0-6: P4_PEBS_METRIC enum
 *     7-11:                    reserved
 *       12:                    reserved (Enable)
 *    13-15:                    reserved (ESCR select)
 *    16-17: Active Thread
 *       18: Compare
 *       19: Complement
 *    20-23: Threshold
 *       24: Edge
 *       25:                    reserved (FORCE_OVF)
 *       26:                    reserved (OVF_PMI_T0)
 *       27:                    reserved (OVF_PMI_T1)
 *    28-29:                    reserved
 *       30:                    reserved (Cascade)
 *       31:                    reserved (OVF)
 *
 *    High 32 bits
 *    ------------
 *        0:                    reserved (T1_USR)
 *        1:                    reserved (T1_OS)
 *        2:                    reserved (T0_USR)
 *        3:                    reserved (T0_OS)
 *        4: Tag Enable
 *      5-8: Tag Value
 *     9-24: Event Mask (may use P4_ESCR_EMASK_BIT helper)
 *    25-30: enum P4_EVENTS
 *       31:                    reserved (HT thread)
 */

#endif /* PERF_EVENT_P4_H */