linux/drivers/perf/cxl_pmu.c

// SPDX-License-Identifier: GPL-2.0-only

/*
 * Copyright(c) 2023 Huawei
 *
 * The CXL 3.0 specification includes a standard Performance Monitoring Unit,
 * called the CXL PMU, or CPMU. In order to allow a high degree of
 * implementation flexibility, the specification provides a wide range of
 * options, all of which are self-describing.
 *
 * Details in CXL rev 3.0 section 8.2.7 CPMU Register Interface
 */

#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/perf_event.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/bits.h>
#include <linux/list.h>
#include <linux/bug.h>
#include <linux/pci.h>

#include "../cxl/cxlpci.h"
#include "../cxl/cxl.h"
#include "../cxl/pmu.h"

#define CXL_PMU_CAP_REG
#define CXL_PMU_CAP_NUM_COUNTERS_MSK
#define CXL_PMU_CAP_COUNTER_WIDTH_MSK
#define CXL_PMU_CAP_NUM_EVN_CAP_REG_SUP_MSK
#define CXL_PMU_CAP_FILTERS_SUP_MSK
#define CXL_PMU_FILTER_HDM
#define CXL_PMU_FILTER_CHAN_RANK_BANK
#define CXL_PMU_CAP_MSI_N_MSK
#define CXL_PMU_CAP_WRITEABLE_WHEN_FROZEN
#define CXL_PMU_CAP_FREEZE
#define CXL_PMU_CAP_INT
#define CXL_PMU_CAP_VERSION_MSK

#define CXL_PMU_OVERFLOW_REG
#define CXL_PMU_FREEZE_REG
#define CXL_PMU_EVENT_CAP_REG(n)
#define CXL_PMU_EVENT_CAP_SUPPORTED_EVENTS_MSK
#define CXL_PMU_EVENT_CAP_GROUP_ID_MSK
#define CXL_PMU_EVENT_CAP_VENDOR_ID_MSK

#define CXL_PMU_COUNTER_CFG_REG(n)
#define CXL_PMU_COUNTER_CFG_TYPE_MSK
#define CXL_PMU_COUNTER_CFG_TYPE_FREE_RUN
#define CXL_PMU_COUNTER_CFG_TYPE_FIXED_FUN
#define CXL_PMU_COUNTER_CFG_TYPE_CONFIGURABLE
#define CXL_PMU_COUNTER_CFG_ENABLE
#define CXL_PMU_COUNTER_CFG_INT_ON_OVRFLW
#define CXL_PMU_COUNTER_CFG_FREEZE_ON_OVRFLW
#define CXL_PMU_COUNTER_CFG_EDGE
#define CXL_PMU_COUNTER_CFG_INVERT
#define CXL_PMU_COUNTER_CFG_THRESHOLD_MSK
#define CXL_PMU_COUNTER_CFG_EVENTS_MSK
#define CXL_PMU_COUNTER_CFG_EVENT_GRP_ID_IDX_MSK

#define CXL_PMU_FILTER_CFG_REG(n, f)
#define CXL_PMU_FILTER_CFG_VALUE_MSK

#define CXL_PMU_COUNTER_REG(n)

/* CXL rev 3.0 Table 13-5 Events under CXL Vendor ID */
#define CXL_PMU_GID_CLOCK_TICKS
#define CXL_PMU_GID_D2H_REQ
#define CXL_PMU_GID_D2H_RSP
#define CXL_PMU_GID_H2D_REQ
#define CXL_PMU_GID_H2D_RSP
#define CXL_PMU_GID_CACHE_DATA
#define CXL_PMU_GID_M2S_REQ
#define CXL_PMU_GID_M2S_RWD
#define CXL_PMU_GID_M2S_BIRSP
#define CXL_PMU_GID_S2M_BISNP
#define CXL_PMU_GID_S2M_NDR
#define CXL_PMU_GID_S2M_DRS
#define CXL_PMU_GID_DDR

static int cxl_pmu_cpuhp_state_num;

struct cxl_pmu_ev_cap {};

#define CXL_PMU_MAX_COUNTERS
struct cxl_pmu_info {};

#define pmu_to_cxl_pmu_info(_pmu)

/*
 * All CPMU counters are discoverable via the Event Capabilities Registers.
 * Each Event Capability register contains a VID / GroupID.
 * A counter may then count any combination (by summing) of events in
 * that group which are in the Supported Events Bitmask.
 * However, there are some complexities to the scheme.
 *  - Fixed function counters refer to an Event Capabilities register.
 *    That event capability register is not then used for Configurable
 *    counters.
 */
static int cxl_pmu_parse_caps(struct device *dev, struct cxl_pmu_info *info)
{}
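
/*
 * Illustrative sketch, not the upstream implementation: one way the matching
 * rule described in the comment above could be expressed.  The real
 * struct cxl_pmu_ev_cap layout is elided in this listing, so the struct and
 * helper below use assumed names and field widths purely for illustration.
 */
struct cxl_pmu_ev_cap_example {
	u16 vid;	/* Vendor ID owning the event group */
	u16 gid;	/* Group ID within that vendor's events */
	u32 msk;	/* Supported Events Bitmask for this group */
};

/*
 * A configurable counter may count any summed combination of events within a
 * single group, so a request matches only if every requested event bit is
 * present in the group's Supported Events Bitmask.
 */
static bool cxl_pmu_ev_cap_matches_example(const struct cxl_pmu_ev_cap_example *cap,
					   u16 vid, u16 gid, u32 msk)
{
	return cap->vid == vid && cap->gid == gid &&
	       (cap->msk & msk) == msk;
}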

#define CXL_PMU_FORMAT_ATTR(_name, _format)

enum {};

static struct attribute *cxl_pmu_format_attr[] =;

#define CXL_PMU_ATTR_CONFIG_MASK_MSK
#define CXL_PMU_ATTR_CONFIG_GID_MSK
#define CXL_PMU_ATTR_CONFIG_VID_MSK
#define CXL_PMU_ATTR_CONFIG1_THRESHOLD_MSK
#define CXL_PMU_ATTR_CONFIG1_INVERT_MSK
#define CXL_PMU_ATTR_CONFIG1_EDGE_MSK
#define CXL_PMU_ATTR_CONFIG1_FILTER_EN_MSK
#define CXL_PMU_ATTR_CONFIG2_HDM_MSK

static umode_t cxl_pmu_format_is_visible(struct kobject *kobj,
					 struct attribute *attr, int a)
{}

static const struct attribute_group cxl_pmu_format_group =;

static u32 cxl_pmu_config_get_mask(struct perf_event *event)
{}

static u16 cxl_pmu_config_get_gid(struct perf_event *event)
{}

static u16 cxl_pmu_config_get_vid(struct perf_event *event)
{}

static u8 cxl_pmu_config1_get_threshold(struct perf_event *event)
{}

static bool cxl_pmu_config1_get_invert(struct perf_event *event)
{}

static bool cxl_pmu_config1_get_edge(struct perf_event *event)
{}

/*
 * The CPMU specification allows for 8 filters, each with a 32-bit value...
 * So we need to find 8x32 bits to store them in.
 * As the value used to disable a filter is 0xffff_ffff, a separate enable
 * switch is needed.
 */

static bool cxl_pmu_config1_hdm_filter_en(struct perf_event *event)
{}

static u16 cxl_pmu_config2_get_hdm_decoder(struct perf_event *event)
{}
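
/*
 * Illustrative sketch, not the upstream implementation: because the value the
 * hardware uses to disable a filter is the all-ones pattern, a zero filter
 * value in config2 cannot double as "disabled", hence the separate enable bit
 * noted in the comment above.  The mask values and helper name below are
 * assumptions for illustration only (FIELD_GET() needs <linux/bitfield.h>).
 */
#define CXL_PMU_EXAMPLE_CONFIG1_FILTER_EN_MSK	BIT_ULL(34)
#define CXL_PMU_EXAMPLE_CONFIG2_HDM_MSK		GENMASK_ULL(15, 0)

static u32 cxl_pmu_example_hdm_filter_value(struct perf_event *event)
{
	/* Filter disabled: hand the hardware its all-ones sentinel. */
	if (!FIELD_GET(CXL_PMU_EXAMPLE_CONFIG1_FILTER_EN_MSK,
		       event->attr.config1))
		return GENMASK(31, 0);

	return FIELD_GET(CXL_PMU_EXAMPLE_CONFIG2_HDM_MSK,
			 event->attr.config2);
}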

static ssize_t cxl_pmu_event_sysfs_show(struct device *dev,
					struct device_attribute *attr, char *buf)
{}

#define CXL_PMU_EVENT_ATTR(_name, _vid, _gid, _msk)

/* For CXL spec defined events */
#define CXL_PMU_EVENT_CXL_ATTR(_name, _gid, _msk)

static struct attribute *cxl_pmu_event_attrs[] =;

static struct cxl_pmu_ev_cap *cxl_pmu_find_fixed_counter_ev_cap(struct cxl_pmu_info *info,
								int vid, int gid, int msk)
{}

static struct cxl_pmu_ev_cap *cxl_pmu_find_config_counter_ev_cap(struct cxl_pmu_info *info,
								 int vid, int gid, int msk)
{}

static umode_t cxl_pmu_event_is_visible(struct kobject *kobj, struct attribute *attr, int a)
{}

static const struct attribute_group cxl_pmu_events =;

static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{}
static DEVICE_ATTR_RO(cpumask);

static struct attribute *cxl_pmu_cpumask_attrs[] =;

static const struct attribute_group cxl_pmu_cpumask_group =;

static const struct attribute_group *cxl_pmu_attr_groups[] =;

/* If counter_idx == NULL, don't try to allocate a counter. */
static int cxl_pmu_get_event_idx(struct perf_event *event, int *counter_idx,
				 int *event_idx)
{}
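
/*
 * Illustrative usage sketch (an assumption, not the upstream code): the
 * counter_idx == NULL convention above lets a caller validate an event
 * without claiming a counter, e.g. at event_init() time, while passing a
 * real pointer at event_add() time actually reserves one.
 */
static int cxl_pmu_example_validate_event(struct perf_event *event)
{
	int event_idx;

	/* Validation only: no counter is allocated. */
	return cxl_pmu_get_event_idx(event, NULL, &event_idx);
}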

static int cxl_pmu_event_init(struct perf_event *event)
{}

static void cxl_pmu_enable(struct pmu *pmu)
{}

static void cxl_pmu_disable(struct pmu *pmu)
{}

static void cxl_pmu_event_start(struct perf_event *event, int flags)
{}

static u64 cxl_pmu_read_counter(struct perf_event *event)
{}

static void __cxl_pmu_read(struct perf_event *event, bool overflow)
{}

static void cxl_pmu_read(struct perf_event *event)
{}

static void cxl_pmu_event_stop(struct perf_event *event, int flags)
{}

static int cxl_pmu_event_add(struct perf_event *event, int flags)
{}

static void cxl_pmu_event_del(struct perf_event *event, int flags)
{}

static irqreturn_t cxl_pmu_irq(int irq, void *data)
{}

static void cxl_pmu_perf_unregister(void *_info)
{}

static void cxl_pmu_cpuhp_remove(void *_info)
{}

static int cxl_pmu_probe(struct device *dev)
{}

static struct cxl_driver cxl_pmu_driver =;

static int cxl_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
{}

static int cxl_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{}

static __init int cxl_pmu_init(void)
{}

static __exit void cxl_pmu_exit(void)
{}

MODULE_DESCRIPTION();
MODULE_LICENSE();
MODULE_IMPORT_NS();
module_init(cxl_pmu_init);
module_exit(cxl_pmu_exit);
MODULE_ALIAS_CXL();