// linux/drivers/perf/arm_cspmu/arm_cspmu.c

// SPDX-License-Identifier: GPL-2.0
/*
 * ARM CoreSight Architecture PMU driver.
 *
 * This driver adds support for uncore PMU based on ARM CoreSight Performance
 * Monitoring Unit Architecture. The PMU is accessible via MMIO registers and
 * like other uncore PMUs, it does not support process specific events and
 * cannot be used in sampling mode.
 *
 * This code is based on other uncore PMUs like ARM DSU PMU. It provides a
 * generic implementation to operate the PMU according to CoreSight PMU
 * architecture and ACPI ARM PMU table (APMT) documents below:
 *   - ARM CoreSight PMU architecture document number: ARM IHI 0091 A.a-00bet0.
 *   - APMT document number: ARM DEN0117.
 *
 * The user should refer to the vendor technical documentation to get details
 * about the supported events.
 *
 * Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 */

#include <linux/acpi.h>
#include <linux/cacheinfo.h>
#include <linux/ctype.h>
#include <linux/interrupt.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>

#include "arm_cspmu.h"

#define PMUNAME
#define DRVNAME

#define ARM_CSPMU_CPUMASK_ATTR(_name, _config)

/*
 * CoreSight PMU Arch register offsets.
 */
#define PMEVCNTR_LO
#define PMEVCNTR_HI
#define PMEVTYPER
#define PMCCFILTR
#define PMEVFILTR
#define PMCNTENSET
#define PMCNTENCLR
#define PMINTENSET
#define PMINTENCLR
#define PMOVSCLR
#define PMOVSSET
#define PMCFGR
#define PMCR
#define PMIIDR

/* PMCFGR register field */
#define PMCFGR_NCG
#define PMCFGR_HDBG
#define PMCFGR_TRO
#define PMCFGR_SS
#define PMCFGR_FZO
#define PMCFGR_MSI
#define PMCFGR_UEN
#define PMCFGR_NA
#define PMCFGR_EX
#define PMCFGR_CCD
#define PMCFGR_CC
#define PMCFGR_SIZE
#define PMCFGR_N

/* PMCR register field */
#define PMCR_TRO
#define PMCR_HDBG
#define PMCR_FZO
#define PMCR_NA
#define PMCR_DP
#define PMCR_X
#define PMCR_D
#define PMCR_C
#define PMCR_P
#define PMCR_E

/* Each SET/CLR register supports up to 32 counters. */
#define ARM_CSPMU_SET_CLR_COUNTER_SHIFT
#define ARM_CSPMU_SET_CLR_COUNTER_NUM

/* Convert counter idx into SET/CLR register number. */
#define COUNTER_TO_SET_CLR_ID(idx)

/* Convert counter idx into SET/CLR register bit. */
#define COUNTER_TO_SET_CLR_BIT(idx)

#define ARM_CSPMU_ACTIVE_CPU_MASK
#define ARM_CSPMU_ASSOCIATED_CPU_MASK

/*
 * Maximum poll count for reading counter value using high-low-high sequence.
 */
#define HILOHI_MAX_POLL

static unsigned long arm_cspmu_cpuhp_state;

static DEFINE_MUTEX(arm_cspmu_lock);

static void arm_cspmu_set_ev_filter(struct arm_cspmu *cspmu,
				    struct hw_perf_event *hwc, u32 filter);

static struct acpi_apmt_node *arm_cspmu_apmt_node(struct device *dev)
{}

/*
 * In CoreSight PMU architecture, all of the MMIO registers are 32-bit except
 * counter register. The counter register can be implemented as 32-bit or 64-bit
 * register depending on the value of PMCFGR.SIZE field. For 64-bit access,
 * single-copy 64-bit atomic support is implementation defined. APMT node flag
 * is used to identify if the PMU supports 64-bit single copy atomic. If 64-bit
 * single copy atomic is not supported, the driver treats the register as a pair
 * of 32-bit register.
 */

/*
 * Read 64-bit register as a pair of 32-bit registers using hi-lo-hi sequence.
 */
static u64 read_reg64_hilohi(const void __iomem *addr, u32 max_poll_count)
{}

/* Check if cycle counter is supported. */
static inline bool supports_cycle_counter(const struct arm_cspmu *cspmu)
{}

/* Get counter size, which is (PMCFGR_SIZE + 1). */
static inline u32 counter_size(const struct arm_cspmu *cspmu)
{}

/* Get counter mask. */
static inline u64 counter_mask(const struct arm_cspmu *cspmu)
{}

/* Check if counter is implemented as 64-bit register. */
static inline bool use_64b_counter_reg(const struct arm_cspmu *cspmu)
{}

ssize_t arm_cspmu_sysfs_event_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{}
EXPORT_SYMBOL_GPL();

/* Default event list. */
static struct attribute *arm_cspmu_event_attrs[] =;

static struct attribute **
arm_cspmu_get_event_attrs(const struct arm_cspmu *cspmu)
{}

static umode_t
arm_cspmu_event_attr_is_visible(struct kobject *kobj,
				struct attribute *attr, int unused)
{}

static struct attribute *arm_cspmu_format_attrs[] =;

static struct attribute **
arm_cspmu_get_format_attrs(const struct arm_cspmu *cspmu)
{}

static u32 arm_cspmu_event_type(const struct perf_event *event)
{}

static bool arm_cspmu_is_cycle_counter_event(const struct perf_event *event)
{}

static u32 arm_cspmu_event_filter(const struct perf_event *event)
{}

static ssize_t arm_cspmu_identifier_show(struct device *dev,
					 struct device_attribute *attr,
					 char *page)
{}

static struct device_attribute arm_cspmu_identifier_attr =;

static struct attribute *arm_cspmu_identifier_attrs[] =;

static struct attribute_group arm_cspmu_identifier_attr_group =;

static const char *arm_cspmu_get_identifier(const struct arm_cspmu *cspmu)
{}

static const char *arm_cspmu_type_str[ACPI_APMT_NODE_TYPE_COUNT] =;

static const char *arm_cspmu_get_name(const struct arm_cspmu *cspmu)
{}

static ssize_t arm_cspmu_cpumask_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{}

static struct attribute *arm_cspmu_cpumask_attrs[] =;

static struct attribute_group arm_cspmu_cpumask_attr_group =;

static struct arm_cspmu_impl_match impl_match[] =;

static struct arm_cspmu_impl_match *arm_cspmu_impl_match_get(u32 pmiidr)
{}

#define DEFAULT_IMPL_OP(name)

static int arm_cspmu_init_impl_ops(struct arm_cspmu *cspmu)
{}

static struct attribute_group *
arm_cspmu_alloc_event_attr_group(struct arm_cspmu *cspmu)
{}

static struct attribute_group *
arm_cspmu_alloc_format_attr_group(struct arm_cspmu *cspmu)
{}

static int arm_cspmu_alloc_attr_groups(struct arm_cspmu *cspmu)
{}

static inline void arm_cspmu_reset_counters(struct arm_cspmu *cspmu)
{}

static inline void arm_cspmu_start_counters(struct arm_cspmu *cspmu)
{}

static inline void arm_cspmu_stop_counters(struct arm_cspmu *cspmu)
{}

static void arm_cspmu_enable(struct pmu *pmu)
{}

static void arm_cspmu_disable(struct pmu *pmu)
{}

static int arm_cspmu_get_event_idx(struct arm_cspmu_hw_events *hw_events,
				struct perf_event *event)
{}

static bool arm_cspmu_validate_event(struct pmu *pmu,
				 struct arm_cspmu_hw_events *hw_events,
				 struct perf_event *event)
{}

/*
 * Make sure the group of events can be scheduled at once
 * on the PMU.
 */
static bool arm_cspmu_validate_group(struct perf_event *event)
{}

static int arm_cspmu_event_init(struct perf_event *event)
{}

static inline u32 counter_offset(u32 reg_sz, u32 ctr_idx)
{}

static void arm_cspmu_write_counter(struct perf_event *event, u64 val)
{}

static u64 arm_cspmu_read_counter(struct perf_event *event)
{}

/*
 * arm_cspmu_set_event_period: Set the period for the counter.
 *
 * To handle cases of extreme interrupt latency, we program
 * the counter with half of the max count for the counters.
 */
static void arm_cspmu_set_event_period(struct perf_event *event)
{}

static void arm_cspmu_enable_counter(struct arm_cspmu *cspmu, int idx)
{}

static void arm_cspmu_disable_counter(struct arm_cspmu *cspmu, int idx)
{}

static void arm_cspmu_event_update(struct perf_event *event)
{}

static inline void arm_cspmu_set_event(struct arm_cspmu *cspmu,
					struct hw_perf_event *hwc)
{}

static void arm_cspmu_set_ev_filter(struct arm_cspmu *cspmu,
					struct hw_perf_event *hwc,
					u32 filter)
{}

static inline void arm_cspmu_set_cc_filter(struct arm_cspmu *cspmu, u32 filter)
{}

static void arm_cspmu_start(struct perf_event *event, int pmu_flags)
{}

static void arm_cspmu_stop(struct perf_event *event, int pmu_flags)
{}

static inline u32 to_phys_idx(struct arm_cspmu *cspmu, u32 idx)
{}

static int arm_cspmu_add(struct perf_event *event, int flags)
{}

static void arm_cspmu_del(struct perf_event *event, int flags)
{}

static void arm_cspmu_read(struct perf_event *event)
{}

static struct arm_cspmu *arm_cspmu_alloc(struct platform_device *pdev)
{}

static int arm_cspmu_init_mmio(struct arm_cspmu *cspmu)
{}

static inline int arm_cspmu_get_reset_overflow(struct arm_cspmu *cspmu,
					       u32 *pmovs)
{}

static irqreturn_t arm_cspmu_handle_irq(int irq_num, void *dev)
{}

static int arm_cspmu_request_irq(struct arm_cspmu *cspmu)
{}

#if defined(CONFIG_ACPI) && defined(CONFIG_ARM64)
#include <acpi/processor.h>

/*
 * Check whether @cpu lives under the ACPI processor container identified
 * by @container_uid.
 *
 * Starting from the CPU's ACPI companion device, walk up the ACPI device
 * hierarchy and compare each ancestor against the processor-container
 * HID/UID pair.
 *
 * Return: 0 if a matching container is found; -ENODEV if the CPU has no
 * struct device or no ancestor matches.
 */
static inline int arm_cspmu_find_cpu_container(int cpu, u32 container_uid)
{
	struct acpi_device *adev;
	struct device *dev = get_cpu_device(cpu);

	if (!dev)
		return -ENODEV;

	for (adev = ACPI_COMPANION(dev); adev; adev = acpi_dev_parent(adev)) {
		if (acpi_dev_hid_uid_match(adev, ACPI_PROCESSOR_CONTAINER_HID,
					   container_uid))
			return 0;
	}

	return -ENODEV;
}

static int arm_cspmu_acpi_get_cpus(struct arm_cspmu *cspmu)
{
	struct acpi_apmt_node *apmt_node;
	int affinity_flag;
	int cpu;

	apmt_node = arm_cspmu_apmt_node(cspmu->dev);
	affinity_flag = apmt_node->flags & ACPI_APMT_FLAGS_AFFINITY;

	if (affinity_flag == ACPI_APMT_FLAGS_AFFINITY_PROC) {
		for_each_possible_cpu(cpu) {
			if (apmt_node->proc_affinity ==
			    get_acpi_id_for_cpu(cpu)) {
				cpumask_set_cpu(cpu, &cspmu->associated_cpus);
				break;
			}
		}
	} else {
		for_each_possible_cpu(cpu) {
			if (arm_cspmu_find_cpu_container(
				    cpu, apmt_node->proc_affinity))
				continue;

			cpumask_set_cpu(cpu, &cspmu->associated_cpus);
		}
	}

	return 0;
}
#else
static int arm_cspmu_acpi_get_cpus(struct arm_cspmu *cspmu)
{}
#endif

static int arm_cspmu_of_get_cpus(struct arm_cspmu *cspmu)
{}

static int arm_cspmu_get_cpus(struct arm_cspmu *cspmu)
{}

static int arm_cspmu_register_pmu(struct arm_cspmu *cspmu)
{}

static int arm_cspmu_device_probe(struct platform_device *pdev)
{}

static void arm_cspmu_device_remove(struct platform_device *pdev)
{}

static const struct platform_device_id arm_cspmu_id[] =;
MODULE_DEVICE_TABLE(platform, arm_cspmu_id);

static const struct of_device_id arm_cspmu_of_match[] =;
MODULE_DEVICE_TABLE(of, arm_cspmu_of_match);

static struct platform_driver arm_cspmu_driver =;

static void arm_cspmu_set_active_cpu(int cpu, struct arm_cspmu *cspmu)
{}

static int arm_cspmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{}

static int arm_cspmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
{}

static int __init arm_cspmu_init(void)
{}

static void __exit arm_cspmu_exit(void)
{}

int arm_cspmu_impl_register(const struct arm_cspmu_impl_match *impl_match)
{}
EXPORT_SYMBOL_GPL();

static int arm_cspmu_match_device(struct device *dev, const void *match)
{}

void arm_cspmu_impl_unregister(const struct arm_cspmu_impl_match *impl_match)
{}
EXPORT_SYMBOL_GPL();

module_init();
module_exit(arm_cspmu_exit);

MODULE_DESCRIPTION();
MODULE_LICENSE();