#define pr_fmt(fmt) …
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/nmi.h>
#include <asm/cpufeature.h>
#include <asm/hardirq.h>
#include <asm/apic.h>
#include "../perf_event.h"
/*
 * Table mapping generic perf hardware event ids (index: PERF_COUNT_HW_*)
 * to raw event encodings. Initializer elided in this view.
 */
static u64 zx_pmon_event_map[PERF_COUNT_HW_MAX] __read_mostly = …;
/*
 * Counter scheduling constraints — NOTE(review): the zxc/zxd prefixes
 * presumably select per-CPU-model tables; entries not visible here.
 */
static struct event_constraint zxc_event_constraints[] __read_mostly = …;
static struct event_constraint zxd_event_constraints[] __read_mostly = …;
/*
 * Cache event encoding tables, indexed by [cache][operation][result]
 * per the generic perf cache-event triple. Two tables — NOTE(review):
 * presumably one per supported CPU model (zxd/zxe); values elided.
 * __initconst: only referenced during init (copied into the live table).
 */
static __initconst const u64 zxd_hw_cache_event_ids
			[PERF_COUNT_HW_CACHE_MAX]
			[PERF_COUNT_HW_CACHE_OP_MAX]
			[PERF_COUNT_HW_CACHE_RESULT_MAX] = …;
static __initconst const u64 zxe_hw_cache_event_ids
			[PERF_COUNT_HW_CACHE_MAX]
			[PERF_COUNT_HW_CACHE_OP_MAX]
			[PERF_COUNT_HW_CACHE_RESULT_MAX] = …;
/*
 * Disable all PMU counters at once — NOTE(review): body elided;
 * presumably clears a global enable control MSR. Confirm against
 * the full source before relying on this.
 */
static void zhaoxin_pmu_disable_all(void)
{ … }
/*
 * Re-enable all PMU counters. @added: count of newly added events
 * (standard x86_pmu enable_all signature) — body elided here.
 */
static void zhaoxin_pmu_enable_all(int added)
{ … }
/*
 * Return the PMU overflow status word — NOTE(review): body elided;
 * presumably reads the global status MSR.
 */
static inline u64 zhaoxin_pmu_get_status(void)
{ … }
/*
 * Acknowledge (clear) the overflow bits given in @ack — body elided.
 */
static inline void zhaoxin_pmu_ack_status(u64 ack)
{ … }
/*
 * ZXC-specific variant of the status acknowledge — NOTE(review): the
 * difference from zhaoxin_pmu_ack_status() is not visible here; confirm
 * in the full source.
 */
static inline void zxc_pmu_ack_status(u64 ack)
{ … }
/*
 * Disable a fixed-function counter described by @hwc — body elided.
 */
static void zhaoxin_pmu_disable_fixed(struct hw_perf_event *hwc)
{ … }
/*
 * Disable a single perf event — NOTE(review): presumably dispatches to
 * the fixed or general-purpose counter path; body elided here.
 */
static void zhaoxin_pmu_disable_event(struct perf_event *event)
{ … }
/*
 * Enable a fixed-function counter described by @hwc — body elided.
 */
static void zhaoxin_pmu_enable_fixed(struct hw_perf_event *hwc)
{ … }
/*
 * Enable a single perf event (counterpart of zhaoxin_pmu_disable_event)
 * — body elided here.
 */
static void zhaoxin_pmu_enable_event(struct perf_event *event)
{ … }
/*
 * PMU interrupt handler. Returns nonzero if the interrupt was handled
 * (standard x86_pmu handle_irq contract) — NOTE(review): body elided;
 * confirm overflow-processing details against the full source.
 */
static int zhaoxin_pmu_handle_irq(struct pt_regs *regs)
{ … }
/*
 * Translate a generic hw event id into its raw encoding — NOTE(review):
 * presumably an indexed lookup in zx_pmon_event_map[]; body elided.
 */
static u64 zhaoxin_pmu_event_map(int hw_event)
{ … }
/*
 * Return the scheduling constraint that applies to @event on counter
 * index @idx — NOTE(review): presumably consults the model's
 * event_constraint table; body elided here.
 */
static struct event_constraint *
zhaoxin_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			struct perf_event *event)
{ … }
/*
 * sysfs "format" attributes exposed under the PMU's events directory
 * (PMU_FORMAT_ATTR generates show handlers) — attribute names elided
 * in this view.
 */
PMU_FORMAT_ATTR(…);
PMU_FORMAT_ATTR(…);
PMU_FORMAT_ATTR(…);
PMU_FORMAT_ATTR(…);
PMU_FORMAT_ATTR(…);
/* NULL-terminated attribute list wired into the x86_pmu definition. */
static struct attribute *zx_arch_formats_attr[] = …;
/*
 * Format @config for a sysfs event-attribute read into @page; returns
 * bytes written (ssize_t per sysfs convention) — body elided here.
 */
static ssize_t zhaoxin_event_sysfs_show(char *page, u64 config)
{ … }
/*
 * Template x86_pmu description for Zhaoxin CPUs; copied into the live
 * x86_pmu at init. Initializer elided in this view.
 */
static const struct x86_pmu zhaoxin_pmu __initconst = …;
/*
 * NOTE(review): presumably maps architectural event ids to availability
 * quirk info (see zhaoxin_arch_events_quirk below); contents elided.
 */
static const struct { … } zx_arch_events_map[] __initconst = …;
/*
 * Init-time quirk pass over zx_arch_events_map — NOTE(review): body
 * elided; presumably disables event-map entries the CPU does not
 * support. Confirm against the full source.
 */
static __init void zhaoxin_arch_events_quirk(void)
{ … }
/*
 * Driver entry point: detect the CPU model and install the Zhaoxin PMU
 * description. Returns 0 on success / negative errno on failure per
 * kernel convention — NOTE(review): body elided; model-selection logic
 * not visible here.
 */
__init int zhaoxin_pmu_init(void)
{ … }