/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <[email protected]>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licencing details see kernel-base/COPYING
 */
#ifndef _UAPI_LINUX_PERF_EVENT_H
#define _UAPI_LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id { … };

/*
 * attr.config layout for type PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE:
 *
 * PERF_TYPE_HARDWARE:     0xEEEEEEEE000000AA
 *                         AA: hardware event ID
 *                         EEEEEEEE: PMU type ID
 *
 * PERF_TYPE_HW_CACHE:     0xEEEEEEEE00DDCCBB
 *                         BB: hardware cache ID
 *                         CC: hardware cache op ID
 *                         DD: hardware cache op result ID
 *                         EEEEEEEE: PMU type ID
 *
 * If the PMU type ID is 0, PERF_TYPE_RAW will be applied.
 */
#define PERF_PMU_TYPE_SHIFT …
#define PERF_HW_EVENT_MASK …

/*
 * Generalized performance event event_id types, used by the
 * attr.event_id parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id { … };

/*
 * Generalized hardware cache events:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id { … };

enum perf_hw_cache_op_id { … };

enum perf_hw_cache_op_result_id { … };

/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events measure various
 * physical and software events of the kernel (and allow profiling them as
 * well):
 */
enum perf_sw_ids { … };

/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format { … };

#define PERF_SAMPLE_WEIGHT_TYPE …

/*
 * Values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set.
 *
 * If the user does not pass priv level information via branch_sample_type,
 * the kernel uses the event's priv level. Branch and event priv levels do
 * not have to match. The branch priv level is checked for permissions.
 *
 * The branch types can be combined, however BRANCH_ANY covers all types
 * of branches and therefore it supersedes all the other types.
 * An illustrative usage sketch follows the PERF_SAMPLE_BRANCH_PLM_ALL
 * definition below.
 */
enum perf_branch_sample_type_shift { … };

enum perf_branch_sample_type { … };

/*
 * Common flow change classification
 */
enum { … };

/*
 * Common branch speculation outcome classification
 */
enum { … };

enum { … };

enum { … };

#define PERF_BR_ARM64_FIQ …
#define PERF_BR_ARM64_DEBUG_HALT …
#define PERF_BR_ARM64_DEBUG_EXIT …
#define PERF_BR_ARM64_DEBUG_INST …
#define PERF_BR_ARM64_DEBUG_DATA …

#define PERF_SAMPLE_BRANCH_PLM_ALL …
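/*
 * Illustrative sketch (not part of the ABI): requesting branch stack
 * sampling on a generalized hardware event. PERF_TYPE_HARDWARE,
 * PERF_COUNT_HW_CPU_CYCLES, PERF_SAMPLE_IP, PERF_SAMPLE_BRANCH_STACK,
 * PERF_SAMPLE_BRANCH_USER and PERF_SAMPLE_BRANCH_ANY come from the
 * (elided) enums above; which branch types a given PMU implements is
 * hardware dependent.
 *
 *	struct perf_event_attr attr = {};
 *
 *	attr.size		= sizeof(attr);
 *	attr.type		= PERF_TYPE_HARDWARE;
 *	attr.config		= PERF_COUNT_HW_CPU_CYCLES;
 *	attr.sample_period	= 100000;
 *	attr.sample_type	= PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK;
 *	attr.branch_sample_type	= PERF_SAMPLE_BRANCH_USER |
 *				  PERF_SAMPLE_BRANCH_ANY;
 *
 * Because a priv level (USER) is passed in branch_sample_type, it is used
 * for the branch permission check; otherwise the event's priv level would
 * be used, as described above.
 */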
/*
 * Values to determine ABI of the registers dump.
 */
enum perf_sample_regs_abi { … };

/*
 * Values for the memory transaction event qualifier, mostly for
 * abort events. Multiple bits can be set.
 */
enum { … };

/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *	{ u64		value;
 *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64		id;           } && PERF_FORMAT_ID
 *	  { u64		lost;         } && PERF_FORMAT_LOST
 *	} && !PERF_FORMAT_GROUP
 *
 *	{ u64		nr;
 *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64		value;
 *	    { u64	id;           } && PERF_FORMAT_ID
 *	    { u64	lost;         } && PERF_FORMAT_LOST
 *	  } cntr[nr];
 *	} && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format { … };

#define PERF_ATTR_SIZE_VER0 …
#define PERF_ATTR_SIZE_VER1 …
#define PERF_ATTR_SIZE_VER2 …
#define PERF_ATTR_SIZE_VER3 …	/* add: sample_stack_user */
#define PERF_ATTR_SIZE_VER4 …
#define PERF_ATTR_SIZE_VER5 …
#define PERF_ATTR_SIZE_VER6 …
#define PERF_ATTR_SIZE_VER7 …
#define PERF_ATTR_SIZE_VER8 …

/*
 * Hardware event_id to monitor via a performance monitoring event:
 *
 * @sample_max_stack: Max number of frame pointers in a callchain;
 *		      should be < /proc/sys/kernel/perf_event_max_stack
 */
struct perf_event_attr { … };

/*
 * Structure used by the PERF_EVENT_IOC_QUERY_BPF command below to query
 * BPF programs attached to the same perf tracepoint as the given perf
 * event.
 */
struct perf_event_query_bpf { … };

/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE …
#define PERF_EVENT_IOC_DISABLE …
#define PERF_EVENT_IOC_REFRESH …
#define PERF_EVENT_IOC_RESET …
#define PERF_EVENT_IOC_PERIOD …
#define PERF_EVENT_IOC_SET_OUTPUT …
#define PERF_EVENT_IOC_SET_FILTER …
#define PERF_EVENT_IOC_ID …
#define PERF_EVENT_IOC_SET_BPF …
#define PERF_EVENT_IOC_PAUSE_OUTPUT …
#define PERF_EVENT_IOC_QUERY_BPF …
#define PERF_EVENT_IOC_MODIFY_ATTRIBUTES …

enum perf_event_ioc_flags { … };

/*
 * Structure of the page that can be mapped via mmap().
 */
struct perf_event_mmap_page { … };

/*
 * The current state of perf_event_header::misc bits usage:
 * ('|' used bit, '-' unused bit)
 *
 *  012         CDEF
 *  |||---------||||
 *
 * Where:
 *  0-2	CPUMODE_MASK
 *
 *  C	PROC_MAP_PARSE_TIMEOUT
 *  D	MMAP_DATA / COMM_EXEC / FORK_EXEC / SWITCH_OUT
 *  E	MMAP_BUILD_ID / EXACT_IP / SCHED_OUT_PREEMPT
 *  F	(reserved)
 */
#define PERF_RECORD_MISC_CPUMODE_MASK …
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN …
#define PERF_RECORD_MISC_KERNEL …
#define PERF_RECORD_MISC_USER …
#define PERF_RECORD_MISC_HYPERVISOR …
#define PERF_RECORD_MISC_GUEST_KERNEL …
#define PERF_RECORD_MISC_GUEST_USER …
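/*
 * Illustrative sketch (not part of the ABI): classifying a record by CPU
 * mode using the misc bits above. 'header' stands for a
 * struct perf_event_header read from the mmap'ed ring buffer, and the
 * counters are hypothetical:
 *
 *	__u16 cpumode = header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
 *
 *	if (cpumode == PERF_RECORD_MISC_USER)
 *		nr_user_samples++;
 *	else if (cpumode == PERF_RECORD_MISC_KERNEL)
 *		nr_kernel_samples++;
 */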
/*
 * Indicates that /proc/PID/maps parsing was truncated by a timeout.
 */
#define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT …

/*
 * The following PERF_RECORD_MISC_* flags are used on different events, so
 * they can reuse the same bit position:
 *
 *   PERF_RECORD_MISC_MMAP_DATA  - PERF_RECORD_MMAP* events
 *   PERF_RECORD_MISC_COMM_EXEC  - PERF_RECORD_COMM event
 *   PERF_RECORD_MISC_FORK_EXEC  - PERF_RECORD_FORK event (perf internal)
 *   PERF_RECORD_MISC_SWITCH_OUT - PERF_RECORD_SWITCH* events
 */
#define PERF_RECORD_MISC_MMAP_DATA …
#define PERF_RECORD_MISC_COMM_EXEC …
#define PERF_RECORD_MISC_FORK_EXEC …
#define PERF_RECORD_MISC_SWITCH_OUT …

/*
 * The PERF_RECORD_MISC_* flags below are safely reused for the following
 * events:
 *
 *   PERF_RECORD_MISC_EXACT_IP           - PERF_RECORD_SAMPLE of precise events
 *   PERF_RECORD_MISC_SWITCH_OUT_PREEMPT - PERF_RECORD_SWITCH* events
 *   PERF_RECORD_MISC_MMAP_BUILD_ID      - PERF_RECORD_MMAP2 event
 *
 *
 * PERF_RECORD_MISC_EXACT_IP:
 *   Indicates that the content of PERF_SAMPLE_IP points to
 *   the actual instruction that triggered the event. See also
 *   perf_event_attr::precise_ip.
 *
 * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT:
 *   Indicates that the thread was preempted in TASK_RUNNING state.
 *
 * PERF_RECORD_MISC_MMAP_BUILD_ID:
 *   Indicates that the mmap2 event carries build ID data.
 */
#define PERF_RECORD_MISC_EXACT_IP …
#define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT …
#define PERF_RECORD_MISC_MMAP_BUILD_ID …

/*
 * Reserve the last bit to indicate some extended misc field.
 */
#define PERF_RECORD_MISC_EXT_RESERVED …

struct perf_event_header { … };

struct perf_ns_link_info { … };

enum { … };

enum perf_event_type { … };

enum perf_record_ksymbol_type { … };

#define PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER …

enum perf_bpf_event_type { … };

#define PERF_MAX_STACK_DEPTH …
#define PERF_MAX_CONTEXTS_PER_STACK …

enum perf_callchain_context { … };

/**
 * PERF_RECORD_AUX::flags bits
 */
#define PERF_AUX_FLAG_TRUNCATED …
#define PERF_AUX_FLAG_OVERWRITE …
#define PERF_AUX_FLAG_PARTIAL …
#define PERF_AUX_FLAG_COLLISION …
#define PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK …

/* CoreSight PMU AUX buffer formats */
#define PERF_AUX_FLAG_CORESIGHT_FORMAT_CORESIGHT …
#define PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW …

#define PERF_FLAG_FD_NO_GROUP …
#define PERF_FLAG_FD_OUTPUT …
#define PERF_FLAG_PID_CGROUP …
#define PERF_FLAG_FD_CLOEXEC …

#if defined(__LITTLE_ENDIAN_BITFIELD)
union perf_mem_data_src { … };
#elif defined(__BIG_ENDIAN_BITFIELD)
union perf_mem_data_src {
	__u64 val;
	struct {
		__u64	mem_rsvd:18,
			mem_hops:3,	/* hop level */
			mem_blk:3,	/* access blocked */
			mem_snoopx:2,	/* snoop mode, ext */
			mem_remote:1,	/* remote */
			mem_lvl_num:4,	/* memory hierarchy level number */
			mem_dtlb:7,	/* tlb access */
			mem_lock:2,	/* lock instr */
			mem_snoop:5,	/* snoop mode */
			mem_lvl:14,	/* memory hierarchy level */
			mem_op:5;	/* type of opcode */
	};
};
#else
#error "Unknown endianness"
#endif

/* type of opcode (load/store/prefetch, code) */
#define PERF_MEM_OP_NA …
#define PERF_MEM_OP_LOAD …
#define PERF_MEM_OP_STORE …
#define PERF_MEM_OP_PFETCH …
#define PERF_MEM_OP_EXEC …
#define PERF_MEM_OP_SHIFT …
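/*
 * Illustrative sketch (not part of the ABI): decoding a data source value
 * delivered via PERF_SAMPLE_DATA_SRC. 'sample_data_src' stands for the
 * __u64 taken from the sample record and 'nr_load_misses' is a
 * hypothetical counter. The check assumes that each bitfield of the union
 * holds the corresponding PERF_MEM_* flag bits for that field (the
 * PERF_MEM_OP_* flags above and the PERF_MEM_LVL_* flags below, whose
 * numeric values are elided here):
 *
 *	union perf_mem_data_src dsrc = { .val = sample_data_src };
 *
 *	if ((dsrc.mem_op & PERF_MEM_OP_LOAD) &&
 *	    (dsrc.mem_lvl & PERF_MEM_LVL_MISS))
 *		nr_load_misses++;
 */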
/*
 * The PERF_MEM_LVL_* namespace is being deprecated to some extent in
 * favour of the newer composite PERF_MEM_{LVLNUM_,REMOTE_,SNOOPX_} fields.
 * It is kept in order not to break existing ABIs.
 *
 * memory hierarchy (memory level, hit or miss)
 */
#define PERF_MEM_LVL_NA …
#define PERF_MEM_LVL_HIT …
#define PERF_MEM_LVL_MISS …
#define PERF_MEM_LVL_L1 …
#define PERF_MEM_LVL_LFB …
#define PERF_MEM_LVL_L2 …
#define PERF_MEM_LVL_L3 …
#define PERF_MEM_LVL_LOC_RAM …
#define PERF_MEM_LVL_REM_RAM1 …
#define PERF_MEM_LVL_REM_RAM2 …
#define PERF_MEM_LVL_REM_CCE1 …
#define PERF_MEM_LVL_REM_CCE2 …
#define PERF_MEM_LVL_IO …
#define PERF_MEM_LVL_UNC …
#define PERF_MEM_LVL_SHIFT …

#define PERF_MEM_REMOTE_REMOTE …
#define PERF_MEM_REMOTE_SHIFT …

#define PERF_MEM_LVLNUM_L1 …
#define PERF_MEM_LVLNUM_L2 …
#define PERF_MEM_LVLNUM_L3 …
#define PERF_MEM_LVLNUM_L4 …
#define PERF_MEM_LVLNUM_L2_MHB …
#define PERF_MEM_LVLNUM_MSC …
/* 0x7 available */
#define PERF_MEM_LVLNUM_UNC …
#define PERF_MEM_LVLNUM_CXL …
#define PERF_MEM_LVLNUM_IO …
#define PERF_MEM_LVLNUM_ANY_CACHE …
#define PERF_MEM_LVLNUM_LFB …
#define PERF_MEM_LVLNUM_RAM …
#define PERF_MEM_LVLNUM_PMEM …
#define PERF_MEM_LVLNUM_NA …
#define PERF_MEM_LVLNUM_SHIFT …

/* snoop mode */
#define PERF_MEM_SNOOP_NA …
#define PERF_MEM_SNOOP_NONE …
#define PERF_MEM_SNOOP_HIT …
#define PERF_MEM_SNOOP_MISS …
#define PERF_MEM_SNOOP_HITM …
#define PERF_MEM_SNOOP_SHIFT …

#define PERF_MEM_SNOOPX_FWD …
#define PERF_MEM_SNOOPX_PEER …
#define PERF_MEM_SNOOPX_SHIFT …

/* locked instruction */
#define PERF_MEM_LOCK_NA …
#define PERF_MEM_LOCK_LOCKED …
#define PERF_MEM_LOCK_SHIFT …

/* TLB access */
#define PERF_MEM_TLB_NA …
#define PERF_MEM_TLB_HIT …
#define PERF_MEM_TLB_MISS …
#define PERF_MEM_TLB_L1 …
#define PERF_MEM_TLB_L2 …
#define PERF_MEM_TLB_WK …
#define PERF_MEM_TLB_OS …
#define PERF_MEM_TLB_SHIFT …

/* Access blocked */
#define PERF_MEM_BLK_NA …
#define PERF_MEM_BLK_DATA …
#define PERF_MEM_BLK_ADDR …
#define PERF_MEM_BLK_SHIFT …

/* hop level */
#define PERF_MEM_HOPS_0 …
#define PERF_MEM_HOPS_1 …
#define PERF_MEM_HOPS_2 …
#define PERF_MEM_HOPS_3 …
/* 5-7 available */
#define PERF_MEM_HOPS_SHIFT …

#define PERF_MEM_S(a, s) …

/*
 * single taken branch record layout:
 *
 *      from: source instruction (may not always be a branch insn)
 *        to: branch target
 *   mispred: branch target was mispredicted
 * predicted: branch target was predicted
 *
 * Support for mispred, predicted is optional. If it is not supported,
 * mispred = predicted = 0.
 *
 *     in_tx: running in a hardware transaction
 *     abort: aborting a hardware transaction
 *    cycles: cycles from last branch (or 0 if not supported)
 *      type: branch type
 *      spec: branch speculation info (or 0 if not supported)
 */
struct perf_branch_entry { … };

/* Size of used info bits in struct perf_branch_entry */
#define PERF_BRANCH_ENTRY_INFO_BITS_MAX …

union perf_sample_weight { … };

#endif /* _UAPI_LINUX_PERF_EVENT_H */