// SPDX-License-Identifier: GPL-2.0-only
/* linux/kernel/bpf/stackmap.c
 *
 * Copyright (c) 2016 Facebook
 */
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/stacktrace.h>
#include <linux/perf_event.h>
#include <linux/btf_ids.h>
#include <linux/buildid.h>
#include "percpu_freelist.h"
#include "mmap_unlock_work.h"

#define STACK_CREATE_FLAG_MASK					\
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY |	\
	 BPF_F_STACK_BUILD_ID)

/* One collected stack trace; taken from the per-map element pool and
 * linked back into the freelist via fnode when unused.
 */
struct stack_map_bucket {
	struct pcpu_freelist_node fnode;
	u32 hash;
	u32 nr;
	u64 data[];
};

struct bpf_stack_map {
	struct bpf_map map;
	void *elems;
	struct pcpu_freelist freelist;
	u32 n_buckets;
	struct stack_map_bucket *buckets[];
};

static inline bool stack_map_use_build_id(struct bpf_map *map)
{
	return (map->map_flags & BPF_F_STACK_BUILD_ID);
}

/* Size of one stored entry: a raw IP, or a (build id, offset) pair when
 * the map was created with BPF_F_STACK_BUILD_ID.
 */
static inline int stack_map_data_size(struct bpf_map *map)
{
	return stack_map_use_build_id(map) ?
		sizeof(struct bpf_stack_build_id) : sizeof(u64);
}
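
/* Usage sketch (not part of this file; map name and sizes are
 * illustrative): creating a build-id flavored stack map from user space
 * with libbpf's bpf_map_create(). The value size must be a multiple of
 * the per-entry size computed by stack_map_data_size() above.
 *
 *	LIBBPF_OPTS(bpf_map_create_opts, opts,
 *		    .map_flags = BPF_F_STACK_BUILD_ID);
 *	int fd = bpf_map_create(BPF_MAP_TYPE_STACK_TRACE, "stacks",
 *				sizeof(__u32),
 *				sizeof(struct bpf_stack_build_id) * 32,
 *				1024, &opts);
 */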

/* Allocate one contiguous area for all max_entries elements and seed the
 * per-cpu freelist with them; buckets are handed out from this pool.
 */
static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
{}

/* Called from syscall: validate attr (4-byte key, value_size a multiple
 * of the per-entry size), round the bucket count up to a power of two,
 * then allocate the map plus bucket pointer array and preallocate elements.
 */
static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
{}

/* Translate an array of user-space IPs into (build id, file offset) pairs
 * by walking the task's VMAs; entries fall back to BPF_STACK_BUILD_ID_IP
 * when the mmap lock cannot be taken or no build id is found.
 */
static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
					  u64 *ips, u32 trace_nr, bool user)
{}

/* Collect a kernel callchain for a task other than current; returns NULL
 * when no callchain buffer is available or stack walking is unsupported.
 */
static struct perf_callchain_entry *
get_callchain_entry_for_task(struct task_struct *task, u32 max_depth)
{}

/* Hash the trace (jhash2) to pick a bucket, reuse an existing bucket that
 * compares equal (by hash alone under BPF_F_FAST_STACK_CMP, else memcmp),
 * or take a fresh element from the freelist. Returns the stack id, or
 * -EEXIST/-ENOMEM depending on flags and freelist state.
 */
static long __bpf_get_stackid(struct bpf_map *map,
			      struct perf_callchain_entry *trace, u64 flags)
{}

/* Fetch the callchain with get_perf_callchain(), honoring BPF_F_USER_STACK
 * and the skip count in the low bits of flags, then defer to
 * __bpf_get_stackid().
 */
BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags)
{}

const struct bpf_func_proto bpf_get_stackid_proto = {
	.func		= bpf_get_stackid,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};
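
/* Usage sketch (not part of this file; program and map names are
 * illustrative): a minimal libbpf-style tracing program that records the
 * current kernel and user stacks into a BPF_MAP_TYPE_STACK_TRACE map.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_STACK_TRACE);
 *		__uint(max_entries, 1024);
 *		__type(key, __u32);
 *		__uint(value_size, PERF_MAX_STACK_DEPTH * sizeof(__u64));
 *	} stacks SEC(".maps");
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int record_stacks(struct pt_regs *ctx)
 *	{
 *		long kern_id = bpf_get_stackid(ctx, &stacks, 0);
 *		long user_id = bpf_get_stackid(ctx, &stacks,
 *					       BPF_F_USER_STACK |
 *					       BPF_F_FAST_STACK_CMP);
 *
 *		if (kern_id >= 0 && user_id >= 0)
 *			bpf_printk("stacks %ld %ld", kern_id, user_id);
 *		return 0;
 *	}
 */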

/* Number of leading kernel addresses in the callchain, i.e. entries
 * before the PERF_CONTEXT_USER marker.
 */
static __u64 count_kernel_ip(struct perf_callchain_entry *trace)
{
	__u64 nr_kernel = 0;

	while (nr_kernel < trace->nr) {
		if (trace->ip[nr_kernel] == PERF_CONTEXT_USER)
			break;
		nr_kernel++;
	}
	return nr_kernel;
}

/* perf_event variant: reuse the callchain perf already collected for the
 * sample (ctx->data->callchain) when present, trimming or keeping kernel
 * entries according to BPF_F_USER_STACK, instead of unwinding again.
 */
BPF_CALL_3(bpf_get_stackid_pe, struct bpf_perf_event_data_kern *, ctx,
	   struct bpf_map *, map, u64, flags)
{}

const struct bpf_func_proto bpf_get_stackid_proto_pe = {
	.func		= bpf_get_stackid_pe,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

/* Common core of the bpf_get_stack-style helpers: validate flags, derive
 * the maximum depth from the buffer size and per-entry size, obtain a
 * callchain (trace_in if supplied, else unwind regs or task), optionally
 * convert user IPs to build-id form, then copy into buf and zero the
 * remainder. Returns the number of bytes written or a negative error.
 */
static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
			    struct perf_callchain_entry *trace_in,
			    void *buf, u32 size, u64 flags)
{}

BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size,
	   u64, flags)
{
	return __bpf_get_stack(regs, NULL, NULL, buf, size, flags);
}

const struct bpf_func_proto bpf_get_stack_proto = {
	.func		= bpf_get_stack,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};
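
/* Usage sketch (not part of this file; the depth of 32 is illustrative):
 * unlike bpf_get_stackid(), this helper needs no map; it copies raw IPs
 * (or struct bpf_stack_build_id entries under BPF_F_USER_BUILD_ID)
 * straight into a caller-supplied buffer.
 *
 *	__u64 ips[32];
 *	long nbytes = bpf_get_stack(ctx, ips, sizeof(ips), BPF_F_USER_STACK);
 *
 *	if (nbytes > 0)
 *		;	(nbytes / sizeof(__u64) user frames are valid)
 */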

BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf,
	   u32, size, u64, flags)
{
	return __bpf_get_stack(NULL, task, NULL, buf, size, flags);
}

const struct bpf_func_proto bpf_get_task_stack_proto = {
	.func		= bpf_get_task_stack,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};
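
/* Usage sketch (not part of this file; names are illustrative): because
 * this helper takes an explicit task_struct, it can walk stacks of tasks
 * other than current, e.g. from a task iterator program.
 *
 *	SEC("iter/task")
 *	int dump_task_stack(struct bpf_iter__task *ctx)
 *	{
 *		struct task_struct *task = ctx->task;
 *		__u64 ips[32];
 *
 *		if (!task)
 *			return 0;
 *		if (bpf_get_task_stack(task, ips, sizeof(ips), 0) > 0)
 *			bpf_seq_write(ctx->meta->seq, ips, sizeof(ips));
 *		return 0;
 *	}
 */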

/* perf_event variant of bpf_get_stack(): prefer the callchain perf has
 * already collected for the sample, falling back to a fresh unwind.
 */
BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx,
	   void *, buf, u32, size, u64, flags)
{}

const struct bpf_func_proto bpf_get_stack_proto_pe = {
	.func		= bpf_get_stack_pe,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

/* Called from eBPF program: direct lookups are not supported; programs
 * obtain stack ids via bpf_get_stackid() and user space reads them via
 * the syscall path (bpf_stackmap_copy() below).
 */
static void *stack_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* Called from syscall: copy the bucket for the given stack id into value,
 * zero-padding up to value_size. The bucket is xchg()'d out of the table
 * for the duration of the copy so a concurrent update cannot modify it.
 */
int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{}

/* Iterate stack ids in bucket order, skipping empty slots; returns -ENOENT
 * after the last occupied bucket.
 */
static int stack_map_get_next_key(struct bpf_map *map, void *key,
				  void *next_key)
{}

/* Stack ids are assigned by bpf_get_stackid(); explicit updates are
 * rejected.
 */
static long stack_map_update_elem(struct bpf_map *map, void *key, void *value,
				  u64 map_flags)
{
	return -EINVAL;
}

/* Called from syscall or from eBPF program: swap the bucket out of the
 * table and push it back onto the freelist for reuse.
 */
static long stack_map_delete_elem(struct bpf_map *map, void *key)
{}

/* Called when map->refcnt goes to zero, either from workqueue or from
 * syscall: free the element pool, destroy the freelist, release the map
 * and drop the callchain buffer reference.
 */
static void stack_map_free(struct bpf_map *map)
{}

/* Account the map struct, the bucket pointer array and the preallocated
 * elements (bucket header plus value) for all max_entries slots.
 */
static u64 stack_map_mem_usage(const struct bpf_map *map)
{}

BTF_ID_LIST_SINGLE(stack_trace_map_btf_ids, struct, bpf_stack_map)
const struct bpf_map_ops stack_trace_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = stack_map_alloc,
	.map_free = stack_map_free,
	.map_get_next_key = stack_map_get_next_key,
	.map_lookup_elem = stack_map_lookup_elem,
	.map_update_elem = stack_map_update_elem,
	.map_delete_elem = stack_map_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = stack_map_mem_usage,
	.map_btf_id = &stack_trace_map_btf_ids[0],
};
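
/* Usage sketch (not part of this file; the fd and id variables are
 * illustrative): from user space the map behaves like a hash of
 * stack id -> array of entries, read with the ordinary lookup call.
 *
 *	__u64 ips[PERF_MAX_STACK_DEPTH] = {};
 *
 *	if (!bpf_map_lookup_elem(stack_map_fd, &stack_id, ips)) {
 *		for (int i = 0; i < PERF_MAX_STACK_DEPTH && ips[i]; i++)
 *			printf("%#llx\n", ips[i]);
 *	}
 *
 * Updates fail with -EINVAL (see stack_map_update_elem() above), while
 * bpf_map_delete_elem() recycles the bucket through the freelist.
 */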