linux/kernel/bpf/ringbuf.c

#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/irq_work.h>
#include <linux/slab.h>
#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/kmemleak.h>
#include <uapi/linux/btf.h>
#include <linux/btf_ids.h>

#define RINGBUF_CREATE_FLAG_MASK (BPF_F_NUMA_NODE)

/* non-mmap()'able part of bpf_ringbuf (everything up to consumer page) */
#define RINGBUF_PGOFF \
	(offsetof(struct bpf_ringbuf, consumer_pos) >> PAGE_SHIFT)
/* consumer page and producer page */
#define RINGBUF_POS_PAGES 2
#define RINGBUF_NR_META_PAGES (RINGBUF_PGOFF + RINGBUF_POS_PAGES)

#define RINGBUF_MAX_RECORD_SZ (UINT_MAX/4)

struct bpf_ringbuf {};
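
/* Roughly, per the upstream layout, the (elided) struct bpf_ringbuf holds the
 * epoll waitqueue and the irq_work used for consumer notifications, the
 * producer spinlock, the size mask, and the page-aligned consumer_pos and
 * producer_pos counters followed by the data area. consumer_pos and
 * producer_pos each sit on their own page so they can be mmap()'ed to
 * user space with different access permissions.
 */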

struct bpf_ringbuf_map {};

/* 8-byte ring buffer record header structure */
struct bpf_ringbuf_hdr {};
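
/* A sketch of the expected header layout (the BPF_RINGBUF_* flags named here
 * are the real UAPI constants from include/uapi/linux/bpf.h):
 *
 *	u32 len;     - record payload length in the low bits, with
 *	               BPF_RINGBUF_BUSY_BIT and BPF_RINGBUF_DISCARD_BIT
 *	               occupying the two top bits
 *	u32 pg_off;  - offset, in pages, from this header back to the start
 *	               of struct bpf_ringbuf (see bpf_ringbuf_rec_pg_off())
 */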

static struct bpf_ringbuf *bpf_ringbuf_area_alloc(size_t data_sz, int numa_node)
{}
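
/* In the upstream implementation, bpf_ringbuf_area_alloc() allocates the
 * meta pages plus the data pages and maps each data page twice, back to
 * back, in vmalloc space, so that a record wrapping around the end of the
 * buffer still appears virtually contiguous to both producer and consumer.
 */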

static void bpf_ringbuf_notify(struct irq_work *work)
{}

/* Maximum size of ring buffer area is limited by 32-bit page offset within
 * record header, counted in pages. Reserve 8 bits for extensibility, and
 * take into account a few extra pages for consumer/producer pages and
 * non-mmap()'able parts; the current maximum size would be:
 *
 *     (((1ULL << 24) - RINGBUF_POS_PAGES - RINGBUF_PGOFF) * PAGE_SIZE)
 *
 * With 4KB pages this gives a 64GB limit, which seems plenty for a single
 * ring buffer. Given that the maximum value of data_sz is (4GB - 1), there
 * will be no overflow, so the size limit is only noted here and no extra
 * runtime check is added.
 */
static struct bpf_ringbuf *bpf_ringbuf_alloc(size_t data_sz, int numa_node)
{}

static struct bpf_map *ringbuf_map_alloc(union bpf_attr *attr)
{}
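
/* ringbuf_map_alloc() is expected to enforce the documented constraints for
 * BPF_MAP_TYPE_RINGBUF / BPF_MAP_TYPE_USER_RINGBUF: key_size and value_size
 * must be 0, max_entries gives the data area size and must be a page-aligned
 * power of 2, and the only accepted map flag is BPF_F_NUMA_NODE (see
 * RINGBUF_CREATE_FLAG_MASK above).
 */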

static void bpf_ringbuf_free(struct bpf_ringbuf *rb)
{}

static void ringbuf_map_free(struct bpf_map *map)
{}

static void *ringbuf_map_lookup_elem(struct bpf_map *map, void *key)
{}

static long ringbuf_map_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 flags)
{}

static long ringbuf_map_delete_elem(struct bpf_map *map, void *key)
{}

static int ringbuf_map_get_next_key(struct bpf_map *map, void *key,
				    void *next_key)
{}

static int ringbuf_map_mmap_kern(struct bpf_map *map, struct vm_area_struct *vma)
{}

static int ringbuf_map_mmap_user(struct bpf_map *map, struct vm_area_struct *vma)
{}
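
/* In the upstream implementation, mmap() permissions differ between the two
 * flavors. For BPF_MAP_TYPE_RINGBUF (kernel producer), user space may map
 * only the consumer_pos page writable; the producer_pos page and the data
 * pages are read-only from user space. For BPF_MAP_TYPE_USER_RINGBUF
 * (user-space producer) it is the other way around: the consumer_pos page
 * must stay read-only for user space, while producer_pos and the data area
 * may be mapped writable.
 */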

static unsigned long ringbuf_avail_data_sz(struct bpf_ringbuf *rb)
{}

static u32 ringbuf_total_data_sz(const struct bpf_ringbuf *rb)
{}

static __poll_t ringbuf_map_poll_kern(struct bpf_map *map, struct file *filp,
				      struct poll_table_struct *pts)
{}

static __poll_t ringbuf_map_poll_user(struct bpf_map *map, struct file *filp,
				      struct poll_table_struct *pts)
{}
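
/* Poll semantics also differ: the kernel-producer ring buffer reports
 * EPOLLIN | EPOLLRDNORM when unconsumed data is available, while the
 * user-producer ring buffer reports EPOLLOUT | EPOLLWRNORM when there is
 * free space for user space to produce into.
 */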

static u64 ringbuf_map_mem_usage(const struct bpf_map *map)
{}

BTF_ID_LIST_SINGLE(ringbuf_map_btf_ids, struct, bpf_ringbuf_map)
const struct bpf_map_ops ringbuf_map_ops =;

BTF_ID_LIST_SINGLE(user_ringbuf_map_btf_ids, struct, bpf_ringbuf_map)
const struct bpf_map_ops user_ringbuf_map_ops =;

/* Given a pointer to the ring buffer record metadata and struct bpf_ringbuf
 * itself, calculate the offset from the record metadata to the ring buffer
 * in pages, rounded down. This page offset is stored at offset 4 of the
 * record metadata header and allows restoring the struct bpf_ringbuf *
 * from a record pointer.
 */
static size_t bpf_ringbuf_rec_pg_off(struct bpf_ringbuf *rb,
				     struct bpf_ringbuf_hdr *hdr)
{}

/* Given a pointer to a ring buffer record header, restore the pointer to the
 * struct bpf_ringbuf itself by using the page offset stored at offset 4 of
 * the header.
 */
static struct bpf_ringbuf *
bpf_ringbuf_restore_from_rec(struct bpf_ringbuf_hdr *hdr)
{}
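
/* A sketch of the two conversions above (not necessarily the exact upstream
 * code): the page offset is computed as
 *
 *	((void *)hdr - (void *)rb) >> PAGE_SHIFT
 *
 * and the ring buffer is restored by masking the header address down to a
 * page boundary and walking back pg_off pages:
 *
 *	(void *)(((unsigned long)hdr & PAGE_MASK) -
 *		 ((unsigned long)hdr->pg_off << PAGE_SHIFT))
 */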

static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
{}
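
/* Roughly, the reservation path takes rb->spinlock (only trylock when called
 * from NMI context, to avoid deadlocks), checks that advancing producer_pos
 * by the 8-byte-aligned record length would not overrun the unconsumed data,
 * writes the 8-byte header with the BUSY bit set and the page offset, bumps
 * producer_pos, and returns the pointer just past the header.
 */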

BPF_CALL_3(bpf_ringbuf_reserve, struct bpf_map *, map, u64, size, u64, flags)
{}

const struct bpf_func_proto bpf_ringbuf_reserve_proto =;

static void bpf_ringbuf_commit(void *sample, u64 flags, bool discard)
{}
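
/* Roughly, committing clears the BUSY bit in the record header (additionally
 * setting the DISCARD bit when discarding) with release semantics, then
 * decides whether to kick the irq_work-based wakeup of poll/epoll waiters:
 * never with BPF_RB_NO_WAKEUP, always with BPF_RB_FORCE_WAKEUP, and by
 * default only when the committed record is the one the consumer will read
 * next.
 */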

BPF_CALL_2(bpf_ringbuf_submit, void *, sample, u64, flags)
{}

const struct bpf_func_proto bpf_ringbuf_submit_proto =;

BPF_CALL_2(bpf_ringbuf_discard, void *, sample, u64, flags)
{}

const struct bpf_func_proto bpf_ringbuf_discard_proto =;

BPF_CALL_4(bpf_ringbuf_output, struct bpf_map *, map, void *, data, u64, size,
	   u64, flags)
{}
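
/* bpf_ringbuf_output() is the copy-based variant: conceptually a reserve +
 * memcpy + submit in one call. It supports data whose size is not known at
 * verification time, at the cost of an extra copy, and accepts the same
 * BPF_RB_NO_WAKEUP / BPF_RB_FORCE_WAKEUP notification flags.
 */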

const struct bpf_func_proto bpf_ringbuf_output_proto =;

BPF_CALL_2(bpf_ringbuf_query, struct bpf_map *, map, u64, flags)
{}
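
/* bpf_ringbuf_query() returns a piece of ring buffer state selected by flags:
 * BPF_RB_AVAIL_DATA (amount of unconsumed data), BPF_RB_RING_SIZE (size of
 * the data area), BPF_RB_CONS_POS and BPF_RB_PROD_POS (snapshots of the
 * consumer/producer positions, which may be stale by the time they are used).
 */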

const struct bpf_func_proto bpf_ringbuf_query_proto =;

BPF_CALL_4(bpf_ringbuf_reserve_dynptr, struct bpf_map *, map, u32, size, u64, flags,
	   struct bpf_dynptr_kern *, ptr)
{}

const struct bpf_func_proto bpf_ringbuf_reserve_dynptr_proto =;

BPF_CALL_2(bpf_ringbuf_submit_dynptr, struct bpf_dynptr_kern *, ptr, u64, flags)
{}

const struct bpf_func_proto bpf_ringbuf_submit_dynptr_proto =;

BPF_CALL_2(bpf_ringbuf_discard_dynptr, struct bpf_dynptr_kern *, ptr, u64, flags)
{}

const struct bpf_func_proto bpf_ringbuf_discard_dynptr_proto =;

static int __bpf_user_ringbuf_peek(struct bpf_ringbuf *rb, void **sample, u32 *size)
{}

static void __bpf_user_ringbuf_sample_release(struct bpf_ringbuf *rb, size_t size, u64 flags)
{}

BPF_CALL_4(bpf_user_ringbuf_drain, struct bpf_map *, map,
	   void *, callback_fn, void *, callback_ctx, u64, flags)
{}
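
/* bpf_user_ringbuf_drain() consumes committed samples from a
 * BPF_MAP_TYPE_USER_RINGBUF, invoking callback_fn(dynptr, callback_ctx) for
 * each one; the callback returns 0 to continue or 1 to stop iterating early.
 * The helper returns the number of drained samples (or a negative error) and
 * honors BPF_RB_NO_WAKEUP / BPF_RB_FORCE_WAKEUP when deciding whether to
 * wake up user-space producers blocked in poll/epoll.
 */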

const struct bpf_func_proto bpf_user_ringbuf_drain_proto =;