#include <linux/bitops.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <net/xdp.h>
#include <net/hotdata.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <trace/events/xdp.h>
#include <linux/btf_ids.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
/* Number of xdp_frames staged per-CPU before a bulk push to the remote
 * CPU's ring (value elided in this view).
 */
#define CPU_MAP_BULK_SIZE …
/* Forward declarations: the bulk queue and map types reference each other. */
struct bpf_cpu_map_entry;
struct bpf_cpu_map;
/* Per-source-CPU staging buffer of frame pointers awaiting a bulk flush
 * (definition elided).
 */
struct xdp_bulk_queue { … };
/* One CPUMAP slot: per-destination-CPU state — presumably the ptr_ring,
 * kthread, and optional attached BPF program (definition elided; confirm
 * against full source).
 */
struct bpf_cpu_map_entry { … };
/* The map itself: embeds struct bpf_map plus the per-slot entry table
 * (definition elided).
 */
struct bpf_cpu_map { … };
/* .map_alloc callback for BPF_MAP_TYPE_CPUMAP: validates @attr and
 * allocates the map structure. Returns an ERR_PTR on failure per bpf_map_ops
 * convention (body elided — confirm exact validation rules in full source).
 */
static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
{ … }
/* Drain @ring and release any frames still queued on it, used during entry
 * teardown (body elided — presumably frees leftover xdp_frames/skbs; verify).
 */
static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
{ … }
/* Run the entry's attached BPF program over each skb on @listp, updating
 * @stats; skbs the program drops are presumably unlinked from the list
 * (body elided — confirm verdict handling in full source).
 */
static void cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu,
struct list_head *listp,
struct xdp_cpumap_stats *stats)
{ … }
/* Run the entry's BPF program over @n xdp_frames in @frames, compacting the
 * array to the surviving frames and accounting results in @stats. Returns
 * the number of frames kept (body elided — TODO confirm return semantics).
 */
static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu,
void **frames, int n,
struct xdp_cpumap_stats *stats)
{ … }
/* Max frames dequeued from the ring per kthread iteration (value elided). */
#define CPUMAP_BATCH …
/* Dispatch one batch through the entry's BPF program: @xdp_n xdp_frames in
 * @frames plus any skbs on @list, folding results into @stats. Returns the
 * number of frames remaining to be passed up the stack (body elided —
 * presumably wraps cpu_map_bpf_prog_run_xdp/_skb; verify).
 */
static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
int xdp_n, struct xdp_cpumap_stats *stats,
struct list_head *list)
{ … }
/* Main loop of the per-destination-CPU kthread: repeatedly pulls batches of
 * frames off the entry's ring, runs the optional BPF program, builds skbs,
 * and injects them into the network stack until asked to stop. @data is the
 * bpf_cpu_map_entry (body elided — confirm loop/exit conditions in full
 * source).
 */
static int cpu_map_kthread_run(void *data)
{ … }
/* Look up the BPF program referenced by @fd and attach it to @rcpu,
 * validating it is compatible with CPUMAP redirection. Returns 0 or a
 * negative errno (body elided — confirm accepted program types).
 */
static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu,
struct bpf_map *map, int fd)
{ … }
/* Allocate and initialize one CPUMAP slot for destination @cpu from @value
 * (queue size, optional prog fd), including its ring and kthread. Returns
 * NULL/ERR_PTR on failure (body elided — confirm error return convention).
 */
static struct bpf_cpu_map_entry *
__cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
u32 cpu)
{ … }
/* Deferred destructor run from a workqueue: tears down a detached entry —
 * presumably stopping its kthread, draining the ring, and releasing the
 * prog reference (body elided; container_of(@work) recovers the entry —
 * verify ordering in full source).
 */
static void __cpu_map_entry_free(struct work_struct *work)
{ … }
/* Swap the slot for @key_cpu to @rcpu (which may be NULL for deletion) and
 * schedule the old entry, if any, for deferred free — presumably an RCU
 * pointer exchange so in-flight redirects stay safe (body elided; confirm).
 */
static void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
u32 key_cpu, struct bpf_cpu_map_entry *rcpu)
{ … }
/* .map_delete_elem callback: clear the slot for the CPU index at @key.
 * Returns 0 or a negative errno (body elided).
 */
static long cpu_map_delete_elem(struct bpf_map *map, void *key)
{ … }
/* .map_update_elem callback: install a new entry (allocated from @value)
 * at the CPU index in @key, replacing any existing one subject to
 * @map_flags. Returns 0 or a negative errno (body elided — confirm
 * flag/bounds validation in full source).
 */
static long cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
u64 map_flags)
{ … }
/* .map_free callback: release every remaining entry and the map structure
 * itself once no references remain (body elided — presumably waits for
 * outstanding RCU/kthread activity; verify).
 */
static void cpu_map_free(struct bpf_map *map)
{ … }
/* Internal lookup by CPU index @key used on the redirect fast path; returns
 * the entry pointer or NULL (body elided — caller context presumably
 * RCU-protected; confirm).
 */
static void *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{ … }
/* .map_lookup_elem callback (syscall path): returns a pointer to the value
 * visible to userspace for the CPU index at @key, or NULL (body elided).
 */
static void *cpu_map_lookup_elem(struct bpf_map *map, void *key)
{ … }
/* .map_get_next_key callback: write the key following @key into @next_key
 * for map iteration; NULL @key starts from the beginning per bpf_map_ops
 * convention. Returns 0 or -ENOENT at the end (body elided — confirm).
 */
static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{ … }
/* .map_redirect callback backing bpf_redirect_map() for CPUMAP: record the
 * target CPU @index (with @flags) for the in-progress XDP run. Return value
 * is the XDP action (body elided — confirm flags handling).
 */
static long cpu_map_redirect(struct bpf_map *map, u64 index, u64 flags)
{ … }
/* .map_mem_usage callback: report the map's approximate memory footprint in
 * bytes for accounting (body elided).
 */
static u64 cpu_map_mem_usage(const struct bpf_map *map)
{ … }
/* BTF type id for struct bpf_cpu_map, referenced by the ops table below. */
BTF_ID_LIST_SINGLE(cpu_map_btf_ids, struct, bpf_cpu_map)
/* Operations table wiring the callbacks above into the BPF map core
 * (initializer elided).
 */
const struct bpf_map_ops cpu_map_ops = …;
/* Push every frame staged in @bq onto the destination entry's ptr_ring in
 * one bulk operation, presumably dropping (and accounting) frames that do
 * not fit (body elided — confirm drop/trace behavior).
 */
static void bq_flush_to_queue(struct xdp_bulk_queue *bq)
{ … }
/* Stage @xdpf in the current CPU's bulk queue for @rcpu, flushing to the
 * ring when the queue reaches CPU_MAP_BULK_SIZE (body elided — confirm
 * flush trigger in full source).
 */
static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
{ … }
/* Public entry point used by the XDP redirect core: enqueue @xdpf (received
 * on @dev_rx) toward @rcpu's CPU via the bulk queue. Returns 0 on success
 * (body elided — confirm error cases).
 */
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
struct net_device *dev_rx)
{ … }
/* Generic-XDP (skb) counterpart of cpu_map_enqueue: hand @skb to @rcpu's
 * kthread for processing on the target CPU. Returns 0 or a negative errno
 * (body elided — confirm queueing mechanism).
 */
int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
struct sk_buff *skb)
{ … }
/* Flush every bulk queue on @flush_list at the end of an XDP RX napi poll,
 * presumably kicking the destination kthreads so queued frames are
 * processed promptly (body elided — confirm wakeup behavior).
 */
void __cpu_map_flush(struct list_head *flush_list)
{ … }