// SPDX-License-Identifier: GPL-2.0-only
/* bpf/cpumap.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */

/**
 * DOC: cpu map
 * The 'cpumap' is primarily used as a backend map for the XDP BPF helper
 * call bpf_redirect_map() and the XDP_REDIRECT action, like 'devmap'.
 *
 * Unlike devmap, which redirects XDP frames out to another NIC device,
 * this map type redirects raw XDP frames to another CPU.  The remote
 * CPU will do the SKB allocation and call the normal network stack.
 *
 * This is a scalability and isolation mechanism that allows separating
 * the early driver network XDP layer from the rest of the netstack, and
 * assigning dedicated CPUs to this stage.  This basically allows for
 * 10G wirespeed pre-filtering via BPF.
 */
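/* For orientation (illustrative only, not part of this file): a cpumap is
 * normally populated from user space and then used from an XDP program as
 * the target of bpf_redirect_map().  A minimal BPF-side sketch, built
 * separately with clang/libbpf (map and function names are made up),
 * could look like:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_CPUMAP);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(struct bpf_cpumap_val));
 *		__uint(max_entries, 64);
 *	} cpu_map SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_redirect_to_cpu(struct xdp_md *ctx)
 *	{
 *		__u32 key = 0;	// map index, chosen by user-space policy
 *
 *		return bpf_redirect_map(&cpu_map, key, 0);
 *	}
 *
 * The entry's value (struct bpf_cpumap_val) carries the destination ring
 * size and, optionally, a second XDP program to run on the remote CPU.
 */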
#include <linux/bitops.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <net/xdp.h>
#include <net/hotdata.h>

#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <trace/events/xdp.h>
#include <linux/btf_ids.h>

#include <linux/netdevice.h>   /* netif_receive_skb_list */
#include <linux/etherdevice.h> /* eth_type_trans */

/* General idea: XDP packets that get XDP-redirected to another CPU are
 * stored/queued for at most one driver ->poll() call.  It is guaranteed
 * that queueing the frame and the flush operation happen on the same
 * CPU.  Thus, the cpu_map_flush operation can deduce via this_cpu_ptr()
 * which queue in bpf_cpu_map_entry contains packets.
 */
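/* A minimal sketch of that idea (compiled out; all names and fields below
 * are illustrative assumptions, not the real layout): each destination-CPU
 * entry owns one small staging queue per source CPU, and the end-of-poll
 * flush drains the staging queue into the entry's ptr_ring, from which the
 * remote CPU's kthread consumes.
 */
#ifdef CPUMAP_EXAMPLE_SKETCH
#define EXAMPLE_BULK 8	/* a handful of frame pointers, roughly a cacheline */

struct example_entry;

struct example_bulk_queue {
	struct xdp_frame *q[EXAMPLE_BULK];	/* frames staged during one ->poll() */
	struct list_head flush_node;		/* links queue into the per-NAPI flush list */
	struct example_entry *obj;		/* owning destination-CPU entry */
	unsigned int count;
};

struct example_entry {
	struct ptr_ring *ring;				/* MPSC ring; remote kthread is the consumer */
	struct example_bulk_queue __percpu *bulkq;	/* one staging queue per source CPU */
};

/* Drain one staging queue into the destination ring.  Runs on the CPU that
 * filled the queue, so no extra serialization of bq itself is needed.
 */
static void example_flush_queue(struct example_bulk_queue *bq)
{
	struct example_entry *e = bq->obj;
	unsigned int i;

	if (!bq->count)
		return;

	for (i = 0; i < bq->count; i++) {
		if (ptr_ring_produce(e->ring, bq->q[i]))
			xdp_return_frame(bq->q[i]);	/* ring full: drop the frame */
	}
	bq->count = 0;
	__list_del_clearprev(&bq->flush_node);	/* mark "not on the flush list" */
}
#endif /* CPUMAP_EXAMPLE_SKETCH */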

#define CPU_MAP_BULK_SIZE 8  /* 8 == one cacheline on 64-bit archs */
struct bpf_cpu_map_entry;
struct bpf_cpu_map;

struct xdp_bulk_queue {};

/* Struct for every remote "destination" CPU in map */
struct bpf_cpu_map_entry {};

struct bpf_cpu_map {};

static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
{}

static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
{}

static void cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu,
				     struct list_head *listp,
				     struct xdp_cpumap_stats *stats)
{}

static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu,
				    void **frames, int n,
				    struct xdp_cpumap_stats *stats)
{}

#define CPUMAP_BATCH 8

static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
				int xdp_n, struct xdp_cpumap_stats *stats,
				struct list_head *list)
{}

static int cpu_map_kthread_run(void *data)
{}

static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu,
				      struct bpf_map *map, int fd)
{}

static struct bpf_cpu_map_entry *
__cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
		      u32 cpu)
{}

static void __cpu_map_entry_free(struct work_struct *work)
{}

/* After the xchg of the bpf_cpu_map_entry pointer, we need to make sure the old
 * entry is no longer in use before freeing. We use queue_rcu_work() to call
 * __cpu_map_entry_free() in a separate workqueue after waiting for an RCU grace
 * period. This means that (a) all pending enqueue and flush operations have
 * completed (because of the RCU callback), and (b) we are in a workqueue
 * context where we can stop the kthread and wait for it to exit before freeing
 * everything.
 */
static void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
				    u32 key_cpu, struct bpf_cpu_map_entry *rcpu)
{}
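
/* Illustrative sketch of that replace-then-deferred-free pattern (compiled
 * out; names are made up).  The old pointer is detached with an atomic
 * exchange, and its teardown runs from a workqueue only after an RCU grace
 * period, so no concurrent enqueue/flush can still be using it when the
 * kthread is stopped and the memory is freed.
 */
#ifdef CPUMAP_EXAMPLE_SKETCH
struct example_rcu_entry {
	struct rcu_work free_work;
	/* ... kthread, ring and other resources to tear down ... */
};

static void example_entry_free(struct work_struct *work)
{
	struct example_rcu_entry *e;

	/* Queued via queue_rcu_work(), so an RCU grace period has already
	 * elapsed by the time this runs in process context.
	 */
	e = container_of(to_rcu_work(work), struct example_rcu_entry, free_work);
	/* ... kthread_stop(), drain the ring, then free ... */
	kfree(e);
}

static void example_entry_replace(struct example_rcu_entry __rcu **slot,
				  struct example_rcu_entry *new_entry)
{
	struct example_rcu_entry *old;

	old = unrcu_pointer(xchg(slot, RCU_INITIALIZER(new_entry)));
	if (old) {
		INIT_RCU_WORK(&old->free_work, example_entry_free);
		queue_rcu_work(system_wq, &old->free_work);
	}
}
#endif /* CPUMAP_EXAMPLE_SKETCH */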

static long cpu_map_delete_elem(struct bpf_map *map, void *key)
{}

static long cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
				u64 map_flags)
{}

static void cpu_map_free(struct bpf_map *map)
{}

/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
 * by local_bh_disable() (from XDP calls inside NAPI). The
 * rcu_read_lock_bh_held() below makes lockdep accept both.
 */
static void *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{}
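
/* Illustrative sketch of the lookup pattern described above (compiled out;
 * names are made up): rcu_dereference_check() with rcu_read_lock_bh_held()
 * tells lockdep that either a regular RCU read-side section or a
 * local_bh_disable() region, as in XDP/NAPI context, protects the access.
 */
#ifdef CPUMAP_EXAMPLE_SKETCH
static void *example_lookup(void __rcu **slots, u32 nr_slots, u32 key)
{
	if (key >= nr_slots)
		return NULL;

	return rcu_dereference_check(slots[key], rcu_read_lock_bh_held());
}
#endif /* CPUMAP_EXAMPLE_SKETCH */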

static void *cpu_map_lookup_elem(struct bpf_map *map, void *key)
{}

static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{}

static long cpu_map_redirect(struct bpf_map *map, u64 index, u64 flags)
{}

static u64 cpu_map_mem_usage(const struct bpf_map *map)
{}

BTF_ID_LIST_SINGLE(cpu_map_btf_ids, struct, bpf_cpu_map)
const struct bpf_map_ops cpu_map_ops = {};

static void bq_flush_to_queue(struct xdp_bulk_queue *bq)
{}

/* Runs under RCU read-side protection, plus in softirq under NAPI
 * protection.  Thus, percpu variable access is safe here.
 */
static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
{}
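
/* Illustrative enqueue-side sketch (compiled out), reusing the example
 * types defined near the top of this file: percpu access needs no locking
 * here because we run in NAPI softirq on the queue's own CPU, and the
 * first frame of a NAPI cycle registers the queue on the per-NAPI flush
 * list so the end-of-poll flush knows to drain it.
 */
#ifdef CPUMAP_EXAMPLE_SKETCH
static void example_enqueue(struct example_entry *e, struct xdp_frame *xdpf,
			    struct list_head *flush_list)
{
	struct example_bulk_queue *bq = this_cpu_ptr(e->bulkq);

	/* Staging queue full: push what we have to the ring right away. */
	if (bq->count == EXAMPLE_BULK)
		example_flush_queue(bq);

	bq->q[bq->count++] = xdpf;

	/* flush_node.prev == NULL means "not on any list", assuming the
	 * per-CPU queues were zero-initialized when allocated.
	 */
	if (!bq->flush_node.prev)
		list_add(&bq->flush_node, flush_list);
}
#endif /* CPUMAP_EXAMPLE_SKETCH */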

int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
		    struct net_device *dev_rx)
{}

int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
			     struct sk_buff *skb)
{}

void __cpu_map_flush(struct list_head *flush_list)
{}