/* linux/net/core/xdp.c */

// SPDX-License-Identifier: GPL-2.0-only
/* net/core/xdp.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <linux/bug.h>
#include <net/page_pool/helpers.h>

#include <net/hotdata.h>
#include <net/xdp.h>
#include <net/xdp_priv.h> /* struct xdp_mem_allocator */
#include <trace/events/xdp.h>
#include <net/xdp_sock_drv.h>

/* Registration states tracked in xdp_rxq_info::reg.
 * NOTE(review): the macro values and the mem_id_next initializer were
 * elided in this extraction (the latter was a syntax error); restored to
 * the upstream net/core/xdp.c definitions -- confirm against the tree.
 */
#define REG_STATE_NEW		0x0 /* Initial state */
#define REG_STATE_REGISTERED	0x1 /* Completed xdp_rxq_info_reg() */
#define REG_STATE_UNREGISTERED	0x2 /* After xdp_rxq_info_unreg() */
#define REG_STATE_UNUSED	0x3 /* Driver flagged queue as unused */

static DEFINE_IDA(mem_id_pool);
static DEFINE_MUTEX(mem_id_lock);
/* Cyclic ID range handed out by __mem_id_cyclic_get(); MEM_ID_MAX is the
 * wrap-around bound, IDs start at MEM_ID_MIN (0 is reserved/invalid).
 */
#define MEM_ID_MAX 0xFFFE
#define MEM_ID_MIN 1
static int mem_id_next = MEM_ID_MIN;

static bool mem_id_init; /* false */
static struct rhashtable *mem_id_ht;

static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
{}

static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg,
			  const void *ptr)
{}

static const struct rhashtable_params mem_id_rht_params =;

/* RCU callback freeing a struct xdp_mem_allocator.
 * NOTE(review): body elided in this extraction -- see upstream net/core/xdp.c.
 */
static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
{}

/* Remove an allocator entry from mem_id_ht and schedule its RCU free.
 * NOTE(review): body elided in this extraction -- see upstream net/core/xdp.c.
 */
static void mem_xa_remove(struct xdp_mem_allocator *xa)
{}

/* Disconnect callback invoked when a memory allocator goes away.
 * NOTE(review): body elided in this extraction -- see upstream net/core/xdp.c.
 */
static void mem_allocator_disconnect(void *allocator)
{}

/* Unregister a previously registered XDP memory model.
 * NOTE(review): implementation elided in this extraction; restore from
 * upstream net/core/xdp.c before building.
 */
void xdp_unreg_mem_model(struct xdp_mem_info *mem)
{
}
EXPORT_SYMBOL_GPL(xdp_unreg_mem_model);

/* Unregister the memory model attached to an RX-queue info struct.
 * NOTE(review): implementation elided in this extraction.
 */
void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
{
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model);

/* Unregister an RX-queue info struct.
 * NOTE(review): implementation elided in this extraction.
 */
void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
{
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);

/* Presumably (re)initializes an xdp_rxq_info to a clean state -- confirm
 * against upstream. NOTE(review): body elided in this extraction.
 */
static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
{}

/* Returns 0 on success, negative on failure */
/* NOTE(review): implementation elided in this extraction; restore from
 * upstream net/core/xdp.c before building.
 */
int __xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		       struct net_device *dev, u32 queue_index,
		       unsigned int napi_id, u32 frag_size)
{
}
EXPORT_SYMBOL_GPL(__xdp_rxq_info_reg);

/* Mark an RX-queue info struct as unused.
 * NOTE(review): implementation elided in this extraction.
 */
void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
{
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);

/* Query whether an RX-queue info struct is registered.
 * NOTE(review): implementation elided in this extraction.
 */
bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
{
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);

/* Lazily allocate and initialize mem_id_ht (guarded by mem_id_init).
 * NOTE(review): body elided in this extraction -- see upstream net/core/xdp.c.
 */
static int __mem_id_init_hash_table(void)
{}

/* Allocate a cyclic ID that maps to allocator pointer.
 * See: https://www.kernel.org/doc/html/latest/core-api/idr.html
 *
 * Caller must lock mem_id_lock.
 */
/* NOTE(review): body elided in this extraction -- see upstream net/core/xdp.c. */
static int __mem_id_cyclic_get(gfp_t gfp)
{}

/* Presumably validates an xdp_mem_type against the supported set -- confirm
 * against upstream. NOTE(review): body elided in this extraction.
 */
static bool __is_supported_mem_type(enum xdp_mem_type type)
{}

/* Core registration helper shared by xdp_reg_mem_model() and
 * xdp_rxq_info_reg_mem_model().
 * NOTE(review): body elided in this extraction -- see upstream net/core/xdp.c.
 */
static struct xdp_mem_allocator *__xdp_reg_mem_model(struct xdp_mem_info *mem,
						     enum xdp_mem_type type,
						     void *allocator)
{}

/* Register a memory model for an xdp_mem_info.
 * NOTE(review): implementation elided in this extraction; restore from
 * upstream net/core/xdp.c before building.
 */
int xdp_reg_mem_model(struct xdp_mem_info *mem,
		      enum xdp_mem_type type, void *allocator)
{
}
EXPORT_SYMBOL_GPL(xdp_reg_mem_model);

/* Register a memory model for an RX-queue info struct.
 * NOTE(review): implementation elided in this extraction.
 */
int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator)
{
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);

/* XDP RX runs under NAPI protection, and in different delivery error
 * scenarios (e.g. queue full), it is possible to return the xdp_frame
 * while still leveraging this protection.  The @napi_direct boolean
 * is used for those calls sites.  Thus, allowing for faster recycling
 * of xdp_frames/pages in those cases.
 */
/* NOTE(review): body elided in this extraction -- see upstream net/core/xdp.c. */
void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
		  struct xdp_buff *xdp)
{}

/* Return an xdp_frame (slow path, no NAPI-direct recycling).
 * NOTE(review): implementation elided in this extraction; restore from
 * upstream net/core/xdp.c before building.
 */
void xdp_return_frame(struct xdp_frame *xdpf)
{
}
EXPORT_SYMBOL_GPL(xdp_return_frame);

/* Return an xdp_frame from NAPI context (napi_direct recycling allowed).
 * NOTE(review): implementation elided in this extraction.
 */
void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
{
}
EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);

/* XDP bulk APIs introduce a defer/flush mechanism to return
 * pages belonging to the same xdp_mem_allocator object
 * (identified via the mem.id field) in bulk to optimize
 * I-cache and D-cache.
 * The bulk queue size is set to 16 to be aligned to how
 * XDP_REDIRECT bulking works. The bulk is flushed when
 * it is full or when mem.id changes.
 * xdp_frame_bulk is usually stored/allocated on the function
 * call-stack to avoid locking penalties.
 */
/* Flush a pending xdp_frame bulk queue back to its allocator.
 * NOTE(review): implementation elided in this extraction; restore from
 * upstream net/core/xdp.c before building.
 */
void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq)
{
}
EXPORT_SYMBOL_GPL(xdp_flush_frame_bulk);

/* Must be called with rcu_read_lock held */
/* NOTE(review): implementation elided in this extraction. */
void xdp_return_frame_bulk(struct xdp_frame *xdpf,
			   struct xdp_frame_bulk *bq)
{
}
EXPORT_SYMBOL_GPL(xdp_return_frame_bulk);

/* Return the buffer backing an xdp_buff.
 * NOTE(review): implementation elided in this extraction.
 */
void xdp_return_buff(struct xdp_buff *xdp)
{
}
EXPORT_SYMBOL_GPL(xdp_return_buff);

/* Record an attached XDP program in the attachment info.
 * NOTE(review): implementation elided in this extraction.
 */
void xdp_attachment_setup(struct xdp_attachment_info *info,
			  struct netdev_bpf *bpf)
{
}
EXPORT_SYMBOL_GPL(xdp_attachment_setup);

/* Convert a zero-copy xdp_buff into an xdp_frame.
 * NOTE(review): implementation elided in this extraction; as written this
 * returns an indeterminate pointer.
 */
struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
{
}
EXPORT_SYMBOL_GPL(xdp_convert_zc_to_xdp_frame);

/* Used by XDP_WARN macro, to avoid inlining WARN() in fast-path */
void xdp_warn(const char *msg, const char *func, const int line)
{
	WARN(1, "XDP_WARN: %s(line:%d): %s\n", func, line, msg);
}
EXPORT_SYMBOL_GPL(xdp_warn);

/* Bulk-allocate n_skb sk_buffs into @skbs with @gfp flags.
 * NOTE(review): implementation elided in this extraction; restore from
 * upstream net/core/xdp.c before building.
 */
int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp)
{
}
EXPORT_SYMBOL_GPL(xdp_alloc_skb_bulk);

/* Populate a caller-provided sk_buff from an xdp_frame.
 * NOTE(review): implementation elided in this extraction; restore from
 * upstream net/core/xdp.c before building.
 */
struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					   struct sk_buff *skb,
					   struct net_device *dev)
{
}
EXPORT_SYMBOL_GPL(__xdp_build_skb_from_frame);

/* Allocate and build an sk_buff from an xdp_frame.
 * NOTE(review): implementation elided in this extraction.
 */
struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					 struct net_device *dev)
{
}
EXPORT_SYMBOL_GPL(xdp_build_skb_from_frame);

/* Presumably duplicates an xdp_frame -- confirm against upstream.
 * NOTE(review): body elided in this extraction; as written this returns an
 * indeterminate pointer.
 */
struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf)
{}

__bpf_kfunc_start_defs();

/**
 * bpf_xdp_metadata_rx_timestamp - Read XDP frame RX timestamp.
 * @ctx: XDP context pointer.
 * @timestamp: Return value pointer.
 *
 * Return:
 * * Returns 0 on success or ``-errno`` on error.
 * * ``-EOPNOTSUPP`` : means device driver does not implement kfunc
 * * ``-ENODATA``    : means no RX-timestamp available for this frame
 */
/* NOTE(review): body elided in this extraction -- see upstream net/core/xdp.c. */
__bpf_kfunc int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
{}

/**
 * bpf_xdp_metadata_rx_hash - Read XDP frame RX hash.
 * @ctx: XDP context pointer.
 * @hash: Return value pointer.
 * @rss_type: Return value pointer for RSS type.
 *
 * The RSS hash type (@rss_type) specifies what portion of packet headers NIC
 * hardware used when calculating RSS hash value.  The RSS type can be decoded
 * via &enum xdp_rss_hash_type either matching on individual L3/L4 bits
 * ``XDP_RSS_L*`` or by combined traditional *RSS Hashing Types*
 * ``XDP_RSS_TYPE_L*``.
 *
 * Return:
 * * Returns 0 on success or ``-errno`` on error.
 * * ``-EOPNOTSUPP`` : means device driver doesn't implement kfunc
 * * ``-ENODATA``    : means no RX-hash available for this frame
 */
/* NOTE(review): body elided in this extraction -- see upstream net/core/xdp.c. */
__bpf_kfunc int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash,
					 enum xdp_rss_hash_type *rss_type)
{}

/**
 * bpf_xdp_metadata_rx_vlan_tag - Get XDP packet outermost VLAN tag
 * @ctx: XDP context pointer.
 * @vlan_proto: Destination pointer for VLAN Tag protocol identifier (TPID).
 * @vlan_tci: Destination pointer for VLAN TCI (VID + DEI + PCP)
 *
 * In case of success, ``vlan_proto`` contains *Tag protocol identifier (TPID)*,
 * usually ``ETH_P_8021Q`` or ``ETH_P_8021AD``, but some networks can use
 * custom TPIDs. ``vlan_proto`` is stored in **network byte order (BE)**
 * and should be used as follows:
 * ``if (vlan_proto == bpf_htons(ETH_P_8021Q)) do_something();``
 *
 * ``vlan_tci`` contains the remaining 16 bits of a VLAN tag.
 * Driver is expected to provide those in **host byte order (usually LE)**,
 * so the bpf program should not perform byte conversion.
 * According to 802.1Q standard, *VLAN TCI (Tag control information)*
 * is a bit field that contains:
 * *VLAN identifier (VID)* that can be read with ``vlan_tci & 0xfff``,
 * *Drop eligible indicator (DEI)* - 1 bit,
 * *Priority code point (PCP)* - 3 bits.
 * For detailed meaning of DEI and PCP, please refer to other sources.
 *
 * Return:
 * * Returns 0 on success or ``-errno`` on error.
 * * ``-EOPNOTSUPP`` : device driver doesn't implement kfunc
 * * ``-ENODATA``    : VLAN tag was not stripped or is not available
 */
/* NOTE(review): body elided in this extraction -- see upstream net/core/xdp.c. */
__bpf_kfunc int bpf_xdp_metadata_rx_vlan_tag(const struct xdp_md *ctx,
					     __be16 *vlan_proto, u16 *vlan_tci)
{}

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(xdp_metadata_kfunc_ids)
#define XDP_METADATA_KFUNC
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC
BTF_KFUNCS_END()

static const struct btf_kfunc_id_set xdp_metadata_kfunc_set =;

BTF_ID_LIST(xdp_metadata_kfunc_ids_unsorted)
#define XDP_METADATA_KFUNC
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC

/* Presumably maps an xdp_rx_metadata index to its BTF kfunc ID -- confirm
 * against upstream. NOTE(review): body elided in this extraction.
 */
u32 bpf_xdp_metadata_kfunc_id(int id)
{}

/* Presumably tests whether a BTF ID belongs to the dev-bound kfunc set --
 * confirm against upstream. NOTE(review): body elided in this extraction.
 */
bool bpf_dev_bound_kfunc_id(u32 btf_id)
{}

/* Late-initcall hook registering the XDP metadata kfunc set.
 * NOTE(review): body elided in this extraction -- see upstream net/core/xdp.c.
 */
static int __init xdp_metadata_init(void)
{}
late_initcall(xdp_metadata_init);

/* Set the full xdp_features flag word on a net_device.
 * NOTE(review): implementation elided in this extraction; restore from
 * upstream net/core/xdp.c before building.
 */
void xdp_set_features_flag(struct net_device *dev, xdp_features_t val)
{
}
EXPORT_SYMBOL_GPL(xdp_set_features_flag);

/* Advertise XDP_REDIRECT target support (optionally with SG) on a device.
 * NOTE(review): implementation elided in this extraction.
 */
void xdp_features_set_redirect_target(struct net_device *dev, bool support_sg)
{
}
EXPORT_SYMBOL_GPL(xdp_features_set_redirect_target);

/* Withdraw XDP_REDIRECT target support from a device.
 * NOTE(review): implementation elided in this extraction.
 */
void xdp_features_clear_redirect_target(struct net_device *dev)
{
}
EXPORT_SYMBOL_GPL(xdp_features_clear_redirect_target);