linux/net/xdp/xsk_buff_pool.c

// SPDX-License-Identifier: GPL-2.0
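/* xsk_buff_pool implementation: manages the buffers backing an AF_XDP
 * umem, including device binding, DMA mapping and (zero-copy) buffer
 * allocation.
 */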

#include <net/xsk_buff_pool.h>
#include <net/xdp_sock.h>
#include <net/xdp_sock_drv.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

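/* Add a TX socket to the pool's xsk_tx_list so that TX processing can
 * iterate over all sockets sharing the pool.
 */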
void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{}

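/* Remove the socket from the pool's xsk_tx_list again. */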
void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{}

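/* Free the pool and the per-buffer metadata it owns. */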
void xp_destroy(struct xsk_buff_pool *pool)
{}

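/* Allocate the descriptor array used for batched TX processing, sized
 * after the socket's TX ring.
 */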
int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{}

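/* Create a buffer pool on top of a registered umem, with one
 * xdp_buff_xsk head pre-initialized per chunk in the umem.
 */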
struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
						struct xdp_umem *umem)
{}

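/* Let every buffer in the pool point at the driver's rxq info. */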
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq)
{}
EXPORT_SYMBOL(xp_set_rxq_info);

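/* Copy a driver-private descriptor into the cb area of every buffer in
 * the pool.
 */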
void xp_fill_cb(struct xsk_buff_pool *pool, struct xsk_cb_desc *desc)
{}
EXPORT_SYMBOL(xp_fill_cb);

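/* If the umem was bound in zero-copy mode, tell the driver to release
 * the pool through the XDP_SETUP_XSK_POOL ndo_bpf command.
 */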
static void xp_disable_drv_zc(struct xsk_buff_pool *pool)
{}

/* Features a netdev must advertise for a zero-copy binding. */
#define NETDEV_XDP_ACT_ZC	(NETDEV_XDP_ACT_BASIC |		\
				 NETDEV_XDP_ACT_REDIRECT |	\
				 NETDEV_XDP_ACT_XSK_ZEROCOPY)

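/* Bind the pool to a device and queue id. Registers the pool with the
 * netdev and enables zero-copy if the driver supports it and the flags
 * allow it; otherwise the socket falls back to copy mode.
 */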
int xp_assign_dev(struct xsk_buff_pool *pool,
		  struct net_device *netdev, u16 queue_id, u16 flags)
{}

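/* Bind a pool created for XDP_SHARED_UMEM, deriving the binding flags
 * from the socket that owns the umem.
 */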
int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
			 struct net_device *dev, u16 queue_id)
{}

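/* Undo xp_assign_dev(): disable zero-copy if it was enabled, unregister
 * from the netdev and drop the device reference.
 */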
void xp_clear_dev(struct xsk_buff_pool *pool)
{}

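/* Final pool teardown, deferred to a workqueue so that the locks it
 * needs (e.g. the rtnl lock) can be taken safely.
 */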
static void xp_release_deferred(struct work_struct *work)
{}

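/* Take a reference on the pool. */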
void xp_get_pool(struct xsk_buff_pool *pool)
{}

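/* Drop a reference and schedule the deferred release work when the last
 * one is gone. Returns false only when no pool was passed in.
 */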
bool xp_put_pool(struct xsk_buff_pool *pool)
{}

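/* Look for an existing DMA mapping of this umem for the same device so
 * that pools sharing a umem can also share the mapping.
 */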
static struct xsk_dma_map *xp_find_dma_map(struct xsk_buff_pool *pool)
{}

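/* Allocate a refcounted mapping holding one DMA address per umem page
 * and link it into the umem's list of mappings.
 */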
static struct xsk_dma_map *xp_create_dma_map(struct device *dev, struct net_device *netdev,
					     u32 nr_pages, struct xdp_umem *umem)
{}

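/* Unlink the mapping from its umem and free it. */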
static void xp_destroy_dma_map(struct xsk_dma_map *dma_map)
{}

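/* Unmap all pages of the mapping and destroy it. */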
static void __xp_dma_unmap(struct xsk_dma_map *dma_map, unsigned long attrs)
{}

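/* Drop the pool's reference to the DMA mapping; the pages are unmapped
 * once the last sharing pool is gone.
 */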
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
{}
EXPORT_SYMBOL(xp_dma_unmap);

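/* Mark every page whose successor is contiguous in DMA address space,
 * which lets buffers in unaligned mode cross page boundaries.
 */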
static void xp_check_dma_contiguity(struct xsk_dma_map *dma_map)
{}

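/* Populate the pool's view of the DMA mapping (dma_pages and, where
 * possible, precomputed per-buffer DMA addresses).
 */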
static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_map)
{}

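/* DMA map all pages of the umem for the given device, reusing an
 * existing mapping if another pool has already mapped this umem.
 */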
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
	       unsigned long attrs, struct page **pages, u32 nr_pages)
{}
EXPORT_SYMBOL(xp_dma_map);

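/* True if a buffer at this unaligned address would straddle two pages
 * that are not contiguous in DMA address space.
 */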
static bool xp_addr_crosses_non_contig_pg(struct xsk_buff_pool *pool,
					  u64 addr)
{}

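/* Validate a fill-ring address in unaligned chunk mode. */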
static bool xp_check_unaligned(struct xsk_buff_pool *pool, u64 *addr)
{}

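/* Mask off the offset bits and validate the address in aligned mode. */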
static bool xp_check_aligned(struct xsk_buff_pool *pool, u64 *addr)
{}

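/* Take one valid address from the fill ring and return its buffer. */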
static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
{}

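/* Allocate one buffer, preferring the pool's free list over the fill
 * ring, and prepare its xdp_buff (data pointers, DMA sync) for the
 * driver.
 */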
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
{}
EXPORT_SYMBOL(xp_alloc);

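/* Batched variant of __xp_alloc(): pull up to max addresses from the
 * fill ring in one go.
 */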
static u32 xp_alloc_new_from_fq(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{}

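/* Serve a batch of buffers from the pool's free list. */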
static u32 xp_alloc_reused(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 nb_entries)
{}

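/* Allocate up to max buffers, first from the free list and then from
 * the fill ring, falling back to xp_alloc() one buffer at a time when
 * the DMA addresses need syncing.
 */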
u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{}
EXPORT_SYMBOL(xp_alloc_batch);

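/* Check that the free list and the fill ring together can supply count
 * buffers.
 */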
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count)
{}
EXPORT_SYMBOL(xp_can_alloc);

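/* Return a buffer to the pool's free list, unless it is already there. */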
void xp_free(struct xdp_buff_xsk *xskb)
{}
EXPORT_SYMBOL(xp_free);

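/* Translate a raw umem address into a kernel virtual address. */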
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{}
EXPORT_SYMBOL(xp_raw_get_data);

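/* Translate a raw umem address into a DMA address for the bound
 * device.
 */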
dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr)
{}
EXPORT_SYMBOL(xp_raw_get_dma);