linux/include/net/xsk_buff_pool.h

/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2020 Intel Corporation. */

#ifndef XSK_BUFF_POOL_H_
#define XSK_BUFF_POOL_H_

#include <linux/if_xdp.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/bpf.h>
#include <net/xdp.h>

struct xsk_buff_pool;
struct xdp_rxq_info;
struct xsk_cb_desc;
struct xsk_queue;
struct xdp_desc;
struct xdp_umem;
struct xdp_sock;
struct device;
struct page;

#define XSK_PRIV_MAX 24

struct xdp_buff_xsk {
	struct xdp_buff xdp;
	u8 cb[XSK_PRIV_MAX];	/* private per-buffer area for ZC drivers */
	dma_addr_t dma;
	dma_addr_t frame_dma;
	struct xsk_buff_pool *pool;
	u64 orig_addr;
	struct list_head free_list_node;
	struct list_head xskb_list_node;
};

#define XSK_CHECK_PRIV_TYPE(t) BUILD_BUG_ON(sizeof(t) > offsetofend(struct xdp_buff_xsk, cb))
#define XSK_TX_COMPL_FITS(t) BUILD_BUG_ON(sizeof(struct xsk_tx_metadata_compl) > sizeof(t))
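
/* Illustrative sketch of how a zero-copy driver uses XSK_CHECK_PRIV_TYPE
 * (the struct and field names below are hypothetical, not part of this
 * header): the driver wraps the xdp_buff with its own per-buffer state and
 * asserts at build time that the whole thing still fits within
 * xdp_buff_xsk up to the end of cb[].
 *
 *	struct my_drv_xdp_buff {
 *		struct xdp_buff xdp;
 *		const void *rx_desc;	// driver-private, lives in cb[]
 *	};
 *	XSK_CHECK_PRIV_TYPE(struct my_drv_xdp_buff);
 */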

struct xsk_dma_map {
	dma_addr_t *dma_pages;
	struct device *dev;
	struct net_device *netdev;
	refcount_t users;
	struct list_head list; /* Protected by the RTNL_LOCK */
	u32 dma_pages_cnt;
	bool dma_need_sync;
};

struct xsk_buff_pool {
	/* Members only used in the control path first. */
	struct device *dev;
	struct net_device *netdev;
	struct list_head xsk_tx_list;
	/* Protects modifications to the xsk_tx_list */
	spinlock_t xsk_tx_list_lock;
	refcount_t users;
	struct xdp_umem *umem;
	struct work_struct work;
	struct list_head free_list;
	struct list_head xskb_list;
	u32 heads_cnt;
	u16 queue_id;

	/* Data path members as close to free_heads at the end as possible. */
	struct xsk_queue *fq ____cacheline_aligned_in_smp;
	struct xsk_queue *cq;
	/* For performance reasons, each buff pool has its own array of
	 * dma_pages even when they are identical.
	 */
	dma_addr_t *dma_pages;
	struct xdp_buff_xsk *heads;
	struct xdp_desc *tx_descs;
	u64 chunk_mask;
	u64 addrs_cnt;
	u32 free_list_cnt;
	u32 dma_pages_cnt;
	u32 free_heads_cnt;
	u32 headroom;
	u32 chunk_size;
	u32 chunk_shift;
	u32 frame_len;
	u8 tx_metadata_len; /* inherited from umem */
	u8 cached_need_wakeup;
	bool uses_need_wakeup;
	bool dma_need_sync;
	bool unaligned;
	bool tx_sw_csum;
	void *addrs;
	/* Mutual exclusion of the completion ring in the SKB mode. */
	spinlock_t cq_lock;
	struct xdp_buff_xsk *free_heads[];
};

/* Masks for xdp_umem_page flags.
 * The low 12-bits of the addr will be 0 since this is the page address, so we
 * can use them for flags.
 */
#define XSK_NEXT_PG_CONTIG_SHIFT 0
#define XSK_NEXT_PG_CONTIG_MASK BIT_ULL(XSK_NEXT_PG_CONTIG_SHIFT)
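
/* Worked example (illustrative): a 4 KiB page address such as 0x7f000
 * always has its low 12 bits clear, so bit 0 can carry the "next page is
 * physically contiguous" flag. Setting it gives 0x7f001, and masking with
 * ~XSK_NEXT_PG_CONTIG_MASK recovers the plain address, as done in
 * xp_init_xskb_dma() below.
 */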

/* AF_XDP core. */
struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
						struct xdp_umem *umem);
int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
		  u16 queue_id, u16 flags);
int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
			 struct net_device *dev, u16 queue_id);
int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs);
void xp_destroy(struct xsk_buff_pool *pool);
void xp_get_pool(struct xsk_buff_pool *pool);
bool xp_put_pool(struct xsk_buff_pool *pool);
void xp_clear_dev(struct xsk_buff_pool *pool);
void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
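
/* Rough shape of the bind-time sequence in the AF_XDP core (a sketch only;
 * see net/xdp/xsk.c for the real flow with locking and error handling):
 *
 *	pool = xp_create_and_assign_umem(xs, umem);
 *	if (!pool)
 *		return -ENOMEM;
 *	err = xp_assign_dev(pool, dev, queue_id, flags);
 *	...
 *	xp_put_pool(pool);	// on teardown
 */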

/* AF_XDP and XDP core. */
void xp_free(struct xdp_buff_xsk *xskb);

static inline void xp_init_xskb_addr(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
				     u64 addr)
{
	xskb->orig_addr = addr;
	xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
}

static inline void xp_init_xskb_dma(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
				    dma_addr_t *dma_pages, u64 addr)
{
	/* Strip the contiguity flag to get the page's DMA address, then add
	 * the offset within the page.
	 */
	xskb->frame_dma = (dma_pages[addr >> PAGE_SHIFT] & ~XSK_NEXT_PG_CONTIG_MASK) +
			  (addr & ~PAGE_MASK);
	xskb->dma = xskb->frame_dma + pool->headroom + XDP_PACKET_HEADROOM;
}

/* AF_XDP ZC drivers, via xdp_sock_drv.h */
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq);
void xp_fill_cb(struct xsk_buff_pool *pool, struct xsk_cb_desc *desc);
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
	       unsigned long attrs, struct page **pages, u32 nr_pages);
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs);
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool);
u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max);
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count);
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr);
dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr);
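
/* Sketch of driver-side DMA setup and teardown for a zero-copy queue
 * (hypothetical variable names; in-tree drivers typically go through the
 * xsk_pool_dma_map()/xsk_pool_dma_unmap() wrappers in net/xdp_sock_drv.h,
 * which feed the umem pages into the calls declared above):
 *
 *	err = xp_dma_map(pool, dev, 0, umem_pages, nr_pages);
 *	if (err)
 *		goto err_unwind;
 *	...
 *	xp_dma_unmap(pool, 0);	// on queue teardown
 */
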
static inline dma_addr_t xp_get_dma(struct xdp_buff_xsk *xskb)
{
	return xskb->dma;
}

static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb)
{
	return xskb->frame_dma;
}
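
/* Sketch of an RX fill path (hypothetical descriptor ring): allocate a
 * batch of buffers and program their DMA addresses into the hardware ring.
 *
 *	struct xdp_buff *bufs[64];
 *	u32 i, n = xp_alloc_batch(pool, bufs, ARRAY_SIZE(bufs));
 *
 *	for (i = 0; i < n; i++) {
 *		struct xdp_buff_xsk *xskb =
 *			container_of(bufs[i], struct xdp_buff_xsk, xdp);
 *
 *		hw_ring[i].addr = xp_get_dma(xskb);	// hypothetical field
 *	}
 */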

void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb);
static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
{
	xp_dma_sync_for_cpu_slow(xskb);
}

void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
				 size_t size);
static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool,
					  dma_addr_t dma, size_t size)
{
	if (!pool->dma_need_sync)
		return;

	xp_dma_sync_for_device_slow(pool, dma, size);
}
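
/* The sync helpers pair around hardware ownership of a buffer (sketch):
 *
 *	xp_dma_sync_for_cpu(xskb);		// RX: NIC wrote the frame,
 *						// CPU is about to read it
 *	... run the XDP program on xskb->xdp ...
 *	xp_dma_sync_for_device(pool, dma, len);	// refill/TX: CPU is done,
 *						// NIC takes ownership again
 */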

static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
						 u64 addr, u32 len)
{
	bool cross_pg = (addr & (PAGE_SIZE - 1)) + len > PAGE_SIZE;

	if (likely(!cross_pg))
		return false;

	return pool->dma_pages &&
	       !(pool->dma_pages[addr >> PAGE_SHIFT] & XSK_NEXT_PG_CONTIG_MASK);
}

static inline bool xp_mb_desc(struct xdp_desc *desc)
{
	return desc->options & XDP_PKT_CONTD;
}

static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr)
{
	return addr & pool->chunk_mask;
}

static inline u64 xp_unaligned_extract_addr(u64 addr)
{
	return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

static inline u64 xp_unaligned_extract_offset(u64 addr)
{
	return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

static inline u64 xp_unaligned_add_offset_to_addr(u64 addr)
{
	return xp_unaligned_extract_addr(addr) +
		xp_unaligned_extract_offset(addr);
}
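
/* Worked example of the unaligned address encoding (the XSK_UNALIGNED_BUF_*
 * constants come from include/uapi/linux/if_xdp.h): the low 48 bits hold
 * the base address and the upper 16 bits hold an offset. For
 * addr = (5ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) | 0x1000:
 *
 *	xp_unaligned_extract_addr(addr)		== 0x1000
 *	xp_unaligned_extract_offset(addr)	== 5
 *	xp_unaligned_add_offset_to_addr(addr)	== 0x1005
 */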

static inline u32 xp_aligned_extract_idx(struct xsk_buff_pool *pool, u64 addr)
{
	return xp_aligned_extract_addr(pool, addr) >> pool->chunk_shift;
}

static inline void xp_release(struct xdp_buff_xsk *xskb)
{
	/* Only unaligned mode needs to return the head here; in aligned mode
	 * the buffer can be recovered from the chunk address alone.
	 */
	if (xskb->pool->unaligned)
		xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
}

static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb)
{
	u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;

	offset += xskb->pool->headroom;
	if (!xskb->pool->unaligned)
		return xskb->orig_addr + offset;
	return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}

static inline bool xp_tx_metadata_enabled(const struct xsk_buff_pool *pool)
{
	return pool->tx_metadata_len > 0;
}
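
/* Sketch: a driver gates its TX metadata handling on this predicate; the
 * metadata sits right in front of the packet data (see
 * xsk_buff_get_metadata() in net/xdp_sock_drv.h for the in-tree helper):
 *
 *	if (xp_tx_metadata_enabled(pool))
 *		meta = xp_raw_get_data(pool, desc->addr) -
 *		       pool->tx_metadata_len;
 */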

#endif /* XSK_BUFF_POOL_H_ */