#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H
#include <linux/types.h>
#include <linux/if_xdp.h>
#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>
#include "xsk.h"
/* Common ring header shared by the rx/tx and fill/completion rings.
 * NOTE(review): members elided ("…") in this view — cannot verify layout. */
struct xdp_ring { … };
/* Ring carrying full xdp_desc entries — per its name, used for RX/TX rings.
 * NOTE(review): members elided ("…") in this view. */
struct xdp_rxtx_ring { … };
/* Ring carrying bare u64 addresses — per its name, the UMEM fill/completion
 * rings. NOTE(review): members elided ("…") in this view. */
struct xdp_umem_ring { … };
/* Kernel-side queue state wrapping one of the rings above; the producer/
 * consumer helpers below all operate on this. Fields elided in this view —
 * the helpers reference at least cached producer/consumer indices and
 * invalid_descs/queue_empty_descs counters (see xskq_nb_* accessors). */
struct xsk_queue { … };
/* Scratch result of parse_desc() used by the batched consumer read path.
 * NOTE(review): members elided ("…") in this view. */
struct parsed_desc { … };
/* Read the address at ring slot @cached_cons into *@addr without any
 * bounds/validity check — double-underscore prefix marks it as the raw
 * internal helper for the checked variant below.
 * NOTE(review): body elided ("…") in this chunk; behavior not verifiable here. */
static inline void __xskq_cons_read_addr_unchecked(struct xsk_queue *q, u32 cached_cons, u64 *addr)
{ … }
/* Consumer read of the next address from an addr (umem) ring; returns false
 * presumably when no entry is available. "unchecked" refers to the address
 * value itself not being validated. NOTE(review): body elided in this view. */
static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
{ … }
/* Returns true if @options has bits set outside the supported option mask —
 * used by the descriptor validators to reject unknown flags.
 * NOTE(review): body elided in this view; exact mask not visible. */
static inline bool xp_unused_options_set(u32 options)
{ … }
/* Validate @desc against @pool constraints in aligned-chunk mode (fixed
 * chunk-size addressing). Returns true if the descriptor is acceptable.
 * NOTE(review): body elided in this view; specific checks not verifiable. */
static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
struct xdp_desc *desc)
{ … }
/* Validate @desc against @pool constraints in unaligned-chunk mode
 * (arbitrary address within the umem). Counterpart to the aligned variant
 * above. NOTE(review): body elided in this view. */
static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool,
struct xdp_desc *desc)
{ … }
/* Dispatch to the aligned or unaligned descriptor validator depending on the
 * pool's configured mode — presumably a flag in @pool; TODO confirm.
 * NOTE(review): body elided in this view. */
static inline bool xp_validate_desc(struct xsk_buff_pool *pool,
struct xdp_desc *desc)
{ … }
/* True if the queue currently holds at least one unconsumed descriptor.
 * NOTE(review): body elided in this view. */
static inline bool xskq_has_descs(struct xsk_queue *q)
{ … }
/* Validate descriptor @d from queue @q against @pool; presumably bumps the
 * queue's invalid_descs counter on failure (see xskq_nb_invalid_descs below)
 * — TODO confirm, body elided in this view. */
static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
struct xdp_desc *d,
struct xsk_buff_pool *pool)
{ … }
/* Consumer read of the next descriptor into *@desc, validated against @pool.
 * Returns false presumably when the ring is empty or exhausted of valid
 * entries. NOTE(review): body elided in this view. */
static inline bool xskq_cons_read_desc(struct xsk_queue *q,
struct xdp_desc *desc,
struct xsk_buff_pool *pool)
{ … }
/* Advance the consumer side by @cnt entries, releasing them back to the
 * producer. NOTE(review): body elided in this view. */
static inline void xskq_cons_release_n(struct xsk_queue *q, u32 cnt)
{ … }
/* Parse/validate @desc into *@parsed for the batched read path below.
 * NOTE(review): body elided in this view; what exactly is extracted into
 * struct parsed_desc is not visible here. */
static inline void parse_desc(struct xsk_queue *q, struct xsk_buff_pool *pool,
struct xdp_desc *desc, struct parsed_desc *parsed)
{ … }
/* Batched consumer read of up to @max descriptors from @q into the pool's
 * staging area (presumably pool->tx_descs — TODO confirm; body elided).
 * Returns the number of descriptors actually read. */
static inline
u32 xskq_cons_read_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool,
u32 max)
{ … }
/* Publish the cached consumer index to the shared ring — internal helper
 * (double-underscore). NOTE(review): body elided; memory-ordering details
 * (barrier/release semantics) cannot be confirmed from this view. */
static inline void __xskq_cons_release(struct xsk_queue *q)
{ … }
/* Refresh the cached producer index from the shared ring so new entries
 * become visible to the consumer — internal helper. NOTE(review): body
 * elided; acquire/barrier semantics not verifiable here. */
static inline void __xskq_cons_peek(struct xsk_queue *q)
{ … }
/* Release consumed entries and re-peek the ring — presumably the combination
 * of __xskq_cons_release() + __xskq_cons_peek() above; TODO confirm (body
 * elided in this view). */
static inline void xskq_cons_get_entries(struct xsk_queue *q)
{ … }
/* Number of entries available for consumption, capped at @max.
 * NOTE(review): body elided in this view. */
static inline u32 xskq_cons_nb_entries(struct xsk_queue *q, u32 max)
{ … }
/* True if at least @cnt entries are available to consume — presumably built
 * on xskq_cons_nb_entries(); body elided in this view. */
static inline bool xskq_cons_has_entries(struct xsk_queue *q, u32 cnt)
{ … }
/* Peek (read without consuming) the next address from an addr ring; refreshes
 * the cached producer index if the cache is empty — TODO confirm, body elided.
 * Returns false when nothing is available. */
static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
{ … }
/* Peek the next valid descriptor into *@desc without advancing the consumer
 * index; descriptor-ring counterpart of the addr peek above.
 * NOTE(review): body elided in this view. */
static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
struct xdp_desc *desc,
struct xsk_buff_pool *pool)
{ … }
/* Release a single consumed entry — presumably the cnt==1 convenience form of
 * xskq_cons_release_n(); TODO confirm (body elided in this view). */
static inline void xskq_cons_release(struct xsk_queue *q)
{ … }
/* Un-consume @cnt entries, rewinding the cached consumer index so they can be
 * read again (inverse of release). NOTE(review): body elided in this view. */
static inline void xskq_cons_cancel_n(struct xsk_queue *q, u32 cnt)
{ … }
/* Number of entries currently present in the ring from the consumer's point
 * of view. NOTE(review): body elided; whether this re-reads the shared
 * producer index or uses the cached one cannot be confirmed here. */
static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
{ … }
/* Number of free slots available to the producer, capped at @max — producer
 * mirror of xskq_cons_nb_entries(). NOTE(review): body elided in this view. */
static inline u32 xskq_prod_nb_free(struct xsk_queue *q, u32 max)
{ … }
/* True when the producer has no free slot left.
 * NOTE(review): body elided in this view. */
static inline bool xskq_prod_is_full(struct xsk_queue *q)
{ … }
/* Roll back @cnt reserved-but-unsubmitted producer slots (inverse of the
 * reserve operations below). NOTE(review): body elided in this view. */
static inline void xskq_prod_cancel_n(struct xsk_queue *q, u32 cnt)
{ … }
/* Reserve one producer slot without writing it yet. Returns 0 on success,
 * presumably a negative errno (e.g. when full) on failure — TODO confirm;
 * body elided in this view. */
static inline int xskq_prod_reserve(struct xsk_queue *q)
{ … }
/* Reserve a slot and write @addr into it (addr-ring producer path, e.g.
 * completion ring). Same return convention as xskq_prod_reserve().
 * NOTE(review): body elided in this view. */
static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
{ … }
/* Write the addresses of @nb_entries descriptors from @descs into the ring in
 * one batch. void return — caller is expected to have checked free space
 * beforehand (presumably via xskq_prod_nb_free(); TODO confirm, body elided). */
static inline void xskq_prod_write_addr_batch(struct xsk_queue *q, struct xdp_desc *descs,
u32 nb_entries)
{ … }
/* Reserve a slot and write a full descriptor (@addr, @len, @flags) into it —
 * descriptor-ring counterpart of xskq_prod_reserve_addr(). Returns 0 on
 * success. NOTE(review): body elided in this view. */
static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
u64 addr, u32 len, u32 flags)
{ … }
/* Publish producer index @idx to the shared ring so the consumer can see the
 * new entries — internal helper. NOTE(review): body elided; release-barrier
 * semantics cannot be confirmed from this view. */
static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx)
{ … }
/* Submit all reserved entries — presumably publishes the cached producer
 * index via __xskq_prod_submit(); TODO confirm (body elided in this view). */
static inline void xskq_prod_submit(struct xsk_queue *q)
{ … }
/* Submit exactly @nb_entries entries beyond the currently published producer
 * index. NOTE(review): body elided in this view. */
static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries)
{ … }
/* True when the ring holds no unconsumed entries, as seen from the producer
 * side. NOTE(review): body elided in this view. */
static inline bool xskq_prod_is_empty(struct xsk_queue *q)
{ … }
/* Statistics accessor: count of invalid descriptors encountered on this
 * queue (exposed presumably via socket statistics — TODO confirm; body
 * elided in this view). */
static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{ … }
/* Statistics accessor: count of times the queue was found empty when a
 * descriptor was needed. NOTE(review): body elided in this view. */
static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q)
{ … }
/* Allocate a queue with @nentries slots; @umem_queue selects the addr-ring
 * (fill/completion) layout vs the descriptor-ring layout — TODO confirm,
 * definitions live in the corresponding .c file, not this header.
 * xskq_destroy() frees a queue created by xskq_create(). */
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);
#endif