// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2024 Intel Corporation */

#include <net/libeth/rx.h>
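/* Assumed extra includes for the sketched bodies below; both may already be
 * pulled in indirectly via <net/libeth/rx.h>.
 */
#include <linux/slab.h>
#include <net/xdp.h>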

/* Rx buffer management */

/**
 * libeth_rx_hw_len_mtu - get the actual buffer size to be passed to HW
 * @pp: &page_pool_params of the netdev to calculate the size for
 * @max_len: maximum buffer size for a single descriptor
 *
 * Return: HW-writeable length per one buffer to pass it to the HW, accounting
 * for the MTU the @pp->netdev has, the HW-required alignment, the minimum and
 * maximum allowed values, and the system's page size.
 */
static u32 libeth_rx_hw_len_mtu(const struct page_pool_params *pp, u32 max_len)
{
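	/* Sketch following the kdoc above; LIBETH_RX_LL_LEN (L2 header
	 * overhead) and LIBETH_RX_BUF_STRIDE (HW length granularity) are
	 * assumed to come from <net/libeth/rx.h>.
	 */
	u32 len;

	/* MTU plus the link-layer overhead, rounded up to the HW stride */
	len = READ_ONCE(pp->netdev->mtu) + LIBETH_RX_LL_LEN;
	len = ALIGN(len, LIBETH_RX_BUF_STRIDE);
	/* Clamp to the per-descriptor and per-page maximums */
	len = min3(len, ALIGN_DOWN(max_len ? : U32_MAX, LIBETH_RX_BUF_STRIDE),
		   pp->max_len);

	return len;
}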

/**
 * libeth_rx_hw_len_truesize - get the short buffer size to be passed to HW
 * @pp: &page_pool_params of the netdev to calculate the size for
 * @max_len: maximum buffer size for a single descriptor
 * @truesize: desired truesize for the buffers
 *
 * Return: HW-writeable length per one buffer to pass it to the HW, ignoring
 * the MTU and as close as possible to the passed @truesize. Can be used for
 * "short" buffer queues to fragment pages more efficiently.
 */
static u32 libeth_rx_hw_len_truesize(const struct page_pool_params *pp,
				     u32 max_len, u32 truesize)
{
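	/* Sketch: derive the HW length back from the wanted truesize by
	 * stripping the headroom and the skb_shared_info tailroom, then
	 * align and clamp as in libeth_rx_hw_len_mtu().
	 */
	u32 len;

	len = truesize - pp->offset -
	      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = ALIGN_DOWN(len, LIBETH_RX_BUF_STRIDE) ? : LIBETH_RX_BUF_STRIDE;
	len = min3(len, ALIGN_DOWN(max_len ? : U32_MAX, LIBETH_RX_BUF_STRIDE),
		   pp->max_len);

	return len;
}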

/**
 * libeth_rx_page_pool_params - calculate params with the stack overhead
 * @fq: buffer queue to calculate the size for
 * @pp: &page_pool_params of the netdev
 *
 * Set the PP params to account for all needed stack overhead (headroom,
 * tailroom) and compute both the HW buffer length and the truesize for all
 * types of buffers. For "short" buffers, the truesize never exceeds the
 * "wanted" one; for the rest, it can be up to the page size.
 *
 * Return: true on success, false on invalid input params.
 */
static bool libeth_rx_page_pool_params(struct libeth_fq *fq,
				       struct page_pool_params *pp)
{
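	/* Sketch; LIBETH_SKB_HEADROOM, LIBETH_RX_PAGE_LEN(), LIBETH_MAX_HEAD
	 * and the LIBETH_FQE_* queue types are assumed from
	 * <net/libeth/rx.h>, SKB_HEAD_ALIGN() from <linux/skbuff.h>.
	 */
	pp->offset = LIBETH_SKB_HEADROOM;
	/* HW-writeable / syncable length per one page */
	pp->max_len = LIBETH_RX_PAGE_LEN(pp->offset);

	/* HW-writeable length per buffer */
	switch (fq->type) {
	case LIBETH_FQE_MTU:
		fq->buf_len = libeth_rx_hw_len_mtu(pp, fq->buf_len);
		break;
	case LIBETH_FQE_SHORT:
		fq->buf_len = libeth_rx_hw_len_truesize(pp, fq->buf_len,
							fq->truesize);
		break;
	case LIBETH_FQE_HDR:
		fq->buf_len = ALIGN(LIBETH_MAX_HEAD, LIBETH_RX_BUF_STRIDE);
		break;
	default:
		return false;
	}

	/* Buffer size to allocate, incl. headroom and shinfo tailroom */
	fq->truesize = roundup_pow_of_two(SKB_HEAD_ALIGN(pp->offset +
							 fq->buf_len));

	return true;
}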

/**
 * libeth_rx_page_pool_params_zc - calculate params without the stack overhead
 * @fq: buffer queue to calculate the size for
 * @pp: &page_pool_params of the netdev
 *
 * Set the PP params to exclude the stack overhead and compute both the buffer
 * length and the truesize, which are equal for the data buffers. Note that
 * this requires separate header buffers to always be active and to account
 * for the overhead.
 * With MTU == ``PAGE_SIZE``, this allows the kernel to enable the zerocopy
 * mode.
 *
 * Return: true on success, false on invalid input params.
 */
static bool libeth_rx_page_pool_params_zc(struct libeth_fq *fq,
					  struct page_pool_params *pp)
{
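	/* Sketch: pick a power-of-two buffer length with no headroom or
	 * tailroom, as the kdoc requires for zerocopy-capable queues. The
	 * LIBETH_FQE_* types and LIBETH_RX_BUF_STRIDE are assumed from
	 * <net/libeth/rx.h>.
	 */
	u32 mtu, max;

	pp->offset = 0;
	pp->max_len = PAGE_SIZE;

	switch (fq->type) {
	case LIBETH_FQE_MTU:
		mtu = READ_ONCE(pp->netdev->mtu);
		break;
	case LIBETH_FQE_SHORT:
		mtu = fq->truesize;
		break;
	default:
		return false;
	}

	mtu = roundup_pow_of_two(mtu);
	max = min_t(u32, rounddown_pow_of_two(fq->buf_len ? : U32_MAX),
		    pp->max_len);

	fq->buf_len = clamp_t(u32, mtu, LIBETH_RX_BUF_STRIDE, max);
	fq->truesize = fq->buf_len;

	return true;
}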

/**
 * libeth_rx_fq_create - create a PP with the default libeth settings
 * @fq: buffer queue struct to fill
 * @napi: &napi_struct covering this PP (no usage outside its poll loops)
 *
 * Return: %0 on success, -%errno on failure.
 */
int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi)
{
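	/* Sketch of the creation flow implied by the kdoc: compute the
	 * params, create the PP, allocate the FQE array. LIBETH_RX_PAGE_ORDER
	 * and the fq fields used here (count, nid, hsplit, fqes, pp) are
	 * assumed from <net/libeth/rx.h>.
	 */
	struct page_pool_params pp = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= LIBETH_RX_PAGE_ORDER,
		.pool_size	= fq->count,
		.nid		= fq->nid,
		.dev		= napi->dev->dev.parent,
		.netdev		= napi->dev,
		.napi		= napi,
		.dma_dir	= DMA_FROM_DEVICE,
	};
	struct libeth_fqe *fqes;
	struct page_pool *pool;
	bool ret;

	if (!fq->hsplit)
		ret = libeth_rx_page_pool_params(fq, &pp);
	else
		ret = libeth_rx_page_pool_params_zc(fq, &pp);
	if (!ret)
		return -EINVAL;

	pool = page_pool_create(&pp);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	fqes = kvcalloc_node(fq->count, sizeof(*fqes), GFP_KERNEL, fq->nid);
	if (!fqes) {
		page_pool_destroy(pool);
		return -ENOMEM;
	}

	fq->fqes = fqes;
	fq->pp = pool;

	return 0;
}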
EXPORT_SYMBOL_NS_GPL(libeth_rx_fq_create, LIBETH);

/**
 * libeth_rx_fq_destroy - destroy a &page_pool created by libeth
 * @fq: buffer queue to process
 */
void libeth_rx_fq_destroy(struct libeth_fq *fq)
{
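	/* Sketch: release in reverse creation order */
	kvfree(fq->fqes);
	page_pool_destroy(fq->pp);

	fq->fqes = NULL;
	fq->pp = NULL;
}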
EXPORT_SYMBOL_NS_GPL(libeth_rx_fq_destroy, LIBETH);

/**
 * libeth_rx_recycle_slow - recycle a libeth page from the NAPI context
 * @page: page to recycle
 *
 * To be used on exceptions or rare cases not requiring fast inline recycling.
 */
void libeth_rx_recycle_slow(struct page *page)
{
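	/* Sketch: hand the page back to its PP; ->pp is only valid for
	 * page_pool-backed pages, which all libeth Rx pages are.
	 */
	page_pool_recycle_direct(page->pp, page);
}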
EXPORT_SYMBOL_NS_GPL(libeth_rx_recycle_slow, LIBETH);

/* Converting abstract packet type numbers into a software structure with
 * the packet parameters to do O(1) lookup on Rx.
 */

static const u16 libeth_rx_pt_xdp_oip[] = {
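	/* Sketch for this and the two tables below: the indices are assumed
	 * to be the LIBETH_RX_PT_* enums from <net/libeth/rx.h>, the values
	 * the XDP_RSS_* hash type bits from <net/xdp.h>.
	 */
	[LIBETH_RX_PT_OUTER_L2]		= XDP_RSS_TYPE_NONE,
	[LIBETH_RX_PT_OUTER_IPV4]	= XDP_RSS_L3_IPV4,
	[LIBETH_RX_PT_OUTER_IPV6]	= XDP_RSS_L3_IPV6,
};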

static const u16 libeth_rx_pt_xdp_iprot[] = {
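	[LIBETH_RX_PT_INNER_NONE]	= XDP_RSS_TYPE_NONE,
	[LIBETH_RX_PT_INNER_UDP]	= XDP_RSS_L4_UDP,
	[LIBETH_RX_PT_INNER_TCP]	= XDP_RSS_L4_TCP,
	[LIBETH_RX_PT_INNER_SCTP]	= XDP_RSS_L4_SCTP,
	[LIBETH_RX_PT_INNER_ICMP]	= XDP_RSS_L4_ICMP,
};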

static const u16 libeth_rx_pt_xdp_pl[] = {
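	[LIBETH_RX_PT_PAYLOAD_NONE]	= XDP_RSS_TYPE_NONE,
	[LIBETH_RX_PT_PAYLOAD_L2]	= XDP_RSS_TYPE_NONE,
	[LIBETH_RX_PT_PAYLOAD_L3]	= XDP_RSS_TYPE_NONE,
	[LIBETH_RX_PT_PAYLOAD_L4]	= XDP_RSS_L4,
};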

/**
 * libeth_rx_pt_gen_hash_type - generate an XDP RSS hash type for a PT
 * @pt: PT structure to evaluate
 *
 * Generate the ``hash_type`` field with XDP RSS type values from the parsed
 * packet parameters if they're obtained dynamically at runtime.
 */
void libeth_rx_pt_gen_hash_type(struct libeth_rx_pt *pt)
{
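	/* Sketch: OR the per-field lookups together; the pt field names
	 * (outer_ip, inner_prot, payload_layer) are assumed from
	 * &struct libeth_rx_pt.
	 */
	pt->hash_type = 0;
	pt->hash_type |= libeth_rx_pt_xdp_oip[pt->outer_ip];
	pt->hash_type |= libeth_rx_pt_xdp_iprot[pt->inner_prot];
	pt->hash_type |= libeth_rx_pt_xdp_pl[pt->payload_layer];
}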
EXPORT_SYMBOL_NS_GPL(libeth_rx_pt_gen_hash_type, LIBETH);

/* Module */

MODULE_DESCRIPTION("Common Ethernet library");
MODULE_LICENSE("GPL");