/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2024 Intel Corporation */

#ifndef __LIBETH_RX_H
#define __LIBETH_RX_H

#include <linux/if_vlan.h>

#include <net/page_pool/helpers.h>
#include <net/xdp.h>

/* Rx buffer management */

/* Space reserved in front of each frame */
#define LIBETH_SKB_HEADROOM	…
/* Maximum headroom for worst-case calculations */
#define LIBETH_MAX_HEADROOM	…
/* Link layer / L2 overhead: Ethernet, 2 VLAN tags (C + S), FCS */
#define LIBETH_RX_LL_LEN	…
/* Maximum supported L2-L4 header length */
#define LIBETH_MAX_HEAD		…

/* Always use order-0 pages */
#define LIBETH_RX_PAGE_ORDER	…
/* Pick a sane buffer stride and align to a cacheline boundary */
#define LIBETH_RX_BUF_STRIDE	…
/* HW-writeable space in one buffer: truesize - headroom/tailroom, aligned */
#define LIBETH_RX_PAGE_LEN(hr)	…

/**
 * struct libeth_fqe - structure representing an Rx buffer (fill queue element)
 * @page: page holding the buffer
 * @offset: offset from the page start (to the headroom)
 * @truesize: total space occupied by the buffer (w/ headroom and tailroom)
 *
 * Depending on the MTU, the API switches between the one-page-per-frame and
 * the shared-page model (to conserve memory on bigger-page platforms). In
 * case of the former, @offset is always 0 and @truesize is always
 * ```PAGE_SIZE```.
 */
struct libeth_fqe {
	…
} __aligned_largest;

/**
 * enum libeth_fqe_type - enum representing types of Rx buffers
 * @LIBETH_FQE_MTU: buffer size is determined by MTU
 * @LIBETH_FQE_SHORT: buffer size is smaller than MTU, for short frames
 * @LIBETH_FQE_HDR: buffer size is ```LIBETH_MAX_HEAD```-sized, for headers
 */
enum libeth_fqe_type {
	…
};

/**
 * struct libeth_fq - structure representing a buffer (fill) queue
 * @fp: hotpath part of the structure
 * @pp: &page_pool for buffer management
 * @fqes: array of Rx buffers
 * @truesize: size to allocate per buffer, w/ overhead
 * @count: number of descriptors/buffers the queue has
 * @type: type of the buffers this queue has
 * @hsplit: flag whether header split is enabled
 * @buf_len: HW-writeable length per buffer
 * @nid: ID of the closest NUMA node with memory
 */
struct libeth_fq {
	…
};

int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi);
void libeth_rx_fq_destroy(struct libeth_fq *fq);

/**
 * libeth_rx_alloc - allocate a new Rx buffer
 * @fq: fill queue to allocate for
 * @i: index of the buffer within the queue
 *
 * Return: DMA address to be passed to HW for Rx on successful allocation,
 * ```DMA_MAPPING_ERROR``` otherwise.
 */
static inline dma_addr_t libeth_rx_alloc(const struct libeth_fq_fp *fq, u32 i)
{
	…
}

void libeth_rx_recycle_slow(struct page *page);

/**
 * libeth_rx_sync_for_cpu - synchronize or recycle the buffer post DMA
 * @fqe: buffer to process
 * @len: frame length from the descriptor
 *
 * Process the buffer after it's been written by HW. The regular path is to
 * sync the DMA for the CPU; when no data was written, the buffer is
 * immediately recycled back to its PP instead.
 *
 * Return: true when there's data to process, false otherwise.
 */
static inline bool libeth_rx_sync_for_cpu(const struct libeth_fqe *fqe,
					  u32 len)
{
	…
}
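/*
 * A minimal usage sketch, not part of the API: how a driver could tie the
 * fill queue helpers above together. Only libeth_rx_fq_create(),
 * libeth_rx_alloc(), libeth_rx_sync_for_cpu(), libeth_rx_fq_destroy() and the
 * &libeth_fq / &libeth_fqe members documented above come from this header;
 * the ring size, the @napi pointer, the @i / @len values taken from the
 * completed descriptor and the descriptor write itself are driver-specific
 * assumptions made up for illustration.
 *
 *	struct libeth_fq fq = {
 *		.count	= 512,			// HW Rx ring size, driver-chosen
 *		.type	= LIBETH_FQE_MTU,	// full-frame buffers
 *		.nid	= NUMA_NO_NODE,
 *	};
 *	int err;
 *
 *	err = libeth_rx_fq_create(&fq, napi);	// sets up @pp and @fqes
 *	if (err)
 *		return err;
 *
 *	for (u32 i = 0; i < fq.count; i++) {
 *		dma_addr_t dma = libeth_rx_alloc(&fq.fp, i);
 *
 *		if (dma == DMA_MAPPING_ERROR)
 *			break;
 *		// post @dma to the i-th HW Rx descriptor (driver-specific)
 *	}
 *
 *	// Rx hotpath: descriptor @i completed, @len bytes written by HW
 *	struct libeth_fqe *fqe = &fq.fqes[i];
 *
 *	if (libeth_rx_sync_for_cpu(fqe, len)) {
 *		// build the skb/XDP frame from fqe->page + fqe->offset
 *	} else {
 *		// zero-length buffer, libeth already recycled it to the PP
 *	}
 *
 *	// teardown, once the HW ring is stopped and all buffers are returned:
 *	libeth_rx_fq_destroy(&fq);
 */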
/* Converting abstract packet type numbers into a software structure with
 * the packet parameters to do O(1) lookup on Rx.
 */

enum {
	…
};

enum {
	…
};

enum {
	…
};

enum {
	…
};

enum {
	…
};

#define LIBETH_RX_PT_PAYLOAD_NONE	…
#define LIBETH_RX_PT_PAYLOAD_L2		…
#define LIBETH_RX_PT_PAYLOAD_L3		…
#define LIBETH_RX_PT_PAYLOAD_L4		…

struct libeth_rx_pt {
	…
};

void libeth_rx_pt_gen_hash_type(struct libeth_rx_pt *pt);

/**
 * libeth_rx_pt_get_ip_ver - get IP version from a packet type structure
 * @pt: packet type params
 *
 * Wrapper to compile out the IPv6 code from the drivers when not supported
 * by the kernel.
 *
 * Return: @pt.outer_ip, or a stub value for IPv6 when not compiled in.
 */
static inline u32 libeth_rx_pt_get_ip_ver(struct libeth_rx_pt pt)
{
	…
}

/* libeth_rx_pt_has_*() can be used to quickly check whether the HW metadata
 * is available to avoid further expensive processing such as descriptor
 * reads. They already check that the corresponding netdev feature is enabled,
 * thus can be used as drop-in replacements for manual feature checks.
 */

static inline bool libeth_rx_pt_has_checksum(const struct net_device *dev,
					     struct libeth_rx_pt pt)
{
	…
}

static inline bool libeth_rx_pt_has_hash(const struct net_device *dev,
					 struct libeth_rx_pt pt)
{
	…
}

/**
 * libeth_rx_pt_set_hash - fill in the skb hash value based on the PT
 * @skb: skb to fill the hash in
 * @hash: 32-bit hash value from the descriptor
 * @pt: packet type
 */
static inline void libeth_rx_pt_set_hash(struct sk_buff *skb, u32 hash,
					 struct libeth_rx_pt pt)
{
	…
}

#endif /* __LIBETH_RX_H */
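/*
 * A minimal usage sketch, not part of the API: the O(1) packet type lookup
 * described above. Only the libeth_rx_pt_*() helpers and &libeth_rx_pt come
 * from this header; the lookup table @rx_ptypes, its size, and the @ptype,
 * @hash and @csum_ok values parsed from the Rx descriptor are driver-specific
 * assumptions made up for illustration.
 *
 *	// init: fill the table from the HW/FW packet type info, then let
 *	// libeth derive the skb hash type once per entry
 *	static struct libeth_rx_pt rx_ptypes[1024];
 *
 *	for (u32 i = 0; i < ARRAY_SIZE(rx_ptypes); i++)
 *		libeth_rx_pt_gen_hash_type(&rx_ptypes[i]);
 *
 *	// Rx hotpath: resolve the abstract ptype number from the descriptor
 *	struct libeth_rx_pt pt = rx_ptypes[ptype];
 *
 *	if (libeth_rx_pt_has_hash(dev, pt))
 *		libeth_rx_pt_set_hash(skb, hash, pt);
 *
 *	if (libeth_rx_pt_has_checksum(dev, pt) && csum_ok)
 *		skb->ip_summed = CHECKSUM_UNNECESSARY;
 */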