linux/drivers/net/ethernet/fungible/funeth/funeth_rx.c

// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)

#include <linux/bpf_trace.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include "funeth_txrx.h"
#include "funeth.h"
#include "fun_queue.h"

#define CREATE_TRACE_POINTS
#include "funeth_trace.h"

/* Given the device's max supported MTU and pages of at least 4KB, a packet can
 * be scattered into at most 4 buffers.
 */
#define RX_MAX_FRAGS 4

/* Per packet headroom in non-XDP mode. Present only for 1-frag packets. */
#define FUN_RX_HEADROOM NET_SKB_PAD

/* We try to reuse pages for our buffers. To avoid frequent page ref writes we
 * take EXTRA_PAGE_REFS references at once and then hand them out one per packet
 * occupying the buffer.
 */
#define EXTRA_PAGE_REFS 1000000
#define MIN_PAGE_REFS 1000

enum {
	FUN_XDP_FLUSH_REDIR = 1,	/* XDP_REDIRECT pending at end of CQ poll */
	FUN_XDP_FLUSH_TX = 2,		/* XDP_TX doorbell pending at end of CQ poll */
};

/* See if a page is running low on refs we are holding and if so take more. */
static void refresh_refs(struct funeth_rxbuf *buf)
{}
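
/* A minimal sketch of what the replenish step above could look like. The page
 * pointer and our outstanding ref count are assumed to live in ->page and
 * ->pg_refs (names assumed here, not taken from funeth_txrx.h):
 *
 *	if (unlikely(buf->pg_refs < MIN_PAGE_REFS)) {
 *		buf->pg_refs += EXTRA_PAGE_REFS;
 *		page_ref_add(buf->page, EXTRA_PAGE_REFS);
 *	}
 *
 * Taking EXTRA_PAGE_REFS references in one page_ref_add() keeps the atomic
 * refcount update off the per-packet hot path.
 */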

/* Offer a buffer to the Rx buffer cache. The cache will hold the buffer if its
 * page is worth retaining and there's room for it. Otherwise the page is
 * unmapped and our references released.
 */
static void cache_offer(struct funeth_rxq *q, const struct funeth_rxbuf *buf)
{}
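
/* A sketch of the offer path, assuming a power-of-2 ring of buffers with
 * ->prod_cnt, ->cons_cnt, ->mask and ->bufs fields (all names hypothetical):
 *
 *	struct funeth_rx_cache *c = &q->cache;
 *
 *	if (c->prod_cnt - c->cons_cnt <= c->mask) {
 *		c->bufs[c->prod_cnt & c->mask] = *buf;
 *		c->prod_cnt++;
 *	} else {
 *		dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE,
 *				     DMA_FROM_DEVICE,
 *				     DMA_ATTR_SKIP_CPU_SYNC);
 *		__page_frag_cache_drain(buf->page, buf->pg_refs);
 *	}
 *
 * __page_frag_cache_drain() returns all of the batched references taken by
 * refresh_refs() in a single call.
 */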

/* Get a page from the Rx buffer cache. We only consider the next available
 * page and return it if we own all its references.
 */
static bool cache_get(struct funeth_rxq *q, struct funeth_rxbuf *rb)
{}
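
/* Sketch of the ownership test, using the same assumed fields as above. The
 * page can be reused only when its refcount has fallen back to the refs we
 * hold, i.e. the stack has freed every skb fragment pointing at it:
 *
 *	struct funeth_rx_cache *c = &q->cache;
 *	struct funeth_rxbuf *buf;
 *
 *	if (c->prod_cnt == c->cons_cnt)
 *		return false;
 *
 *	buf = &c->bufs[c->cons_cnt & c->mask];
 *	if (page_ref_count(buf->page) != buf->pg_refs)
 *		return false;
 *
 *	dma_sync_single_for_device(q->dma_dev, buf->dma_addr, PAGE_SIZE,
 *				   DMA_FROM_DEVICE);
 *	*rb = *buf;
 *	c->cons_cnt++;
 *	refresh_refs(rb);
 *	return true;
 */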

/* Allocate and DMA-map a page for receive. */
static int funeth_alloc_page(struct funeth_rxq *q, struct funeth_rxbuf *rb,
			     int node, gfp_t gfp)
{}
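
/* Sketch: try the cache first, then fall back to the page allocator and
 * DMA-map the fresh page (NUMA/pfmemalloc bookkeeping and stats omitted):
 *
 *	struct page *p;
 *
 *	if (cache_get(q, rb))
 *		return 0;
 *
 *	p = __alloc_pages_node(node, gfp | __GFP_NOWARN, 0);
 *	if (unlikely(!p))
 *		return -ENOMEM;
 *
 *	rb->dma_addr = dma_map_page(q->dma_dev, p, 0, PAGE_SIZE,
 *				    DMA_FROM_DEVICE);
 *	if (unlikely(dma_mapping_error(q->dma_dev, rb->dma_addr))) {
 *		__free_page(p);
 *		return -ENOMEM;
 *	}
 *
 *	rb->page = p;
 *	rb->pg_refs = 1;
 *	refresh_refs(rb);
 *	return 0;
 */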

static void funeth_free_page(struct funeth_rxq *q, struct funeth_rxbuf *rb)
{}

/* Run the XDP program assigned to an Rx queue.
 * Return %NULL if the buffer is consumed, or the virtual address of the packet
 * to turn into an skb.
 */
static void *fun_run_xdp(struct funeth_rxq *q, skb_frag_t *frags, void *buf_va,
			 int ref_ok, struct funeth_txq *xdp_q)
{}
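
/* Sketch of the verdict handling; the xdp_buff setup uses an assumed headroom
 * and assumed queue fields (->xdp_rxq, ->xdp_prog, ->xdp_flush):
 *
 *	struct xdp_buff xdp;
 *	u32 act;
 *
 *	xdp_init_buff(&xdp, PAGE_SIZE, &q->xdp_rxq);
 *	xdp_prepare_buff(&xdp, buf_va, XDP_PACKET_HEADROOM,
 *			 skb_frag_size(frags) - XDP_PACKET_HEADROOM, false);
 *	act = bpf_prog_run_xdp(q->xdp_prog, &xdp);
 *	switch (act) {
 *	case XDP_PASS:
 *		return xdp.data;
 *	case XDP_TX:
 *		(queue the frame on xdp_q, set FUN_XDP_FLUSH_TX)
 *		return NULL;
 *	case XDP_REDIRECT:
 *		(xdp_do_redirect(), set FUN_XDP_FLUSH_REDIR)
 *		return NULL;
 *	default:
 *		bpf_warn_invalid_xdp_action(q->netdev, q->xdp_prog, act);
 *		fallthrough;
 *	case XDP_ABORTED:
 *		trace_xdp_exception(q->netdev, q->xdp_prog, act);
 *		fallthrough;
 *	case XDP_DROP:
 *		return NULL;
 *	}
 *
 * The flush flags defer the Tx doorbell and xdp_do_flush() to the end of the
 * CQ poll so they are paid once per NAPI run, not once per packet.
 */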

/* A CQE contains a fixed completion structure along with optional metadata and
 * even packet data. Given the start address of a CQE, return the start of the
 * contained fixed structure, which lies at the end of the CQE.
 */
static const void *cqe_to_info(const void *cqe)
{}

/* The inverse of cqe_to_info(). */
static const void *info_to_cqe(const void *cqe_info)
{}
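
/* Both helpers reduce to constant pointer arithmetic. With the fixed struct at
 * the tail of each CQE, a sketch under an assumed FUNETH_CQE_INFO_OFFSET (CQE
 * size minus sizeof(struct fun_cqe_info)) is simply:
 *
 *	cqe_to_info:  return cqe + FUNETH_CQE_INFO_OFFSET;
 *	info_to_cqe:  return cqe_info - FUNETH_CQE_INFO_OFFSET;
 */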

/* Return the type of hash provided by the device based on the L3 and L4
 * protocols it parsed for the packet.
 */
static enum pkt_hash_types cqe_to_pkt_hash_type(u16 pkt_parse)
{}
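
/* Sketch: fold the parsed-L3/parsed-L4 bits of @pkt_parse into a small key and
 * map it through a lookup table (the bit positions here are assumptions):
 *
 *	static const enum pkt_hash_types htype[] = {
 *		PKT_HASH_TYPE_NONE,	(neither L3 nor L4 parsed)
 *		PKT_HASH_TYPE_L3,	(L3 only)
 *		PKT_HASH_TYPE_NONE,	(L4 without L3: not expected)
 *		PKT_HASH_TYPE_L4,	(L3 + L4)
 *	};
 *
 *	return htype[pkt_parse & 3];
 */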

/* Each received packet can be scattered across several Rx buffers or can
 * share a buffer with previously received packets depending on the buffer
 * and packet sizes and the room available in the most recently used buffer.
 *
 * The rules are:
 * - If the buffer at the head of an RQ has not been used it gets (part of) the
 *   next incoming packet.
 * - Otherwise, if the packet fully fits in the buffer's remaining space the
 *   packet is written there.
 * - Otherwise, the packet goes into the next Rx buffer.
 *
 * This function returns the Rx buffer for a packet, or a fragment thereof, of
 * the given length. If it isn't @buf it either recycles or frees that buffer
 * before advancing the queue to the next buffer.
 *
 * If called repeatedly with the remaining length of a packet it will walk
 * through all the buffers containing the packet.
 */
static struct funeth_rxbuf *
get_buf(struct funeth_rxq *q, struct funeth_rxbuf *buf, unsigned int len)
{}
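
/* Simplified sketch of the rules above (->buf_offset tracks how much of the
 * head buffer is used; the ring fields are assumed names):
 *
 *	if (!q->buf_offset || q->buf_offset + len <= PAGE_SIZE)
 *		return buf;
 *
 *	(packet does not fit: recycle or free @buf, then advance the RQ)
 *	cache_offer(q, buf);
 *	q->rq_cons++;
 *	q->buf_offset = 0;
 *	return &q->bufs[q->rq_cons & q->rq_mask];
 *
 * The real function must also replenish the vacated RQ slot with a fresh
 * buffer before the device wraps around to it.
 */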

/* Gather the page fragments making up the first Rx packet on @q. Its total
 * length @tot_len includes optional head- and tail-rooms.
 *
 * Return 0 if the device retains ownership of at least some of the pages.
 * In this case the caller may only copy the packet.
 *
 * A non-zero return value gives the caller permission to use references to the
 * pages, e.g., attach them to skbs. Additionally, if the value is <0, at least
 * one of the pages is PF_MEMALLOC.
 *
 * Regardless of outcome the caller is granted a reference to each of the pages.
 */
static int fun_gather_pkt(struct funeth_rxq *q, unsigned int tot_len,
			  skb_frag_t *frags)
{}
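
/* Sketch of the gather loop: walk the buffers with get_buf() and record each
 * piece as a page fragment (simplified; handing out one of the batched page
 * refs per fragment and the 0 / <0 ownership results are elided):
 *
 *	struct funeth_rxbuf *buf = &q->bufs[q->rq_cons & q->rq_mask];
 *	int ref_ok = 1;
 *
 *	do {
 *		unsigned int frag_len;
 *
 *		buf = get_buf(q, buf, tot_len);
 *		frag_len = min_t(unsigned int, tot_len,
 *				 PAGE_SIZE - q->buf_offset);
 *		skb_frag_fill_page_desc(frags++, buf->page, q->buf_offset,
 *					frag_len);
 *		if (page_is_pfmemalloc(buf->page))
 *			ref_ok = -1;
 *		q->buf_offset += frag_len;
 *		tot_len -= frag_len;
 *	} while (tot_len);
 *
 *	return ref_ok;
 */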

static bool rx_hwtstamp_enabled(const struct net_device *dev)
{}

/* Advance the CQ pointers and phase tag to the next CQE. */
static void advance_cq(struct funeth_rxq *q)
{}
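
/* Sketch of the wrap and phase bookkeeping (->cq_head, ->cq_mask, ->phase,
 * ->next_cqe_info and FUNETH_CQE_SIZE are assumed names):
 *
 *	if (unlikely(q->cq_head == q->cq_mask)) {
 *		q->cq_head = 0;
 *		q->phase ^= 1;
 *		q->next_cqe_info = cqe_to_info(q->cqes);
 *	} else {
 *		q->cq_head++;
 *		q->next_cqe_info += FUNETH_CQE_SIZE;
 *	}
 *	prefetch(q->next_cqe_info);
 *
 * Flipping ->phase on every wrap is what lets cqe_phase_mismatch() tell
 * freshly written CQEs from stale ones left over from the previous lap.
 */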

/* Process the packet represented by the head CQE of @q. Gather the packet's
 * fragments, run it through the optional XDP program, and if needed construct
 * an skb and pass it to the stack.
 */
static void fun_handle_cqe_pkt(struct funeth_rxq *q, struct funeth_txq *xdp_q)
{}

/* Return 0 if the phase tag of the CQE at the CQ's head matches expectations,
 * indicating the CQE is new.
 */
static u16 cqe_phase_mismatch(const struct fun_cqe_info *ci, u16 phase)
{}
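
/* Sketch: the phase tag is assumed to be the low bit of a big-endian word in
 * struct fun_cqe_info (called sf_p here; the field name is an assumption):
 *
 *	return (be16_to_cpu(ci->sf_p) & 1) ^ phase;
 *
 * XORing with the expected phase yields 0 exactly when the CQE was written
 * during the current lap of the ring.
 */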

/* Walk through a CQ identifying and processing fresh CQEs up to the given
 * budget. Return the remaining budget.
 */
static int fun_process_cqes(struct funeth_rxq *q, int budget)
{}
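
/* Sketch of the loop. The dma_rmb() orders the phase-tag check against reads
 * of the rest of the CQE; the xdp_q lookup and stats are elided:
 *
 *	while (budget && !cqe_phase_mismatch(q->next_cqe_info, q->phase)) {
 *		dma_rmb();
 *		fun_handle_cqe_pkt(q, xdp_q);
 *		advance_cq(q);
 *		budget--;
 *	}
 *
 *	if (unlikely(q->xdp_flush)) {
 *		if (q->xdp_flush & FUN_XDP_FLUSH_TX)
 *			fun_txq_wr_db(xdp_q);
 *		if (q->xdp_flush & FUN_XDP_FLUSH_REDIR)
 *			xdp_do_flush();
 *		q->xdp_flush = 0;
 *	}
 *
 *	return budget;
 *
 * fun_txq_wr_db() is assumed to be the Tx doorbell helper from funeth_txrx.h.
 */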

/* NAPI handler for Rx queues. Calls the CQE processing loop and writes RQ/CQ
 * doorbells as needed.
 */
int fun_rxq_napi_poll(struct napi_struct *napi, int budget)
{}
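
/* Sketch of the doorbell sequence: re-arm the IRQ only if NAPI really
 * completed, and ring the RQ doorbell only once enough buffers have been
 * consumed to be worth reporting (threshold and field names assumed):
 *
 *	struct fun_irq *irq = container_of(napi, struct fun_irq, napi);
 *	struct funeth_rxq *q = irq->rxq;
 *	int work_done = budget - fun_process_cqes(q, budget);
 *	u32 cq_db_val = q->cq_head;
 *
 *	if (work_done < budget && napi_complete_done(napi, work_done))
 *		cq_db_val |= q->irq_db_val;
 *
 *	if (q->rq_cons - q->rq_cons_db >= q->rq_db_thres) {
 *		q->rq_cons_db = q->rq_cons;
 *		writel((q->rq_cons - 1) & q->rq_mask, q->rq_db);
 *	}
 *
 *	writel(cq_db_val, q->cq_db);
 *	return work_done;
 */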

/* Free the Rx buffers of an Rx queue. */
static void fun_rxq_free_bufs(struct funeth_rxq *q)
{}

/* Initially provision an Rx queue with Rx buffers. */
static int fun_rxq_alloc_bufs(struct funeth_rxq *q, int node)
{}

/* Initialize a used-buffer cache of the given depth. */
static int fun_rxq_init_cache(struct funeth_rx_cache *c, unsigned int depth,
			      int node)
{}

/* Deallocate an Rx queue's used-buffer cache and its contents. */
static void fun_rxq_free_cache(struct funeth_rxq *q)
{}

int fun_rxq_set_bpf(struct funeth_rxq *q, struct bpf_prog *prog)
{}

/* Create an Rx queue, allocating the host memory it needs. */
static struct funeth_rxq *fun_rxq_create_sw(struct net_device *dev,
					    unsigned int qidx,
					    unsigned int ncqe,
					    unsigned int nrqe,
					    struct fun_irq *irq)
{}

static void fun_rxq_free_sw(struct funeth_rxq *q)
{}

/* Create an Rx queue's resources on the device. */
int fun_rxq_create_dev(struct funeth_rxq *q, struct fun_irq *irq)
{}

static void fun_rxq_free_dev(struct funeth_rxq *q)
{}

/* Create or advance an Rx queue, allocating all the host and device resources
 * needed to reach the target state.
 */
int funeth_rxq_create(struct net_device *dev, unsigned int qidx,
		      unsigned int ncqe, unsigned int nrqe, struct fun_irq *irq,
		      int state, struct funeth_rxq **qp)
{}

/* Free Rx queue resources until it reaches the target state. */
struct funeth_rxq *funeth_rxq_free(struct funeth_rxq *q, int state)
{}