// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "filter.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Preferred number of descriptors to fill at once */
#define EF4_RX_PREFERRED_BATCH 8192 / 256

/* Number of RX buffers to recycle pages for.  When creating the RX page recycle
 * ring, this number is divided by the number of buffers per page to calculate
 * the number of pages to store in the RX page recycle ring.
 */
#define EF4_RECYCLE_RING_SIZE_IOMMU 4096
#define EF4_RECYCLE_RING_SIZE_NOIOMMU (2 * EF4_RX_PREFERRED_BATCH)
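
/* For illustration only: one plausible way to turn the buffer counts
 * above into a page count for the recycle ring.  The rx_bufs_per_page
 * field is an assumption modelled on the sfc driver, and
 * roundup_pow_of_two() comes from <linux/log2.h>; the driver's real
 * sizing is done in ef4_init_rx_recycle_ring() below.
 */
static inline unsigned int
ef4_recycle_ring_pages_sketch(struct ef4_nic *efx, unsigned int bufs)
{
	/* Divide buffers by buffers-per-page, then round up to a power
	 * of two so the ring can be indexed with a simple mask.
	 */
	return roundup_pow_of_two(bufs / efx->rx_bufs_per_page);
}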

/* Size of buffer allocated for skb header area. */
#define EF4_SKB_HEADERS  128u

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;
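
/* A minimal sketch (not the driver's code) of how this percentage
 * might translate into a per-queue refill trigger, clamping the module
 * parameter to 100%; min() is the standard kernel helper from
 * <linux/minmax.h>.
 */
static inline unsigned int ef4_refill_trigger_sketch(unsigned int max_fill)
{
	return max_fill * min(rx_refill_threshold, 100U) / 100U;
}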

/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EF4_RX_MAX_FRAGS DIV_ROUND_UP(EF4_MAX_FRAME_LEN(EF4_MAX_MTU), \
				      EF4_RX_USR_BUF_SIZE)
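
/* Worked example with illustrative numbers only: for a 9216-byte
 * maximum frame and 2048-byte usable buffers this evaluates to
 * DIV_ROUND_UP(9216, 2048) = 5, i.e. one packet may be scattered
 * across up to five RX buffers.
 */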

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EF4_RXD_HEAD_ROOM	(1 + EF4_RX_MAX_FRAGS)
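
/* Illustrative sketch, not the driver's code: the head room is what
 * keeps the ring from being pushed completely full, e.g.
 *
 *	rx_queue->max_fill = ring_entries - EF4_RXD_HEAD_ROOM;
 *
 * which leaves one spare descriptor plus one packet's worth of
 * EF4_RX_MAX_FRAGS buffers free at all times.
 */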

static inline u8 *ef4_rx_buf_va(struct ef4_rx_buffer *buf)
{}

static inline u32 ef4_rx_buf_hash(struct ef4_nic *efx, const u8 *eh)
{}

static inline struct ef4_rx_buffer *
ef4_rx_buf_next(struct ef4_rx_queue *rx_queue, struct ef4_rx_buffer *rx_buf)
{}

static inline void ef4_sync_rx_buffer(struct ef4_nic *efx,
				      struct ef4_rx_buffer *rx_buf,
				      unsigned int len)
{}

void ef4_rx_config_page_split(struct ef4_nic *efx)
{}

/* Check the RX page recycle ring for a page that can be reused. */
static struct page *ef4_reuse_page(struct ef4_rx_queue *rx_queue)
{}
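
/* A rough sketch of one way such a lookup can work, assuming ring
 * fields (page_ring, page_remove, page_ptr_mask) modelled on the sfc
 * driver; illustrative only, not the body above:
 *
 *	unsigned int index = rx_queue->page_remove &
 *			     rx_queue->page_ptr_mask;
 *	struct page *page = rx_queue->page_ring[index];
 *
 *	if (page && page_count(page) == 1) {
 *		rx_queue->page_ring[index] = NULL;
 *		++rx_queue->page_remove;
 *		return page;	// sole reference: safe to reuse
 *	}
 *	return NULL;
 */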

/**
 * ef4_init_rx_buffers - create EF4_RX_PREFERRED_BATCH page-based RX buffers
 * @rx_queue:		Efx RX queue
 * @atomic:		control memory allocation flags
 *
 * This allocates a batch of pages, maps them for DMA, and populates a
 * struct ef4_rx_buffer for each one.  Returns a negative error code on
 * failure, or 0 on success.  If a single page can be used for multiple
 * buffers, then the page will either be inserted fully, or not at all.
 */
static int ef4_init_rx_buffers(struct ef4_rx_queue *rx_queue, bool atomic)
{}
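
/* Illustrative sketch of how the @atomic flag typically steers the
 * allocation, using standard GFP conventions; the rx_buffer_order
 * field is an assumption modelled on the sfc driver:
 *
 *	struct page *page;
 *
 *	page = alloc_pages(__GFP_COMP |
 *			   (atomic ? GFP_ATOMIC : GFP_KERNEL),
 *			   efx->rx_buffer_order);
 *	if (unlikely(!page))
 *		return -ENOMEM;
 */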

/* Unmap a DMA-mapped page.  This function is only called for the final RX
 * buffer in a page.
 */
static void ef4_unmap_rx_buffer(struct ef4_nic *efx,
				struct ef4_rx_buffer *rx_buf)
{}

static void ef4_free_rx_buffers(struct ef4_rx_queue *rx_queue,
				struct ef4_rx_buffer *rx_buf,
				unsigned int num_bufs)
{}

/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used in
 * the descriptor ring and appearing in the recycle ring simultaneously.
 */
static void ef4_recycle_rx_page(struct ef4_channel *channel,
				struct ef4_rx_buffer *rx_buf)
{}
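
/* Sketch of the "final buffer only" guard described above, assuming a
 * per-buffer flag such as EF4_RX_BUF_LAST_IN_PAGE as in the sfc
 * driver; illustrative only:
 *
 *	if (!(rx_buf->flags & EF4_RX_BUF_LAST_IN_PAGE))
 *		return;		// other buffers still use this page
 */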

static void ef4_fini_rx_buffer(struct ef4_rx_queue *rx_queue,
			       struct ef4_rx_buffer *rx_buf)
{}

/* Recycle the pages that are used by buffers that have just been received. */
static void ef4_recycle_rx_pages(struct ef4_channel *channel,
				 struct ef4_rx_buffer *rx_buf,
				 unsigned int n_frags)
{}

static void ef4_discard_rx_packet(struct ef4_channel *channel,
				  struct ef4_rx_buffer *rx_buf,
				  unsigned int n_frags)
{}

/**
 * ef4_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 * @atomic:		control memory allocation flags
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill.  If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here).  In
 * practice, this means this function must run from the NAPI handler,
 * or be called when NAPI is disabled.
 */
void ef4_fast_push_rx_descriptors(struct ef4_rx_queue *rx_queue, bool atomic)
{}
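
/* A rough sketch of the refill flow documented above; the counter
 * names are assumptions modelled on the sfc driver and the batching
 * loop is simplified.  Illustrative only, not the body above:
 *
 *	unsigned int fill_level, space;
 *
 *	fill_level = rx_queue->added_count - rx_queue->removed_count;
 *	if (fill_level > rx_queue->fast_fill_trigger)
 *		return;			// still above the threshold
 *
 *	space = rx_queue->max_fill - fill_level;
 *	do {
 *		if (ef4_init_rx_buffers(rx_queue, atomic)) {
 *			// Out of memory: retry later from a timer.
 *			ef4_schedule_slow_fill(rx_queue);
 *			break;
 *		}
 *	} while ((space -= EF4_RX_PREFERRED_BATCH) >=
 *		 EF4_RX_PREFERRED_BATCH);
 */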

void ef4_rx_slow_fill(struct timer_list *t)
{}

static void ef4_rx_packet__check_len(struct ef4_rx_queue *rx_queue,
				     struct ef4_rx_buffer *rx_buf,
				     int len)
{}

/* Pass a received packet up through GRO.  GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
static void
ef4_rx_packet_gro(struct ef4_channel *channel, struct ef4_rx_buffer *rx_buf,
		  unsigned int n_frags, u8 *eh)
{}
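
/* Sketch of the standard page-based GRO submission pattern this
 * follows; napi_get_frags()/napi_gro_frags() are core NAPI APIs, but
 * the channel/buffer field names here are assumptions:
 *
 *	struct napi_struct *napi = &channel->napi_str;
 *	struct sk_buff *skb = napi_get_frags(napi);
 *
 *	if (unlikely(!skb)) {
 *		ef4_free_rx_buffers(ef4_channel_get_rx_queue(channel),
 *				    rx_buf, n_frags);
 *		return;
 *	}
 *	skb_fill_page_desc(skb, 0, rx_buf->page,
 *			   rx_buf->page_offset, rx_buf->len);
 *	napi_gro_frags(napi);	// hand the fragments to GRO
 */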

/* Allocate and construct an SKB around page fragments */
static struct sk_buff *ef4_rx_mk_skb(struct ef4_channel *channel,
				     struct ef4_rx_buffer *rx_buf,
				     unsigned int n_frags,
				     u8 *eh, int hdr_len)
{}

void ef4_rx_packet(struct ef4_rx_queue *rx_queue, unsigned int index,
		   unsigned int n_frags, unsigned int len, u16 flags)
{}

static void ef4_rx_deliver(struct ef4_channel *channel, u8 *eh,
			   struct ef4_rx_buffer *rx_buf,
			   unsigned int n_frags)
{}

/* Handle a received packet.  Second half: Touches packet payload. */
void __ef4_rx_packet(struct ef4_channel *channel)
{}

int ef4_probe_rx_queue(struct ef4_rx_queue *rx_queue)
{}

static void ef4_init_rx_recycle_ring(struct ef4_nic *efx,
				     struct ef4_rx_queue *rx_queue)
{}

void ef4_init_rx_queue(struct ef4_rx_queue *rx_queue)
{}

void ef4_fini_rx_queue(struct ef4_rx_queue *rx_queue)
{}

void ef4_remove_rx_queue(struct ef4_rx_queue *rx_queue)
{}


module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold, "RX descriptor ring refill threshold (%)");

#ifdef CONFIG_RFS_ACCEL

int ef4_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{}

bool __ef4_filter_rfs_expire(struct ef4_nic *efx, unsigned int quota)
{}

#endif /* CONFIG_RFS_ACCEL */

/**
 * ef4_filter_is_mc_recipient - test whether spec is a multicast recipient
 * @spec: Specification to test
 *
 * Return: %true if the specification is a non-drop RX filter that
 * matches a local MAC address I/G bit value of 1 or matches a local
 * IPv4 or IPv6 address value in the respective multicast address
 * range.  Otherwise %false.
 */
bool ef4_filter_is_mc_recipient(const struct ef4_filter_spec *spec)
{}
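
/* Sketch of the three multicast tests described above using standard
 * kernel helpers (is_multicast_ether_addr(), ipv4_is_multicast());
 * the spec field names are assumptions modelled on the sfc filter
 * code, and the non-drop RX check is elided:
 *
 *	if (spec->match_flags & EF4_FILTER_MATCH_LOC_MAC)
 *		return is_multicast_ether_addr(spec->loc_mac);
 *
 *	if (spec->match_flags & EF4_FILTER_MATCH_LOC_HOST) {
 *		if (spec->ether_type == htons(ETH_P_IP))
 *			return ipv4_is_multicast(spec->loc_host[0]);
 *		if (spec->ether_type == htons(ETH_P_IPV6))
 *			return ((const u8 *)spec->loc_host)[0] == 0xff;
 *	}
 *	return false;
 */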