// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include <linux/module.h>
#include <linux/iommu.h>
#include <net/rps.h>
#include "efx.h"
#include "nic.h"
#include "rx_common.h"

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;
module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");
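
/* Illustrative sketch only: one plausible way a percentage threshold like
 * this is turned into an absolute refill trigger when the queue is set up.
 * The helper name below is hypothetical; in practice the calculation lives
 * inside the queue initialisation path.
 */
static inline unsigned int
efx_rx_refill_trigger_sketch(unsigned int max_fill, unsigned int threshold_pct)
{
	/* Clamp to 100% so the trigger can never exceed the fill limit. */
	return max_fill * min(threshold_pct, 100U) / 100U;
}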

/* RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM	(1 + EFX_RX_MAX_FRAGS)

/* Check the RX page recycle ring for a page that can be reused. */
static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{}
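
/* Illustrative sketch only, not the upstream body.  The "_sketch" name is
 * hypothetical, and the recycle-ring fields used below (page_ring,
 * page_remove, page_add, page_ptr_mask) are assumed from struct
 * efx_rx_queue.
 */
static struct page *efx_reuse_page_sketch(struct efx_rx_queue *rx_queue)
{
	unsigned int index;
	struct page *page;

	if (unlikely(!rx_queue->page_ring))
		return NULL;

	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
	page = rx_queue->page_ring[index];
	if (!page)
		return NULL;

	rx_queue->page_ring[index] = NULL;
	/* page_remove cannot exceed page_add. */
	if (rx_queue->page_remove != rx_queue->page_add)
		++rx_queue->page_remove;

	/* A refcount of 1 means we hold the only reference, so the page can
	 * be reused directly.  Otherwise drop our reference and report a
	 * miss (a full implementation would also unmap the page's DMA
	 * mapping here).
	 */
	if (page_count(page) == 1)
		return page;

	put_page(page);
	return NULL;
}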

/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used in
 * the descriptor ring and appearing in the recycle ring simultaneously.
 */
static void efx_recycle_rx_page(struct efx_channel *channel,
				struct efx_rx_buffer *rx_buf)
{}
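
/* Illustrative sketch only, not the upstream body ("_sketch" name is
 * hypothetical).  Assumes the EFX_RX_BUF_LAST_IN_PAGE flag and the
 * page_add/page_ring/page_ptr_mask recycle-ring fields.
 */
static void efx_recycle_rx_page_sketch(struct efx_channel *channel,
				       struct efx_rx_buffer *rx_buf)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct page *page = rx_buf->page;
	unsigned int index;

	/* Only recycle the page after processing the final buffer. */
	if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
		return;

	index = rx_queue->page_add & rx_queue->page_ptr_mask;
	if (!rx_queue->page_ring[index]) {
		/* Slot free: hand the page to the recycle ring. */
		rx_queue->page_ring[index] = page;
		++rx_queue->page_add;
		return;
	}

	/* Recycle ring full: unmap and release the page instead. */
	efx_unmap_rx_buffer(channel->efx, rx_buf);
	put_page(rx_buf->page);
}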

/* Recycle the pages that are used by buffers that have just been received. */
void efx_recycle_rx_pages(struct efx_channel *channel,
			  struct efx_rx_buffer *rx_buf,
			  unsigned int n_frags)
{}
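
/* Illustrative sketch only ("_sketch" name is hypothetical): walk the
 * fragment chain and recycle each buffer's page in turn, assuming the
 * efx_rx_buf_next() helper from rx_common.h.
 */
static void efx_recycle_rx_pages_sketch(struct efx_channel *channel,
					struct efx_rx_buffer *rx_buf,
					unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	do {
		efx_recycle_rx_page_sketch(channel, rx_buf);
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}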

void efx_discard_rx_packet(struct efx_channel *channel,
			   struct efx_rx_buffer *rx_buf,
			   unsigned int n_frags)
{}

static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
{}

static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
{}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{}

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{}

void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{}

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{}

/* Unmap a DMA-mapped page.  This function is only called for the final RX
 * buffer in a page.
 */
void efx_unmap_rx_buffer(struct efx_nic *efx,
			 struct efx_rx_buffer *rx_buf)
{}
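
/* Illustrative sketch only ("_sketch" name is hypothetical): the whole
 * page is unmapped in one go, using the DMA address stashed in the
 * struct efx_rx_page_state kept at the start of the page.
 */
static void efx_unmap_rx_buffer_sketch(struct efx_nic *efx,
				       struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;

	if (page) {
		struct efx_rx_page_state *state = page_address(page);

		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
	}
}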

void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
			 struct efx_rx_buffer *rx_buf,
			 unsigned int num_bufs)
{}

void efx_rx_slow_fill(struct timer_list *t)
{}

void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{}

/* efx_init_rx_buffers - create EFX_RX_BATCH page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 * @atomic:		control memory allocation flags
 *
 * This allocates a batch of pages, maps them for DMA, and populates
 * a struct efx_rx_buffer for each one. Returns 0 on success or a
 * negative error code. If a single page can be used for multiple buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
{}
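
/* Illustrative, condensed sketch only ("_sketch" name is hypothetical, and
 * the IP-alignment/XDP headroom offsets are omitted).  Assumes the
 * page-splitting fields rx_buffer_order, rx_dma_len, rx_page_buf_step and
 * rx_pages_per_batch of struct efx_nic.
 */
static int efx_init_rx_buffers_sketch(struct efx_rx_queue *rx_queue,
				      bool atomic)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int count = 0;

	do {
		struct efx_rx_page_state *state;
		struct efx_rx_buffer *rx_buf;
		unsigned int page_offset;
		dma_addr_t dma_addr;
		struct page *page;

		/* Prefer a recycled page; otherwise allocate and DMA-map a
		 * fresh one (GFP_ATOMIC when refilling from NAPI context).
		 */
		page = efx_reuse_page_sketch(rx_queue);
		if (!page) {
			page = alloc_pages(__GFP_COMP |
					   (atomic ? GFP_ATOMIC : GFP_KERNEL),
					   efx->rx_buffer_order);
			if (unlikely(!page))
				return -ENOMEM;
			dma_addr = dma_map_page(&efx->pci_dev->dev, page, 0,
						PAGE_SIZE << efx->rx_buffer_order,
						DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
						       dma_addr))) {
				__free_pages(page, efx->rx_buffer_order);
				return -EIO;
			}
			state = page_address(page);
			state->dma_addr = dma_addr;
		} else {
			state = page_address(page);
			dma_addr = state->dma_addr;
		}

		/* Carve the rest of the page into buffers, so a page is
		 * inserted either fully or not at all.
		 */
		dma_addr += sizeof(struct efx_rx_page_state);
		page_offset = sizeof(struct efx_rx_page_state);
		do {
			rx_buf = efx_rx_buffer(rx_queue,
					       rx_queue->added_count++ &
					       rx_queue->ptr_mask);
			rx_buf->dma_addr = dma_addr;
			rx_buf->page = page;
			rx_buf->page_offset = page_offset;
			rx_buf->len = efx->rx_dma_len;
			rx_buf->flags = 0;
			get_page(page);
			dma_addr += efx->rx_page_buf_step;
			page_offset += efx->rx_page_buf_step;
		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);
		rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
	} while (++count < efx->rx_pages_per_batch);

	return 0;
}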

void efx_rx_config_page_split(struct efx_nic *efx)
{}

/* efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 * @atomic:		control memory allocation flags
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
{}
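
/* Illustrative sketch only ("_sketch" name is hypothetical): fill the ring
 * up to max_fill in whole batches, fall back to the slow-fill timer when
 * an allocation fails, then notify the NIC of any new descriptors.
 * Assumes the added_count/removed_count/fast_fill_trigger/notified_count
 * fields of struct efx_rx_queue.
 */
static void efx_fast_push_rx_descriptors_sketch(struct efx_rx_queue *rx_queue,
						bool atomic)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int fill_level, batch_size;
	int space;

	/* How full is the ring right now?  Do nothing above the trigger. */
	fill_level = rx_queue->added_count - rx_queue->removed_count;
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	space = rx_queue->max_fill - fill_level;

	do {
		if (unlikely(efx_init_rx_buffers_sketch(rx_queue, atomic))) {
			/* Out of memory: retry later from the timer. */
			efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= batch_size) >= batch_size);

out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}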

/* Pass a received packet up through GRO.  GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
		  unsigned int n_frags, u8 *eh, __wsum csum)
{}
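
/* Illustrative, condensed sketch only ("_sketch" name is hypothetical; the
 * RSS-hash handling via the Ethernet header pointer and the truesize
 * accounting are omitted).  Builds a frag-based skb with napi_get_frags(),
 * attaches each fragment page, sets the checksum state and hands the
 * result to napi_gro_frags().
 */
static void efx_rx_packet_gro_sketch(struct efx_channel *channel,
				     struct efx_rx_buffer *rx_buf,
				     unsigned int n_frags, __wsum csum)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct napi_struct *napi = &channel->napi_str;
	struct sk_buff *skb;

	skb = napi_get_frags(napi);
	if (unlikely(!skb)) {
		efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}

	/* GRO can use a complete checksum from the hardware, or fall back
	 * to a "checksum unnecessary"/"none" indication.
	 */
	if (csum) {
		skb->csum = csum;
		skb->ip_summed = CHECKSUM_COMPLETE;
	} else {
		skb->ip_summed = (rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
				 CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
	}

	/* Attach each receive buffer's page as an skb fragment. */
	for (;;) {
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   rx_buf->page, rx_buf->page_offset,
				   rx_buf->len);
		rx_buf->page = NULL;
		skb->len += rx_buf->len;
		if (skb_shinfo(skb)->nr_frags == n_frags)
			break;
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	}
	skb->data_len = skb->len;

	napi_gro_frags(napi);
}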

struct efx_rss_context_priv *efx_find_rss_context_entry(struct efx_nic *efx,
							u32 id)
{}

void efx_set_default_rx_indir_table(struct efx_nic *efx, u32 *indir)
{}

/**
 * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
 * @spec: Specification to test
 *
 * Return: %true if the specification is a non-drop RX filter that
 * matches a local MAC address I/G bit value of 1 or matches a local
 * IPv4 or IPv6 address value in the respective multicast address
 * range.  Otherwise %false.
 */
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
{}
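
/* Illustrative sketch only ("_sketch" name is hypothetical): the kind of
 * checks the comment above describes, assuming the EFX_FILTER_* flag and
 * match-field constants from filter.h and the usual in/if_ether helpers.
 */
static bool efx_filter_is_mc_recipient_sketch(const struct efx_filter_spec *spec)
{
	/* Must be an RX filter that does not drop. */
	if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
	    spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
		return false;

	/* Local MAC address with the I/G (multicast) bit set? */
	if (spec->match_flags &
	    (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
	    is_multicast_ether_addr(spec->loc_mac))
		return true;

	/* Local IPv4/IPv6 address in the multicast range? */
	if ((spec->match_flags &
	     (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
		if (spec->ether_type == htons(ETH_P_IP) &&
		    ipv4_is_multicast(spec->loc_host[0]))
			return true;
		if (spec->ether_type == htons(ETH_P_IPV6) &&
		    ((const u8 *)spec->loc_host)[0] == 0xff)
			return true;
	}

	return false;
}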

bool efx_filter_spec_equal(const struct efx_filter_spec *left,
			   const struct efx_filter_spec *right)
{}

u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
{}

#ifdef CONFIG_RFS_ACCEL
bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
			bool *force)
{}

static
struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
				       const struct efx_filter_spec *spec)
{}

struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
					const struct efx_filter_spec *spec)
{}

struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
				       const struct efx_filter_spec *spec,
				       bool *new)
{}

void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
{}
#endif

int efx_probe_filters(struct efx_nic *efx)
{}

void efx_remove_filters(struct efx_nic *efx)
{}

#ifdef CONFIG_RFS_ACCEL

static void efx_filter_rfs_work(struct work_struct *data)
{}

int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{}

bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota)
{}

#endif /* CONFIG_RFS_ACCEL */