// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_type.h"
#include "ice_xsk.h"
#include "ice_txrx.h"
#include "ice_txrx_lib.h"
#include "ice_lib.h"

static struct xdp_buff **ice_xdp_buf(struct ice_rx_ring *rx_ring, u32 idx)
{
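	/* Plausible one-liner: the Rx ring keeps a SW shadow array of
	 * xdp_buff pointers laid out 1:1 with the HW descriptors (an
	 * assumption the alloc/clean sketches below rely on).
	 */
	return &rx_ring->xdp_buf[idx];
}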

/**
 * ice_qp_reset_stats - Resets all stats for rings of given index
 * @vsi: VSI that contains rings of interest
 * @q_idx: ring index in array
 */
static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
{
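	/* Hedged sketch: zero the per-ring counters for the queue pair.
	 * The stats layout has moved between kernel versions; this assumes
	 * stats embedded directly in the ring structures.
	 */
	memset(&vsi->rx_rings[q_idx]->rx_stats, 0,
	       sizeof(vsi->rx_rings[q_idx]->rx_stats));
	memset(&vsi->tx_rings[q_idx]->stats, 0,
	       sizeof(vsi->tx_rings[q_idx]->stats));
	if (ice_is_xdp_ena_vsi(vsi))
		memset(&vsi->xdp_rings[q_idx]->stats, 0,
		       sizeof(vsi->xdp_rings[q_idx]->stats));
}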

/**
 * ice_qp_clean_rings - Cleans all the rings of a given index
 * @vsi: VSI that contains rings of interest
 * @q_idx: ring index in array
 */
static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
{
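	/* Sketch: reuse the generic ring cleanup helpers; wait out any
	 * in-flight XDP traffic before touching the XDP Tx ring.
	 */
	ice_clean_tx_ring(vsi->tx_rings[q_idx]);
	if (ice_is_xdp_ena_vsi(vsi)) {
		synchronize_rcu();
		ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
	}
	ice_clean_rx_ring(vsi->rx_rings[q_idx]);
}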

/**
 * ice_qvec_toggle_napi - Enables/disables NAPI for a given q_vector
 * @vsi: VSI that has netdev
 * @q_vector: q_vector that has NAPI context
 * @enable: true for enable, false for disable
 */
static void
ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
		     bool enable)
{
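	if (!vsi->netdev || !q_vector)
		return;

	if (enable)
		napi_enable(&q_vector->napi);
	else
		napi_disable(&q_vector->napi);
}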

/**
 * ice_qvec_dis_irq - Mask off queue interrupt generation on given ring
 * @vsi: the VSI that contains queue vector being un-configured
 * @rx_ring: Rx ring that will have its IRQ disabled
 * @q_vector: queue vector
 */
static void
ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
		 struct ice_q_vector *q_vector)
{
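	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 val;

	/* Hedged sketch: QINT_TQCTL is cleared when the Tx queue is
	 * stopped, so only the Rx cause enable bit needs masking here.
	 * irq.virq follows recent kernels; older ones look the vector up
	 * in pf->msix_entries.
	 */
	val = rd32(hw, QINT_RQCTL(rx_ring->reg_idx));
	val &= ~QINT_RQCTL_CAUSE_ENA_M;
	wr32(hw, QINT_RQCTL(rx_ring->reg_idx), val);

	if (q_vector) {
		wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 0);
		ice_flush(hw);
		synchronize_irq(q_vector->irq.virq);
	}
}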

/**
 * ice_qvec_cfg_msix - Configure MSI-X interrupt settings for given queue vector
 * @vsi: the VSI that contains queue vector
 * @q_vector: queue vector
 * @qid: queue index
 */
static void
ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector, u16 qid)
{
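	u16 reg_idx = q_vector->reg_idx;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	int q;

	/* Sketch built on the ice_base.h helpers: program the ITR, then
	 * map every Tx/Rx queue served by this vector to its MSI-X index.
	 */
	ice_cfg_itr(hw, q_vector);

	for (q = 0; q < q_vector->num_ring_tx; q++)
		ice_cfg_txq_interrupt(vsi, qid + q, reg_idx,
				      q_vector->tx.itr_idx);

	for (q = 0; q < q_vector->num_ring_rx; q++)
		ice_cfg_rxq_interrupt(vsi, qid + q, reg_idx,
				      q_vector->rx.itr_idx);

	ice_flush(hw);
}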

/**
 * ice_qvec_ena_irq - Enable IRQ for given queue vector
 * @vsi: the VSI that contains queue vector
 * @q_vector: queue vector
 */
static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
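	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;

	ice_irq_dynamic_ena(hw, vsi, q_vector);

	ice_flush(hw);
}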

/**
 * ice_qp_dis - Disables a queue pair
 * @vsi: VSI of interest
 * @q_idx: ring index in array
 *
 * Returns 0 on success, negative on failure.
 */
static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
{
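	struct ice_txq_meta txq_meta = { };
	struct ice_q_vector *q_vector;
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	int err;

	/* Condensed sketch of the teardown sequence; the ICE_CFG_BUSY
	 * locking and the XDP Tx ring shutdown of the full driver are
	 * omitted for brevity.
	 */
	if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
		return -EINVAL;

	tx_ring = vsi->tx_rings[q_idx];
	rx_ring = vsi->rx_rings[q_idx];
	q_vector = rx_ring->q_vector;

	synchronize_net();
	netif_carrier_off(vsi->netdev);
	netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));

	ice_qvec_dis_irq(vsi, rx_ring, q_vector);
	ice_qvec_toggle_napi(vsi, q_vector, false);

	ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
	if (err)
		return err;

	err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
	if (err)
		return err;

	ice_qp_clean_rings(vsi, q_idx);
	ice_qp_reset_stats(vsi, q_idx);

	return 0;
}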

/**
 * ice_qp_ena - Enables a queue pair
 * @vsi: VSI of interest
 * @q_idx: ring index in array
 *
 * Returns 0 on success, negative on failure.
 */
static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
{
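	struct ice_q_vector *q_vector;
	int err;

	/* Condensed sketch mirroring ice_qp_dis() in reverse; XDP Tx ring
	 * bring-up and state-bit bookkeeping are elided.
	 */
	err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
	if (err)
		return err;

	err = ice_vsi_cfg_single_rxq(vsi, q_idx);
	if (err)
		return err;

	q_vector = vsi->rx_rings[q_idx]->q_vector;
	ice_qvec_cfg_msix(vsi, q_vector, q_idx);

	err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
	if (err)
		return err;

	ice_qvec_toggle_napi(vsi, q_vector, true);
	ice_qvec_ena_irq(vsi, q_vector);

	netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
	netif_carrier_on(vsi->netdev);

	return 0;
}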

/**
 * ice_xsk_pool_disable - disable a buffer pool region
 * @vsi: Current VSI
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
{
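	struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);

	if (!pool)
		return -EINVAL;

	/* undo the DMA mapping done at pool enable time */
	xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);

	return 0;
}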

/**
 * ice_xsk_pool_enable - enable a buffer pool region
 * @vsi: Current VSI
 * @pool: pointer to a requested buffer pool region
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
static int
ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
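	int err;

	/* Sketch: validate the request, then DMA-map the memory backing
	 * the pool so HW can use its buffers directly.
	 */
	if (vsi->type != ICE_VSI_PF)
		return -EINVAL;

	if (qid >= vsi->netdev->real_num_rx_queues ||
	    qid >= vsi->netdev->real_num_tx_queues)
		return -EINVAL;

	err = xsk_pool_dma_map(pool, ice_pf_to_dev(vsi->back),
			       ICE_RX_DMA_ATTR);
	if (err)
		return err;

	return 0;
}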

/**
 * ice_realloc_rx_xdp_bufs - reallocate for either XSK or normal buffer
 * @rx_ring: Rx ring
 * @pool_present: is pool for XSK present
 *
 * Try to allocate the new SW ring; return -ENOMEM if the allocation fails.
 * If the allocation succeeds, substitute the ring's buffer array with the
 * newly allocated one.
 * Returns 0 on success, negative on failure
 */
static int
ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present)
{
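	size_t elem_size = pool_present ? sizeof(*rx_ring->xdp_buf) :
					  sizeof(*rx_ring->rx_buf);
	void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);

	if (!sw_ring)
		return -ENOMEM;

	/* xdp_buf and rx_buf are assumed to overlay each other in the ring
	 * structure, so only one of them is valid at a time
	 */
	if (pool_present) {
		kfree(rx_ring->rx_buf);
		rx_ring->rx_buf = NULL;
		rx_ring->xdp_buf = sw_ring;
	} else {
		kfree(rx_ring->xdp_buf);
		rx_ring->xdp_buf = NULL;
		rx_ring->rx_buf = sw_ring;
	}

	return 0;
}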

/**
 * ice_realloc_zc_buf - reallocate XDP ZC queue pairs
 * @vsi: Current VSI
 * @zc: is zero copy set
 *
 * Reallocate the SW ring for each rx_ring that might be used by XSK,
 * since the XDP path requires more memory per slot than rx_buf provides.
 * Returns 0 on success, negative on failure
 */
int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
{
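	unsigned int i;

	ice_for_each_rxq(vsi, i) {
		struct ice_rx_ring *rx_ring = vsi->rx_rings[i];

		if (!rx_ring->xsk_pool)
			continue;

		if (ice_realloc_rx_xdp_bufs(rx_ring, zc))
			return -ENOMEM;
	}

	return 0;
}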

/**
 * ice_xsk_pool_setup - enable/disable a buffer pool region depending on its state
 * @vsi: Current VSI
 * @pool: buffer pool to enable/associate to a ring, NULL to disable
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
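	bool if_running, pool_present = !!pool;
	int ret = 0, pool_failure = 0;

	/* Sketch of the orchestration: quiesce the queue pair when the
	 * interface is up, swap the SW ring, flip the pool, then restart.
	 */
	if (qid >= vsi->num_rxq || qid >= vsi->num_txq) {
		netdev_err(vsi->netdev, "Please use queue id in scope of combined queues count\n");
		pool_failure = -EINVAL;
		goto failure;
	}

	if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);

	if (if_running) {
		ret = ice_qp_dis(vsi, qid);
		if (ret) {
			netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
			goto xsk_pool_if_up;
		}

		ret = ice_realloc_rx_xdp_bufs(vsi->rx_rings[qid], pool_present);
		if (ret)
			goto xsk_pool_if_up;
	}

	pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) :
				      ice_xsk_pool_disable(vsi, qid);

xsk_pool_if_up:
	if (if_running) {
		ret = ice_qp_ena(vsi, qid);
		if (ret)
			netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
	}

failure:
	if (pool_failure) {
		netdev_err(vsi->netdev, "Could not %sable buffer pool, error = %d\n",
			   pool_present ? "en" : "dis", pool_failure);
		return pool_failure;
	}

	return ret;
}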

/**
 * ice_fill_rx_descs - pick buffers from the XSK buffer pool and use them
 * @pool: XSK Buffer pool to pull the buffers from
 * @xdp: SW ring of xdp_buff that will hold the buffers
 * @rx_desc: Pointer to Rx descriptors that will be filled
 * @count: The number of buffers to allocate
 *
 * This function allocates a number of Rx buffers from the fill ring
 * or the internal recycle mechanism and places them on the Rx ring.
 *
 * Note that ring wrap should be handled by the caller of this function.
 *
 * Returns the number of allocated Rx descriptors
 */
static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
			     union ice_32b_rx_flex_desc *rx_desc, u16 count)
{
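	dma_addr_t dma;
	u16 buffs;
	u16 i;

	/* xsk_buff_alloc_batch() pulls up to @count buffers in one shot;
	 * each one becomes the backing store of one HW Rx descriptor
	 */
	buffs = xsk_buff_alloc_batch(pool, xdp, count);
	for (i = 0; i < buffs; i++) {
		dma = xsk_buff_xdp_get_dma(*xdp);
		rx_desc->read.pkt_addr = cpu_to_le64(dma);
		rx_desc->wb.status_error0 = 0;

		rx_desc++;
		xdp++;
	}

	return buffs;
}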

/**
 * __ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
 * @rx_ring: Rx ring
 * @xsk_pool: XSK buffer pool to pick buffers to be filled by HW
 * @count: The number of buffers to allocate
 *
 * Place @count descriptors onto the Rx ring. Handle the ring wrap for the
 * case where the space from next_to_use up to the end of the ring is less
 * than @count. Finally, do a tail bump.
 *
 * Returns true if all allocations were successful, false if any fail.
 */
static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring,
				   struct xsk_buff_pool *xsk_pool, u16 count)
{
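	u32 nb_buffs_extra = 0, nb_buffs = 0;
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	u16 total_count = count;
	struct xdp_buff **xdp;

	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	xdp = ice_xdp_buf(rx_ring, ntu);

	if (ntu + count >= rx_ring->count) {
		/* fill up to the end of the ring first, then wrap */
		nb_buffs_extra = ice_fill_rx_descs(xsk_pool, xdp, rx_desc,
						   rx_ring->count - ntu);
		if (nb_buffs_extra != rx_ring->count - ntu) {
			ntu += nb_buffs_extra;
			goto exit;
		}
		rx_desc = ICE_RX_DESC(rx_ring, 0);
		xdp = ice_xdp_buf(rx_ring, 0);
		ntu = 0;
		count -= nb_buffs_extra;
		ice_release_rx_desc(rx_ring, 0);
	}

	nb_buffs = ice_fill_rx_descs(xsk_pool, xdp, rx_desc, count);

	ntu += nb_buffs;
	if (ntu == rx_ring->count)
		ntu = 0;

exit:
	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return total_count == (nb_buffs_extra + nb_buffs);
}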

/**
 * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
 * @rx_ring: Rx ring
 * @xsk_pool: XSK buffer pool to pick buffers to be filled by HW
 * @count: The number of buffers to allocate
 *
 * Wrapper for internal allocation routine; figure out how many tail
 * bumps should take place based on the given threshold
 *
 * Returns true if all calls to internal alloc routine succeeded
 */
bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring,
			  struct xsk_buff_pool *xsk_pool, u16 count)
{
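	u16 rx_thresh = ICE_RING_QUARTER(rx_ring);
	u16 leftover, i, tail_bumps;

	/* bump the tail once per quarter-ring worth of buffers */
	tail_bumps = count / rx_thresh;
	leftover = count - (tail_bumps * rx_thresh);

	for (i = 0; i < tail_bumps; i++)
		if (!__ice_alloc_rx_bufs_zc(rx_ring, xsk_pool, rx_thresh))
			return false;
	return __ice_alloc_rx_bufs_zc(rx_ring, xsk_pool, leftover);
}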

/**
 * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
 * @rx_ring: Rx ring
 * @xdp: Pointer to XDP buffer
 *
 * This function allocates a new skb from a zero-copy Rx buffer.
 *
 * Returns the skb on success, NULL on failure.
 */
static struct sk_buff *
ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
{
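	unsigned int totalsize = xdp->data_end - xdp->data_meta;
	unsigned int metasize = xdp->data - xdp->data_meta;
	struct sk_buff *skb;

	/* Sketch of the single-buffer path; the multi-frag handling behind
	 * xdp_buff_has_frags() is omitted for brevity.
	 */
	net_prefetch(xdp->data_meta);

	skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize);
	if (unlikely(!skb))
		return NULL;

	memcpy(__skb_put(skb, totalsize), xdp->data_meta,
	       ALIGN(totalsize, sizeof(long)));

	if (metasize) {
		skb_metadata_set(skb, metasize);
		__skb_pull(skb, metasize);
	}

	/* the data now lives in the skb, hand the buffer back to the pool */
	xsk_buff_free(xdp);
	return skb;
}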

/**
 * ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ
 * @xdp_ring: XDP Tx ring
 * @xsk_pool: AF_XDP buffer pool pointer
 *
 * Returns the number of cleaned Tx descriptors.
 */
static u32 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring,
				struct xsk_buff_pool *xsk_pool)
{
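	u16 ntc = xdp_ring->next_to_clean;
	u16 cnt = xdp_ring->count;
	struct ice_tx_desc *tx_desc;
	u16 completed_frames = 0;
	u16 xsk_frames = 0;
	u16 last_rs;
	int i;

	/* Sketch: look at the last descriptor that carried the RS bit; if
	 * HW marked it done, everything up to and including it completed.
	 * tx_buf->type tagging follows recent kernels.
	 */
	last_rs = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : cnt - 1;
	tx_desc = ICE_TX_DESC(xdp_ring, last_rs);
	if (tx_desc->cmd_type_offset_bsz &
	    cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)) {
		if (last_rs >= ntc)
			completed_frames = last_rs - ntc + 1;
		else
			completed_frames = last_rs + cnt - ntc + 1;
	}

	if (!completed_frames)
		return 0;

	for (i = 0; i < completed_frames; i++) {
		struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];

		if (tx_buf->type == ICE_TX_BUF_XSK_TX) {
			/* XDP_TX buffer; hand it back to the pool */
			tx_buf->type = ICE_TX_BUF_EMPTY;
			xsk_buff_free(tx_buf->xdp);
		} else {
			/* descriptor produced from the XSK Tx ring */
			xsk_frames++;
		}

		if (++ntc >= cnt)
			ntc = 0;
	}

	/* clear the DD bit so a stale value is never trusted again */
	tx_desc->cmd_type_offset_bsz = 0;
	xdp_ring->next_to_clean = ntc;

	if (xsk_frames)
		xsk_tx_completed(xsk_pool, xsk_frames);

	return completed_frames;
}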

/**
 * ice_xmit_xdp_tx_zc - AF_XDP ZC handler for XDP_TX
 * @xdp: XDP buffer to xmit
 * @xdp_ring: XDP ring to produce descriptor onto
 * @xsk_pool: AF_XDP buffer pool pointer
 *
 * Note that this function works directly on the xdp_buff; there is no need
 * to convert it to an xdp_frame. The xdp_buff pointer is stored in ice_tx_buf
 * so that the cleaning side will be able to xsk_buff_free() it.
 *
 * Returns ICE_XDP_TX for a successfully produced descriptor, ICE_XDP_CONSUMED
 * if there was not enough space on the XDP ring
 */
static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
			      struct ice_tx_ring *xdp_ring,
			      struct xsk_buff_pool *xsk_pool)
{
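	u32 size = xdp->data_end - xdp->data;
	u32 ntu = xdp_ring->next_to_use;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	dma_addr_t dma;
	u32 free_space;

	/* Single-buffer sketch; the multi-frag loop and the xdp_tx_active
	 * fast-path accounting of the full driver are omitted.
	 */
	free_space = ICE_DESC_UNUSED(xdp_ring);
	if (free_space < ICE_RING_QUARTER(xdp_ring))
		free_space += ice_clean_xdp_irq_zc(xdp_ring, xsk_pool);

	if (unlikely(!free_space))
		return ICE_XDP_CONSUMED;

	dma = xsk_buff_xdp_get_dma(xdp);
	xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, size);

	tx_buf = &xdp_ring->tx_buf[ntu];
	tx_buf->xdp = xdp;
	tx_buf->type = ICE_TX_BUF_XSK_TX;

	tx_desc = ICE_TX_DESC(xdp_ring, ntu);
	tx_desc->buf_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP, 0,
						      size, 0);

	if (++ntu == xdp_ring->count)
		ntu = 0;
	xdp_ring->next_to_use = ntu;

	return ICE_XDP_TX;
}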

/**
 * ice_run_xdp_zc - Executes an XDP program in zero-copy path
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 * @xdp_ring: ring to be used for XDP_TX action
 * @xsk_pool: AF_XDP buffer pool pointer
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */
static int
ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
	       struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
	       struct xsk_buff_pool *xsk_pool)
{
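	int err, result = ICE_XDP_PASS;
	u32 act;

	/* Sketch; newer kernels also distinguish an ICE_XDP_EXIT result for
	 * -ENOBUFS redirect failures, which is folded into CONSUMED here.
	 */
	act = bpf_prog_run_xdp(xdp_prog, xdp);

	/* AF_XDP traffic overwhelmingly redirects to the user-space socket,
	 * so handle XDP_REDIRECT before the switch.
	 */
	if (likely(act == XDP_REDIRECT)) {
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (!err)
			return ICE_XDP_REDIR;
		result = ICE_XDP_CONSUMED;
		goto out_failure;
	}

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		result = ice_xmit_xdp_tx_zc(xdp, xdp_ring, xsk_pool);
		if (result == ICE_XDP_CONSUMED)
			goto out_failure;
		break;
	case XDP_DROP:
		result = ICE_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		result = ICE_XDP_CONSUMED;
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		break;
	}

	return result;
}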

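/**
 * ice_add_xsk_frag - attach an XSK buffer as a frag of a multi-buffer frame
 * @rx_ring: Rx ring
 * @first: xdp_buff that carries the frame's shared info
 * @xdp: xdp_buff holding the current fragment
 * @size: length of the fragment in bytes
 *
 * Returns 0 on success, -ENOMEM if the frag table is already full.
 */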
static int
ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first,
		 struct xdp_buff *xdp, const unsigned int size)
{
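	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first);

	/* Hedged sketch; xsk_buff_add_frag()'s exact signature has changed
	 * across kernel versions.
	 */
	if (sinfo->nr_frags >= MAX_SKB_FRAGS) {
		xsk_buff_free(first);
		return -ENOMEM;
	}

	__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
				   virt_to_page(xdp->data_hard_start),
				   XDP_PACKET_HEADROOM, size);
	sinfo->xdp_frags_size += size;
	xsk_buff_add_frag(xdp);

	return 0;
}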

/**
 * ice_clean_rx_irq_zc - consumes packets from the hardware ring
 * @rx_ring: AF_XDP Rx ring
 * @xsk_pool: AF_XDP buffer pool pointer
 * @budget: NAPI budget
 *
 * Returns number of processed packets on success, remaining budget on failure.
 */
int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
			struct xsk_buff_pool *xsk_pool,
			int budget)
{
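	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	struct ice_tx_ring *xdp_ring = rx_ring->xdp_ring;
	u32 ntc = rx_ring->next_to_clean;
	u32 cnt = rx_ring->count;
	struct bpf_prog *xdp_prog;
	u32 xdp_xmit = 0;
	bool failure = false;

	/* Condensed sketch of the Rx loop: the real driver also assembles
	 * multi-buffer frames via ice_add_xsk_frag() and keeps finer-grained
	 * stats; both are omitted here.
	 */
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);

	while (likely(total_rx_packets < (unsigned int)budget)) {
		union ice_32b_rx_flex_desc *rx_desc;
		struct xdp_buff *xdp;
		struct sk_buff *skb;
		unsigned int size;
		int xdp_res;

		rx_desc = ICE_RX_DESC(rx_ring, ntc);
		if (!ice_test_staterr(rx_desc->wb.status_error0,
				      BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)))
			break;

		/* pairs with the HW writeback of the DD bit */
		dma_rmb();

		xdp = *ice_xdp_buf(rx_ring, ntc);
		size = le16_to_cpu(rx_desc->wb.pkt_len) &
		       ICE_RX_FLX_DESC_PKT_LEN_M;

		xsk_buff_set_size(xdp, size);
		/* note: older kernels pass the pool as a second argument */
		xsk_buff_dma_sync_for_cpu(xdp);

		if (++ntc == cnt)
			ntc = 0;

		xdp_res = ice_run_xdp_zc(rx_ring, xdp, xdp_prog, xdp_ring,
					 xsk_pool);
		xdp_xmit |= xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR);

		if (xdp_res == ICE_XDP_PASS) {
			skb = ice_construct_skb_zc(rx_ring, xdp);
			if (!skb) {
				xsk_buff_free(xdp);
				break;
			}
			ice_receive_skb(rx_ring, skb, 0);
		}

		total_rx_bytes += size;
		total_rx_packets++;
	}

	rx_ring->next_to_clean = ntc;
	failure = !ice_alloc_rx_bufs_zc(rx_ring, xsk_pool,
					ICE_DESC_UNUSED(rx_ring));

	ice_finalize_xdp_rx(xdp_ring, xdp_xmit, 0);
	ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);

	if (xsk_uses_need_wakeup(xsk_pool)) {
		if (failure || ntc == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(xsk_pool);
		else
			xsk_clear_rx_need_wakeup(xsk_pool);

		return (int)total_rx_packets;
	}

	return failure ? budget : (int)total_rx_packets;
}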

/**
 * ice_xmit_pkt - produce a single HW Tx descriptor out of AF_XDP descriptor
 * @xdp_ring: XDP ring to produce the HW Tx descriptor on
 * @xsk_pool: XSK buffer pool to pick buffers to be consumed by HW
 * @desc: AF_XDP descriptor to pull the DMA address and length from
 * @total_bytes: bytes accumulator that will be used for stats update
 */
static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring,
			 struct xsk_buff_pool *xsk_pool, struct xdp_desc *desc,
			 unsigned int *total_bytes)
{
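	struct ice_tx_desc *tx_desc;
	dma_addr_t dma;

	dma = xsk_buff_raw_get_dma(xsk_pool, desc->addr);
	xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, desc->len);

	/* the caller guarantees the fill does not cross the ring end, so a
	 * bare post-increment of next_to_use is safe here
	 */
	tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
	tx_desc->buf_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP, 0,
						      desc->len, 0);

	*total_bytes += desc->len;
}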

/**
 * ice_xmit_pkt_batch - produce a batch of HW Tx descriptors out of AF_XDP descriptors
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 * @xsk_pool: XSK buffer pool to pick buffers to be consumed by HW
 * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
 * @total_bytes: bytes accumulator that will be used for stats update
 */
static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring,
			       struct xsk_buff_pool *xsk_pool,
			       struct xdp_desc *descs,
			       unsigned int *total_bytes)
{
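	u16 ntu = xdp_ring->next_to_use;
	struct ice_tx_desc *tx_desc;
	u32 i;

	/* Sketch using a plain loop; the real driver unrolls this
	 * (loop_unrolled_for) to keep more descriptor writes in flight.
	 */
	for (i = 0; i < PKTS_PER_BATCH; i++) {
		dma_addr_t dma;

		dma = xsk_buff_raw_get_dma(xsk_pool, descs[i].addr);
		xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, descs[i].len);

		tx_desc = ICE_TX_DESC(xdp_ring, ntu++);
		tx_desc->buf_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz =
			ice_build_ctob(ICE_TX_DESC_CMD_EOP, 0, descs[i].len, 0);

		*total_bytes += descs[i].len;
	}

	xdp_ring->next_to_use = ntu;
}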

/**
 * ice_fill_tx_hw_ring - produce the number of Tx descriptors onto ring
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 * @xsk_pool: XSK buffer pool to pick buffers to be consumed by HW
 * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
 * @nb_pkts: count of packets to be sent
 * @total_bytes: bytes accumulator that will be used for stats update
 */
static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring,
				struct xsk_buff_pool *xsk_pool,
				struct xdp_desc *descs, u32 nb_pkts,
				unsigned int *total_bytes)
{
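	u32 batched, leftover, i;

	/* full batches first, then the remainder one packet at a time;
	 * PKTS_PER_BATCH is assumed to be a power of two
	 */
	batched = ALIGN_DOWN(nb_pkts, PKTS_PER_BATCH);
	leftover = nb_pkts & (PKTS_PER_BATCH - 1);

	for (i = 0; i < batched; i += PKTS_PER_BATCH)
		ice_xmit_pkt_batch(xdp_ring, xsk_pool, &descs[i], total_bytes);
	for (; i < batched + leftover; i++)
		ice_xmit_pkt(xdp_ring, xsk_pool, &descs[i], total_bytes);
}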

/**
 * ice_xmit_zc - take entries from XSK Tx ring and place them onto HW Tx ring
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 * @xsk_pool: AF_XDP buffer pool pointer
 *
 * Returns true if there is no more work that needs to be done, false otherwise
 */
bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, struct xsk_buff_pool *xsk_pool)
{
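	struct xdp_desc *descs = xsk_pool->tx_descs;
	u32 nb_pkts, nb_processed = 0;
	unsigned int total_bytes = 0;
	struct ice_tx_desc *last_desc;
	int budget;

	/* Sketch: free up descriptors, peek a batch from the XSK Tx ring,
	 * handle the ring wrap in two fills, then bump the tail once.
	 */
	ice_clean_xdp_irq_zc(xdp_ring, xsk_pool);

	budget = ICE_DESC_UNUSED(xdp_ring);
	budget = min_t(u16, budget, ICE_RING_QUARTER(xdp_ring));

	nb_pkts = xsk_tx_peek_release_desc_batch(xsk_pool, budget);
	if (!nb_pkts)
		return true;

	if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
		nb_processed = xdp_ring->count - xdp_ring->next_to_use;
		ice_fill_tx_hw_ring(xdp_ring, xsk_pool, descs, nb_processed,
				    &total_bytes);
		xdp_ring->next_to_use = 0;
	}

	ice_fill_tx_hw_ring(xdp_ring, xsk_pool, &descs[nb_processed],
			    nb_pkts - nb_processed, &total_bytes);

	/* request a completion (RS bit) on the last produced descriptor */
	last_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use ?
					  xdp_ring->next_to_use - 1 :
					  xdp_ring->count - 1);
	last_desc->cmd_type_offset_bsz |=
		cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);

	ice_xdp_ring_update_tail(xdp_ring);
	ice_update_tx_ring_stats(xdp_ring, nb_pkts, total_bytes);

	if (xsk_uses_need_wakeup(xsk_pool))
		xsk_set_tx_need_wakeup(xsk_pool);

	return nb_pkts < budget;
}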

/**
 * ice_xsk_wakeup - Implements ndo_xsk_wakeup
 * @netdev: net_device
 * @queue_id: queue to wake up
 * @flags: ignored in our case, since we have Rx and Tx in the same NAPI
 *
 * Returns negative on error, zero otherwise.
 */
int
ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
	       u32 __always_unused flags)
{
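	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_q_vector *q_vector;
	struct ice_vsi *vsi = np->vsi;
	struct ice_tx_ring *ring;

	if (test_bit(ICE_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!ice_is_xdp_ena_vsi(vsi))
		return -EINVAL;

	if (queue_id >= vsi->num_txq || queue_id >= vsi->num_rxq)
		return -EINVAL;

	ring = vsi->rx_rings[queue_id]->xdp_ring;

	if (!ring->xsk_pool)
		return -EINVAL;

	/* If NAPI is already running, mark it missed so it reschedules
	 * itself; otherwise fire a SW interrupt so NAPI gets scheduled from
	 * hard-IRQ context and the interrupt affinity is honored.
	 */
	q_vector = ring->q_vector;
	if (!napi_if_scheduled_mark_missed(&q_vector->napi))
		ice_trigger_sw_intr(&vsi->back->hw, q_vector);

	return 0;
}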

/**
 * ice_xsk_any_rx_ring_ena - Checks if Rx rings have AF_XDP buff pool attached
 * @vsi: VSI to be checked
 *
 * Returns true if any of the Rx rings has an AF_XDP buff pool attached
 */
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
{
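	int i;

	ice_for_each_rxq(vsi, i) {
		if (xsk_get_pool_from_qid(vsi->netdev, i))
			return true;
	}

	return false;
}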

/**
 * ice_xsk_clean_rx_ring - clean buffer pool queues connected to a given Rx ring
 * @rx_ring: ring to be cleaned
 */
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
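	u16 ntc = rx_ring->next_to_clean;
	u16 ntu = rx_ring->next_to_use;

	/* free every buffer the HW still owned back to the XSK pool */
	while (ntc != ntu) {
		struct xdp_buff *xdp = *ice_xdp_buf(rx_ring, ntc);

		xsk_buff_free(xdp);
		if (++ntc >= rx_ring->count)
			ntc = 0;
	}
}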

/**
 * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its buffer pool queues
 * @xdp_ring: XDP_Tx ring
 */
void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring)
{
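	u16 ntc = xdp_ring->next_to_clean, ntu = xdp_ring->next_to_use;
	u32 xsk_frames = 0;

	/* Sketch; tx_buf->type tagging follows recent kernels. */
	while (ntc != ntu) {
		struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];

		if (tx_buf->type == ICE_TX_BUF_XSK_TX) {
			tx_buf->type = ICE_TX_BUF_EMPTY;
			xsk_buff_free(tx_buf->xdp);
		} else {
			xsk_frames++;
		}

		if (++ntc >= xdp_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
}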