linux/drivers/net/ethernet/intel/ice/ice_txrx.h

/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */

#ifndef _ICE_TXRX_H_
#define _ICE_TXRX_H_

#include "ice_type.h"

#define ICE_DFLT_IRQ_WORK
#define ICE_RXBUF_3072
#define ICE_RXBUF_2048
#define ICE_RXBUF_1664
#define ICE_RXBUF_1536
#define ICE_MAX_CHAINED_RX_BUFS
#define ICE_MAX_BUF_TXD
#define ICE_MIN_TX_LEN
#define ICE_MAX_FRAME_LEGACY_RX

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we round the value down to
 * the nearest 4K boundary, which represents our maximum read request size.
 */
#define ICE_MAX_READ_REQ_SIZE
#define ICE_MAX_DATA_PER_TXD
#define ICE_MAX_DATA_PER_TXD_ALIGNED
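
/* Illustrative only: the alignment described above works out to
 *
 *	max data per TXD	= 16 * 1024 - 1		(0x3FFF)
 *	aligned max per TXD	= 0x3FFF & ~(4096 - 1)	(0x3000, i.e. 12K)
 *
 * so rounding the (16K - 1) limit down to a 4K boundary leaves 12K of usable
 * data per descriptor.
 */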

#define ICE_MAX_TXQ_PER_TXQG

/* Attempt to maximize the headroom available for incoming frames. We use a 2K
 * buffer for MTUs <= 1500 and need 1536/1534 to store the data for the frame.
 * This leaves us with 512 bytes of room.  From that we need to deduct the
 * space needed for the shared info and the padding needed to IP align the
 * frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 *	 up negative.  In these cases we should fall back to the legacy
 *	 receive path.
 */
#if (PAGE_SIZE < 8192)
#define ICE_2K_TOO_SMALL_WITH_PADDING
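
/* Worked example of the arithmetic above (illustrative only, assuming a
 * 64-byte cache line): 2048 - 1536 leaves 512 bytes, from which the
 * cache-line-aligned skb_shared_info and the NET_IP_ALIGN padding are
 * deducted. Once the cache line grows to 256 bytes, the aligned shared info
 * alone exceeds those 512 bytes, the result goes negative, and the legacy
 * receive path has to be used instead.
 */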

/**
 * ice_compute_pad - compute the padding
 * @rx_buf_len: buffer length
 *
 * Figure out the size of half a page based on the given buffer length,
 * then subtract the skb_shared_info overhead and the buffer length
 * itself; what remains is the space available for padding
 */
static inline int ice_compute_pad(int rx_buf_len)
{}

/**
 * ice_skb_pad - determine the padding that we can supply
 *
 * Figure out the right Rx buffer size and, based on that, calculate the
 * padding
 */
static inline int ice_skb_pad(void)
{}
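
/* Illustrative only (not part of the driver API): a self-contained sketch of
 * the padding math described by the two helpers above, using a hypothetical
 * 1536-byte Rx buffer and the generic ALIGN()/SKB_WITH_OVERHEAD() helpers.
 */
static inline int ice_example_pad_for_1536(void)
{
	/* round the buffer length up to half a page... */
	int half_page = ALIGN(1536, PAGE_SIZE / 2);

	/* ...then subtract the skb_shared_info overhead and the buffer length
	 * itself; whatever remains is available as padding/headroom
	 */
	return SKB_WITH_OVERHEAD(half_page) - 1536;
}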

#define ICE_SKB_PAD
#else
#define ICE_2K_TOO_SMALL_WITH_PADDING
#define ICE_SKB_PAD
#endif

/* We assume that the cache line is always 64 bytes here for ice.
 * To make sure that is a correct assumption, there is a check in probe that
 * prints a warning if the read from GLPCI_CNF2 tells us that the cache line
 * size is 128 bytes. We do it this way because we do not want to read the
 * GLPCI_CNF2 register, or a variable containing its value, on every pass
 * through the Tx path.
 */
#define ICE_CACHE_LINE_BYTES
#define ICE_DESCS_PER_CACHE_LINE
#define ICE_DESCS_FOR_CTX_DESC
#define ICE_DESCS_FOR_SKB_DATA_PTR
/* Tx descriptors needed, worst case */
#define DESC_NEEDED
#define ICE_DESC_UNUSED(R)
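
/* Illustrative only: a worst-case estimate of this kind is typically the sum
 * of MAX_SKB_FRAGS, one context descriptor, one descriptor for the skb head
 * data pointer, and a cache line's worth of descriptors of slack. Likewise,
 * an "unused descriptors" macro is usually the classic ring computation
 *
 *	((clean > use) ? 0 : count) + clean - use - 1
 *
 * which keeps one slot empty so a full ring and an empty ring can be told
 * apart.
 */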

#define ICE_RX_DESC_UNUSED(R)

#define ICE_RING_QUARTER(R)

#define ICE_TX_FLAGS_TSO
#define ICE_TX_FLAGS_HW_VLAN
#define ICE_TX_FLAGS_SW_VLAN
/* Free, was ICE_TX_FLAGS_DUMMY_PKT */
#define ICE_TX_FLAGS_TSYN
#define ICE_TX_FLAGS_IPV4
#define ICE_TX_FLAGS_IPV6
#define ICE_TX_FLAGS_TUNNEL
#define ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN

#define ICE_XDP_PASS
#define ICE_XDP_CONSUMED
#define ICE_XDP_TX
#define ICE_XDP_REDIR
#define ICE_XDP_EXIT
#define ICE_SKB_CONSUMED

#define ICE_RX_DMA_ATTR

#define ICE_ETH_PKT_HDR_PAD

#define ICE_TXD_LAST_DESC_CMD

/**
 * enum ice_tx_buf_type - type of &ice_tx_buf to act on Tx completion
 * @ICE_TX_BUF_EMPTY: unused OR XSk frame, no action required
 * @ICE_TX_BUF_DUMMY: dummy Flow Director packet, unmap and kfree()
 * @ICE_TX_BUF_FRAG: mapped skb OR &xdp_buff frag, only unmap DMA
 * @ICE_TX_BUF_SKB: &sk_buff, unmap and consume_skb(), update stats
 * @ICE_TX_BUF_XDP_TX: &xdp_buff, unmap and page_frag_free(), stats
 * @ICE_TX_BUF_XDP_XMIT: &xdp_frame, unmap and xdp_return_frame(), stats
 * @ICE_TX_BUF_XSK_TX: &xdp_buff on XSk queue, xsk_buff_free(), stats
 */
enum ice_tx_buf_type {};
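
/* Illustrative only: the per-type actions documented above map naturally onto
 * a switch in the Tx completion path, roughly
 *
 *	switch (type) {
 *	case ICE_TX_BUF_DUMMY:    unmap DMA, kfree();            break;
 *	case ICE_TX_BUF_FRAG:     unmap DMA only;                 break;
 *	case ICE_TX_BUF_SKB:      unmap DMA, consume_skb();       break;
 *	case ICE_TX_BUF_XDP_TX:   unmap DMA, page_frag_free();    break;
 *	case ICE_TX_BUF_XDP_XMIT: unmap DMA, xdp_return_frame();  break;
 *	case ICE_TX_BUF_XSK_TX:   xsk_buff_free();                break;
 *	default:                  break;  ICE_TX_BUF_EMPTY needs no action
 *	}
 */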

struct ice_tx_buf {};

struct ice_tx_offload_params {};

struct ice_rx_buf {};

struct ice_q_stats {};

struct ice_txq_stats {};

struct ice_rxq_stats {};

struct ice_ring_stats {};

enum ice_ring_state_t {};

/* This enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere in the manual
 * that mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register; instead it is a special value meaning "don't update" ITR0/1/2.
 */
enum ice_dyn_idx_t {};
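
/* Illustrative only: the hardware ITR indices referred to above are typically
 * 0, 1 and 2 (ITR0/ITR1/ITR2), with ITR_NONE taking the one remaining
 * encoding so it can be written into an ITR_INDX field to mean "do not update
 * any interval".
 */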

/* Header split modes defined by DTYPE field of Rx RLAN context */
enum ice_rx_dtype {};

struct ice_pkt_ctx {};

struct ice_xdp_buff {};

/* Required for compatibility with xdp_buffs from xsk_pool */
static_assert();
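
/* Illustrative only: a typical way to express the requirement above is to
 * assert that the embedded xdp_buff is the first member, e.g.
 *
 *	static_assert(offsetof(struct ice_xdp_buff, xdp_buff) == 0);
 *
 * so that an xdp_buff pointer handed out by the xsk_pool can safely be cast
 * to the wrapping ice_xdp_buff.
 */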

/* indices into GLINT_ITR registers */
#define ICE_RX_ITR
#define ICE_TX_ITR
#define ICE_ITR_8K
#define ICE_ITR_20K
#define ICE_ITR_MAX
#define ICE_DFLT_TX_ITR
#define ICE_DFLT_RX_ITR
enum ice_dynamic_itr {};

#define ITR_IS_DYNAMIC(rc)
#define ICE_ITR_GRAN_S
#define ICE_ITR_GRAN_US
#define ICE_ITR_MASK
#define ITR_REG_ALIGN(setting)

#define ICE_DFLT_INTRL
#define ICE_MAX_INTRL

#define ICE_IN_WB_ON_ITR_MODE
/* Sets WB_ON_ITR and assumes INTENA bit is already cleared, which allows
 * setting the MSK_M bit to tell hardware to ignore the INTENA_M bit. Also
 * sets the write-back latency to the usecs passed in.
 */
#define ICE_GLINT_DYN_CTL_WB_ON_ITR(usecs, itr_idx)
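
/* Illustrative only, assuming the GLINT_DYN_CTL_* field macros from
 * ice_hw_autogen.h: a macro like this typically ORs the WB_ON_ITR and
 * INTENA_MSK bits together with (usecs) shifted into the INTERVAL field and
 * (itr_idx) shifted into the ITR_INDX field of the register value.
 */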

/* Legacy or Advanced Mode Queue */
#define ICE_TX_ADVANCED
#define ICE_TX_LEGACY

/* descriptor ring, associated with a VSI */
struct ice_rx_ring {} ____cacheline_internodealigned_in_smp;

struct ice_tx_ring {} ____cacheline_internodealigned_in_smp;

static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring)
{}

static inline void ice_set_ring_build_skb_ena(struct ice_rx_ring *ring)
{}

static inline void ice_clear_ring_build_skb_ena(struct ice_rx_ring *ring)
{}

static inline bool ice_ring_ch_enabled(struct ice_tx_ring *ring)
{}

static inline bool ice_ring_is_xdp(struct ice_tx_ring *ring)
{}

enum ice_container_type {};

struct ice_ring_container {};

struct ice_coalesce_stored {};

/* iterator for handling rings in ring container */
#define ice_for_each_rx_ring(pos, head)

#define ice_for_each_tx_ring(pos, head)
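
/* Illustrative only: iterators of this kind are usually a simple linked-list
 * walk over the rings hanging off the container, e.g.
 *
 *	for (pos = (head).rx_ring; pos; pos = pos->next)
 *
 * where 'rx_ring' and 'next' stand in for whatever fields the container and
 * ring structures actually use.
 */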

static inline unsigned int ice_rx_pg_order(struct ice_rx_ring *ring)
{}

#define ice_rx_pg_size(_ring)

union ice_32b_rx_flex_desc;

bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, unsigned int cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
u16
ice_select_queue(struct net_device *dev, struct sk_buff *skb,
		 struct net_device *sb_dev);
void ice_clean_tx_ring(struct ice_tx_ring *tx_ring);
void ice_clean_rx_ring(struct ice_rx_ring *rx_ring);
int ice_setup_tx_ring(struct ice_tx_ring *tx_ring);
int ice_setup_rx_ring(struct ice_rx_ring *rx_ring);
void ice_free_tx_ring(struct ice_tx_ring *tx_ring);
void ice_free_rx_ring(struct ice_rx_ring *rx_ring);
int ice_napi_poll(struct napi_struct *napi, int budget);
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
		   u8 *raw_packet);
int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget);
void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring);
#endif /* _ICE_TXRX_H_ */