linux/drivers/net/ethernet/intel/iavf/iavf_txrx.h

/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#ifndef _IAVF_TXRX_H_
#define _IAVF_TXRX_H_

/* Interrupt Throttling and Rate Limiting Goodies */
#define IAVF_DEFAULT_IRQ_WORK

/* The datasheets for the X710 and XL710 indicate that the maximum value for
 * the ITR is 8160usec, which is then called out as 0xFF0 with a 2usec
 * resolution. 8160 is 0x1FE0 when written out in hex. So instead of storing
 * the register value, which is the interval divided by 2, let's use the
 * actual values and avoid an excessive amount of translation.
 */
#define IAVF_ITR_DYNAMIC
#define IAVF_ITR_MASK
#define IAVF_ITR_100K
#define IAVF_ITR_50K
#define IAVF_ITR_20K
#define IAVF_ITR_18K
#define IAVF_ITR_8K
#define IAVF_MAX_ITR
#define ITR_TO_REG(setting)
#define ITR_REG_ALIGN(setting)
#define ITR_IS_DYNAMIC(setting)
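
/* A sketch of one way the above could be encoded (illustrative values,
 * not the verified in-tree definitions): keep the setting in even usecs,
 * let the top bit double as the "dynamic" flag, and halve only when
 * writing the register:
 *
 *     #define IAVF_ITR_DYNAMIC        0x8000
 *     #define IAVF_ITR_MASK           0x1FFE
 *     #define ITR_TO_REG(setting)     (((setting) & ~IAVF_ITR_DYNAMIC) >> 1)
 *     #define ITR_IS_DYNAMIC(setting) (!!((setting) & IAVF_ITR_DYNAMIC))
 */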

#define IAVF_ITR_RX_DEF
#define IAVF_ITR_TX_DEF

/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
 * the value of the rate limit is non-zero
 */
#define INTRL_ENA
#define IAVF_MAX_INTRL
#define INTRL_REG_TO_USEC(intrl)
#define INTRL_USEC_TO_REG(set)
#define IAVF_INTRL_8K
#define IAVF_INTRL_62K
#define IAVF_INTRL_83K
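
/* With the 0x40 enable bit above (i.e. BIT(6)) and assuming a 4 usec
 * register resolution (an assumption, not a verified value), the
 * translation macros could look like:
 *
 *     #define INTRL_ENA                BIT(6)
 *     #define INTRL_REG_TO_USEC(intrl) (((intrl) & ~INTRL_ENA) << 2)
 *     #define INTRL_USEC_TO_REG(set)   ((set) >> 2)
 */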

#define IAVF_QUEUE_END_OF_LIST

/* This enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere in the manual
 * that mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register; instead it is a special value meaning "don't update" ITR0/1/2.
 */
enum iavf_dyn_idx_t {};
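
/* Following the comment above, the enum plausibly names three usable ITR
 * indexes plus the "don't update" sentinel (a sketch, not the verified
 * in-tree layout):
 *
 *     enum iavf_dyn_idx_t {
 *             IAVF_IDX_ITR0 = 0,
 *             IAVF_IDX_ITR1 = 1,
 *             IAVF_IDX_ITR2 = 2,
 *             IAVF_ITR_NONE = 3
 *     };
 */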

/* these are indexes into ITRN registers */
#define IAVF_RX_ITR
#define IAVF_TX_ITR
#define IAVF_PE_ITR

/* Supported RSS offloads */
#define IAVF_DEFAULT_RSS_HENA

#define IAVF_DEFAULT_RSS_HENA_EXPANDED

#define iavf_rx_desc

/**
 * iavf_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask, which is really only used for boolean tests.
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 */
static inline bool iavf_test_staterr(union iavf_rx_desc *rx_desc,
				     const u64 stat_err_bits)
{}
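
/* One possible body for the stub above (a sketch assuming the descriptor
 * layout from iavf_type.h, where status/error/length share the
 * little-endian qword1 of the writeback descriptor):
 *
 *     return !!(rx_desc->wb.qword1.status_error_len &
 *               cpu_to_le64(stat_err_bits));
 */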

/* How many Rx Buffers do we bundle into one write to the hardware? */
#define IAVF_RX_INCREMENT(r, i)

#define IAVF_RX_NEXT_DESC(r, i, n)

#define IAVF_RX_NEXT_DESC_PREFETCH(r, i, n)

#define IAVF_MAX_BUFFER_TXD
#define IAVF_MIN_TX_LEN

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value down
 * to the nearest 4K, which represents our maximum read request size.
 */
#define IAVF_MAX_READ_REQ_SIZE
#define IAVF_MAX_DATA_PER_TXD
#define IAVF_MAX_DATA_PER_TXD_ALIGNED
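
/* For example (a sketch; the 4K figure comes from the comment above, the
 * exact expressions are assumptions), yielding the 12K limit used below:
 *
 *     #define IAVF_MAX_READ_REQ_SIZE        4096
 *     #define IAVF_MAX_DATA_PER_TXD         (16 * 1024 - 1)
 *     #define IAVF_MAX_DATA_PER_TXD_ALIGNED \
 *             (IAVF_MAX_DATA_PER_TXD & ~(IAVF_MAX_READ_REQ_SIZE - 1))
 */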

/**
 * iavf_txd_use_count  - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment.  For our purposes this is accurate out to 1M, which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + 1;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + 1;
 */
static inline unsigned int iavf_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;
}

/* Tx Descriptors needed, worst case */
#define DESC_NEEDED
#define IAVF_MIN_DESC_PENDING

#define IAVF_TX_FLAGS_HW_VLAN
#define IAVF_TX_FLAGS_SW_VLAN
#define IAVF_TX_FLAGS_TSO
#define IAVF_TX_FLAGS_IPV4
#define IAVF_TX_FLAGS_IPV6
#define IAVF_TX_FLAGS_FCCRC
#define IAVF_TX_FLAGS_FSO
#define IAVF_TX_FLAGS_FD_SB
#define IAVF_TX_FLAGS_VXLAN_TUNNEL
#define IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN
#define IAVF_TX_FLAGS_VLAN_MASK
#define IAVF_TX_FLAGS_VLAN_PRIO_MASK
#define IAVF_TX_FLAGS_VLAN_PRIO_SHIFT
#define IAVF_TX_FLAGS_VLAN_SHIFT

struct iavf_tx_buffer {};

struct iavf_queue_stats {};

struct iavf_tx_queue_stats {};

struct iavf_rx_queue_stats {};

/* some useful defines for the virtchannel interface, which
 * is the only remaining user of header split
 */
#define IAVF_RX_DTYPE_NO_SPLIT
#define IAVF_RX_DTYPE_HEADER_SPLIT
#define IAVF_RX_DTYPE_SPLIT_ALWAYS
#define IAVF_RX_SPLIT_L2
#define IAVF_RX_SPLIT_IP
#define IAVF_RX_SPLIT_TCP_UDP
#define IAVF_RX_SPLIT_SCTP
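
/* Plausibly small integers for the dtype and single-bit flags for the
 * split types (a sketch, not verified values):
 *
 *     #define IAVF_RX_DTYPE_NO_SPLIT      0
 *     #define IAVF_RX_DTYPE_HEADER_SPLIT  1
 *     #define IAVF_RX_DTYPE_SPLIT_ALWAYS  2
 *     #define IAVF_RX_SPLIT_L2            0x1
 *     #define IAVF_RX_SPLIT_IP            0x2
 *     #define IAVF_RX_SPLIT_TCP_UDP       0x4
 *     #define IAVF_RX_SPLIT_SCTP          0x8
 */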

/* struct that defines a descriptor ring, associated with a VSI */
struct iavf_ring {} ____cacheline_internodealigned_in_smp;

#define IAVF_ITR_ADAPTIVE_MIN_INC
#define IAVF_ITR_ADAPTIVE_MIN_USECS
#define IAVF_ITR_ADAPTIVE_MAX_USECS
#define IAVF_ITR_ADAPTIVE_LATENCY
#define IAVF_ITR_ADAPTIVE_BULK
#define ITR_IS_BULK(x)

struct iavf_ring_container {};

/* iterator for handling rings in ring container */
#define iavf_for_each_ring(pos, head)
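
/* A typical singly-linked iteration (a sketch assuming the container
 * holds a 'ring' head pointer and each ring a 'next' pointer):
 *
 *     #define iavf_for_each_ring(pos, head) \
 *             for (pos = (head).ring; pos; pos = pos->next)
 */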

bool iavf_alloc_rx_buffers(struct iavf_ring *rxr, u16 cleaned_count);
netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring);
int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring);
void iavf_free_tx_resources(struct iavf_ring *tx_ring);
void iavf_free_rx_resources(struct iavf_ring *rx_ring);
int iavf_napi_poll(struct napi_struct *napi, int budget);
void iavf_detect_recover_hung(struct iavf_vsi *vsi);
int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size);
bool __iavf_chk_linearize(struct sk_buff *skb);

/**
 * iavf_xmit_descriptor_count - calculate number of Tx descriptors needed
 * @skb:     send buffer
 *
 * Returns the number of data descriptors needed for this skb. Returns 0 to
 * indicate that there are not enough descriptors available in this ring,
 * since we need at least one descriptor.
 **/
static inline int iavf_xmit_descriptor_count(struct sk_buff *skb)
{}
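
/* A sketch of the counting described above: charge iavf_txd_use_count()
 * descriptors for the linear region and then for each page fragment
 * (using the standard skb_shinfo()/skb_headlen()/skb_frag_size() helpers):
 *
 *     const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
 *     unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
 *     int count = 0, size = skb_headlen(skb);
 *
 *     for (;;) {
 *             count += iavf_txd_use_count(size);
 *             if (!nr_frags--)
 *                     break;
 *             size = skb_frag_size(frag++);
 *     }
 *     return count;
 */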

/**
 * iavf_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the number of descriptors we want to ensure are available
 *
 * Returns 0 if stop is not needed
 **/
static inline int iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
{}
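
/* One possible body (a sketch): take the fast path when enough
 * descriptors are already free and otherwise defer to the slow path
 * declared above; IAVF_DESC_UNUSED() is a hypothetical helper returning
 * the number of unused descriptors in the ring:
 *
 *     if (likely(IAVF_DESC_UNUSED(tx_ring) >= size))
 *             return 0;
 *     return __iavf_maybe_stop_tx(tx_ring, size);
 */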

/**
 * iavf_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb:      send buffer
 * @count:    number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 **/
static inline bool iavf_chk_linearize(struct sk_buff *skb, int count)
{}
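
/* A sketch following the comment above, assuming IAVF_MAX_BUFFER_TXD is
 * the 8-fragment hardware limit: short chains pass as-is, GSO packets get
 * the thorough check, and any other packet over the limit must linearize:
 *
 *     if (likely(count < IAVF_MAX_BUFFER_TXD))
 *             return false;
 *     if (skb_is_gso(skb))
 *             return __iavf_chk_linearize(skb);
 *     return count != IAVF_MAX_BUFFER_TXD;
 */
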
/**
 * txring_txq - helper to convert from a ring to a queue
 * @ring: Tx ring to find the netdev equivalent of
 **/
static inline struct netdev_queue *txring_txq(const struct iavf_ring *ring)
{}
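
/* Likely a thin wrapper over netdev_get_tx_queue() (a sketch assuming the
 * ring tracks its netdev and queue index):
 *
 *     return netdev_get_tx_queue(ring->netdev, ring->queue_index);
 */
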
#endif /* _IAVF_TXRX_H_ */