/* linux/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h */

/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 1999 - 2018 Intel Corporation. */

#ifndef _IXGBEVF_H_
#define _IXGBEVF_H_

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/u64_stats_sync.h>
#include <net/xdp.h>

#include "vf.h"
#include "ipsec.h"

/* NOTE(review): every object-like macro in this header skeleton has had its
 * replacement value stripped; any use of these names as a value will not
 * compile until the upstream definitions are restored.
 */
#define IXGBE_MAX_TXD_PWR
#define IXGBE_MAX_DATA_PER_TXD

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S)
#define DESC_NEEDED

/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer
 */
/* NOTE(review): the struct/enum bodies below were stripped from this
 * skeleton; restore the member lists from the upstream driver before use.
 * An empty struct is a GNU extension, and an empty enum is invalid C
 * (C11 6.7.2.2 requires at least one enumerator).
 */
struct ixgbevf_tx_buffer {};

/* per-descriptor Rx buffer bookkeeping — members elided */
struct ixgbevf_rx_buffer {};

/* generic counters — members elided */
struct ixgbevf_stats {};

struct ixgbevf_tx_queue_stats {};

struct ixgbevf_rx_queue_stats {};

/* presumably bit indices for per-ring state flags (see ring_is_xdp/
 * check_for_tx_hang below) — enumerators elided; confirm against upstream
 */
enum ixgbevf_ring_state_t {};

/* XDP-ring predicate/marker helpers — bodies elided in this skeleton */
#define ring_is_xdp(ring)
#define set_ring_xdp(ring)
#define clear_ring_xdp(ring)

/* per-queue descriptor ring — members elided.  The alignment attribute keeps
 * each ring on its own cacheline per NUMA node to avoid false sharing.
 */
struct ixgbevf_ring {} ____cacheline_internodealigned_in_smp;

/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IXGBEVF_RX_BUFFER_WRITE

/* queue-count limits and RSS table/key sizes — values elided in this
 * skeleton; restore from the upstream driver
 */
#define MAX_RX_QUEUES
#define MAX_TX_QUEUES
#define MAX_XDP_QUEUES
#define IXGBEVF_MAX_RSS_QUEUES
#define IXGBEVF_82599_RETA_SIZE
#define IXGBEVF_X550_VFRETA_SIZE
#define IXGBEVF_RSS_HASH_KEY_SIZE
#define IXGBEVF_VFRSSRK_REGS

/* default and min/max descriptor-ring sizes — values elided */
#define IXGBEVF_DEFAULT_TXD
#define IXGBEVF_DEFAULT_RXD
#define IXGBEVF_MAX_TXD
#define IXGBEVF_MIN_TXD
#define IXGBEVF_MAX_RXD
#define IXGBEVF_MIN_RXD

/* Supported Rx Buffer Sizes */
#define IXGBEVF_RXBUFFER_256
#define IXGBEVF_RXBUFFER_2048
#define IXGBEVF_RXBUFFER_3072

#define IXGBEVF_RX_HDR_SIZE

#define MAXIMUM_ETHERNET_VLAN_SIZE

/* NOTE(review): both branches of the #if define the same empty macro in this
 * skeleton; upstream picks a page-size-dependent value — restore it
 */
#define IXGBEVF_SKB_PAD
#if (PAGE_SIZE < 8192)
#define IXGBEVF_MAX_FRAME_BUILD_SKB
#else
#define IXGBEVF_MAX_FRAME_BUILD_SKB
#endif

/* per-packet Tx flag bits and VLAN field masks/shift — values elided */
#define IXGBE_TX_FLAGS_CSUM
#define IXGBE_TX_FLAGS_VLAN
#define IXGBE_TX_FLAGS_TSO
#define IXGBE_TX_FLAGS_IPV4
#define IXGBE_TX_FLAGS_IPSEC
#define IXGBE_TX_FLAGS_VLAN_MASK
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK
#define IXGBE_TX_FLAGS_VLAN_SHIFT

/* per-ring feature predicates/markers — bodies elided */
#define ring_uses_large_buffer(ring)
#define set_ring_uses_large_buffer(ring)
#define clear_ring_uses_large_buffer(ring)

#define ring_uses_build_skb(ring)
#define set_ring_build_skb_enabled(ring)
#define clear_ring_build_skb_enabled(ring)

/* Return the size in bytes of the Rx buffer backing one descriptor on @ring.
 *
 * NOTE(review): the body was elided in this skeleton — upstream presumably
 * chooses between the IXGBEVF_RXBUFFER_* sizes based on ring state; confirm
 * and restore.  Returning 0 keeps the stub well-defined: falling off the end
 * of a non-void function is undefined behavior when the caller uses the
 * result (C11 6.9.1p12).
 */
static inline unsigned int ixgbevf_rx_bufsz(struct ixgbevf_ring *ring)
{
	(void)ring;	/* unused until the real body is restored */
	return 0;	/* TODO: restore upstream implementation */
}

/* Return the page allocation order used for @ring's Rx buffers.
 *
 * NOTE(review): body elided in this skeleton; order 0 (single page) is a
 * well-defined placeholder — falling off the end of a non-void function is
 * UB when the caller uses the result (C11 6.9.1p12).  Restore the upstream
 * implementation.
 */
static inline unsigned int ixgbevf_rx_pg_order(struct ixgbevf_ring *ring)
{
	(void)ring;	/* unused until the real body is restored */
	return 0;	/* TODO: restore upstream implementation */
}

/* page size for an Rx buffer — body elided */
#define ixgbevf_rx_pg_size(_ring)

/* Tx-hang detection flag helpers — bodies elided */
#define check_for_tx_hang(ring)
#define set_check_for_tx_hang(ring)
#define clear_check_for_tx_hang(ring)

/* group of rings sharing one interrupt vector — members elided */
struct ixgbevf_ring_container {};

/* iterator for handling rings in ring container */
#define ixgbevf_for_each_ring(pos, head)

/* MAX_MSIX_Q_VECTORS of these are allocated,
 * but we only use one per queue-specific vector.
 */
/* members elided in this skeleton */
struct ixgbevf_q_vector {};

/* microsecond values for various ITR rates shifted by 2 to fit itr register
 * with the first 3 bits reserved 0
 */
/* NOTE(review): values elided in this skeleton — restore from upstream */
#define IXGBE_MIN_RSC_ITR
#define IXGBE_100K_ITR
#define IXGBE_20K_ITR
#define IXGBE_12K_ITR

/* Helper macros to switch between ints/sec and what the register uses.
 * And yes, it's the same math going both ways.  The lowest value
 * supported by all of the ixgbe hardware is 8.
 */
#define EITR_INTS_PER_SEC_TO_REG(_eitr)
#define EITR_REG_TO_INTS_PER_SEC

/* ixgbevf_test_staterr - tests bits in Rx descriptor status and error fields */
static inline __le32 ixgbevf_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
					  const u32 stat_err_bits)
{
	/* NOTE(review): body elided in this skeleton.  Returning 0 ("no bits
	 * set") keeps the stub well-defined — falling off the end of a
	 * non-void function is UB when the caller uses the result (C11
	 * 6.9.1p12).  Restore the upstream status/error mask test.
	 */
	(void)rx_desc;
	(void)stat_err_bits;
	return 0;	/* TODO: restore upstream implementation */
}

/* Return the count of descriptors on @ring not yet given to hardware.
 *
 * NOTE(review): body elided in this skeleton; 0 ("ring full") is the safe
 * placeholder and keeps the stub well-defined — falling off the end of a
 * non-void function is UB when the caller uses the result (C11 6.9.1p12).
 * Restore the upstream next_to_clean/next_to_use arithmetic.
 */
static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring)
{
	(void)ring;	/* unused until the real body is restored */
	return 0;	/* TODO: restore upstream implementation */
}

/* Presumably writes @value to @ring's hardware tail register (per its name).
 *
 * NOTE(review): body elided in this skeleton — as written this stub touches
 * nothing; restore the upstream MMIO write before use.
 */
static inline void ixgbevf_write_tail(struct ixgbevf_ring *ring, u32 value)
{}

/* typed accessors for descriptors at index i of ring R — bodies elided */
#define IXGBEVF_RX_DESC(R, i)
#define IXGBEVF_TX_DESC(R, i)
#define IXGBEVF_TX_CTXTDESC(R, i)

#define IXGBE_MAX_JUMBO_FRAME_SIZE

/* MSI-X vector accounting — values elided */
#define OTHER_VECTOR
#define NON_Q_VECTORS

#define MAX_MSIX_Q_VECTORS

#define MIN_MSIX_Q_VECTORS
#define MIN_MSIX_COUNT

#define IXGBEVF_RX_DMA_ATTR

/* board specific private data structure */
/* members elided in this skeleton */
struct ixgbevf_adapter {};

/* NOTE(review): empty enums are invalid C (C11 6.7.2.2) — restore the
 * enumerators from upstream.  The "ixbgevf" spelling below looks like a
 * typo but is kept as-is: the tag may be referenced elsewhere — verify
 * before renaming.
 */
enum ixbgevf_state_t {};

enum ixgbevf_boards {};

enum ixgbevf_xcast_modes {};

extern const struct ixgbevf_info ixgbevf_82599_vf_info;
extern const struct ixgbevf_info ixgbevf_X540_vf_info;
extern const struct ixgbevf_info ixgbevf_X550_vf_info;
extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info;
extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops_legacy;
extern const struct ixgbevf_info ixgbevf_x550em_a_vf_info;

extern const struct ixgbevf_info ixgbevf_82599_vf_hv_info;
extern const struct ixgbevf_info ixgbevf_X540_vf_hv_info;
extern const struct ixgbevf_info ixgbevf_X550_vf_hv_info;
extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info;
extern const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops;

/* needed by ethtool.c */
extern const char ixgbevf_driver_name[];

int ixgbevf_open(struct net_device *netdev);
int ixgbevf_close(struct net_device *netdev);
void ixgbevf_up(struct ixgbevf_adapter *adapter);
void ixgbevf_down(struct ixgbevf_adapter *adapter);
void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
void ixgbevf_reset(struct ixgbevf_adapter *adapter);
void ixgbevf_set_ethtool_ops(struct net_device *netdev);
int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *rx_ring);
int ixgbevf_setup_tx_resources(struct ixgbevf_ring *);
void ixgbevf_free_rx_resources(struct ixgbevf_ring *);
void ixgbevf_free_tx_resources(struct ixgbevf_ring *);
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
int ethtool_ioctl(struct ifreq *ifr);

extern void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector);

/* IPsec offload entry points.  When CONFIG_IXGBEVF_IPSEC is off, the inline
 * no-op stubs below stand in so callers need no #ifdefs; the Tx stub returns
 * 0 (no offload requested / nothing to do).
 */
#ifdef CONFIG_IXGBEVF_IPSEC
void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter);
void ixgbevf_stop_ipsec_offload(struct ixgbevf_adapter *adapter);
void ixgbevf_ipsec_restore(struct ixgbevf_adapter *adapter);
void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring,
		      union ixgbe_adv_rx_desc *rx_desc,
		      struct sk_buff *skb);
int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring,
		     struct ixgbevf_tx_buffer *first,
		     struct ixgbevf_ipsec_tx_data *itd);
#else
static inline void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter)
{ }
static inline void ixgbevf_stop_ipsec_offload(struct ixgbevf_adapter *adapter)
{ }
static inline void ixgbevf_ipsec_restore(struct ixgbevf_adapter *adapter) { }
static inline void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring,
				    union ixgbe_adv_rx_desc *rx_desc,
				    struct sk_buff *skb) { }
static inline int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring,
				   struct ixgbevf_tx_buffer *first,
				   struct ixgbevf_ipsec_tx_data *itd)
{ return 0; }
#endif /* CONFIG_IXGBEVF_IPSEC */

/* presumably maps a hw struct back to its owning netdev — body elided;
 * confirm against upstream
 */
#define ixgbevf_hw_to_netdev(hw)

/* debug-print wrapper — body elided.  Note: the "arg..." named variadic
 * parameter is a GNU extension (standard C uses "..."/__VA_ARGS__).
 */
#define hw_dbg(hw, format, arg...)

s32 ixgbevf_poll_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size);
s32 ixgbevf_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size);

#endif /* _IXGBEVF_H_ */