/* linux/drivers/net/ethernet/intel/ixgbe/ixgbe.h */

/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 1999 - 2018 Intel Corporation. */

#ifndef _IXGBE_H_
#define _IXGBE_H_

#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/cpumask.h>
#include <linux/if_vlan.h>
#include <linux/jiffies.h>
#include <linux/phy.h>

#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#include "ixgbe_type.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb.h"
#if IS_ENABLED(CONFIG_FCOE)
#define IXGBE_FCOE
#include "ixgbe_fcoe.h"
#endif /* IS_ENABLED(CONFIG_FCOE) */
#ifdef CONFIG_IXGBE_DCA
#include <linux/dca.h>
#endif
#include "ixgbe_ipsec.h"

#include <net/xdp.h>

/* common prefix used by pr_<> macros */
#undef pr_fmt
#define pr_fmt(fmt)

/* TX/RX descriptor defines */
#define IXGBE_DEFAULT_TXD
#define IXGBE_DEFAULT_TX_WORK
#define IXGBE_MAX_TXD_82598
#define IXGBE_MAX_TXD_82599
#define IXGBE_MAX_TXD_X540
#define IXGBE_MAX_TXD_X550
#define IXGBE_MIN_TXD

#if (PAGE_SIZE < 8192)
#define IXGBE_DEFAULT_RXD
#else
#define IXGBE_DEFAULT_RXD
#endif
#define IXGBE_MAX_RXD_82598
#define IXGBE_MAX_RXD_82599
#define IXGBE_MAX_RXD_X540
#define IXGBE_MAX_RXD_X550
#define IXGBE_MIN_RXD

/* flow control */
#define IXGBE_MIN_FCRTL
#define IXGBE_MAX_FCRTL
#define IXGBE_MIN_FCRTH
#define IXGBE_MAX_FCRTH
#define IXGBE_DEFAULT_FCPAUSE
#define IXGBE_MIN_FCPAUSE
#define IXGBE_MAX_FCPAUSE

/* Supported Rx Buffer Sizes */
#define IXGBE_RXBUFFER_256
#define IXGBE_RXBUFFER_1536
#define IXGBE_RXBUFFER_2K
#define IXGBE_RXBUFFER_3K
#define IXGBE_RXBUFFER_4K
#define IXGBE_MAX_RXBUFFER

#define IXGBE_PKT_HDR_PAD

/* Attempt to maximize the headroom available for incoming frames.  We
 * use a 2K buffer for receives and need 1536/1534 to store the data for
 * the frame.  This leaves us with 512 bytes of room.  From that we need
 * to deduct the space needed for the shared info and the padding needed
 * to IP align the frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 *	 up negative.  In these cases we should fall back to the 3K
 *	 buffers.
 */
#if (PAGE_SIZE < 8192)
#define IXGBE_MAX_2K_FRAME_BUILD_SKB
#define IXGBE_2K_TOO_SMALL_WITH_PADDING

/* Compute the headroom padding for a given Rx buffer length.
 * NOTE(review): body elided in this stripped header — in-tree this derives
 * the pad from SKB_DATA_ALIGN and the buffer length; confirm against the
 * full source before relying on specifics.
 */
static inline int ixgbe_compute_pad(int rx_buf_len)
{}

/* Headroom (in bytes) reserved in front of a build_skb Rx frame.
 * NOTE(review): body elided — presumably wraps ixgbe_compute_pad() for the
 * 2K buffer case per the comment block above; verify in the full source.
 */
static inline int ixgbe_skb_pad(void)
{}

#define IXGBE_SKB_PAD
#else
#define IXGBE_SKB_PAD
#endif

/*
 * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 64 more, and skb_shared_info adds an additional 320 bytes more,
 * this adds up to 448 bytes of extra data.
 *
 * Since netdev_alloc_skb now allocates a page fragment we can use a value
 * of 256 and the resultant skb will have a truesize of 960 or less.
 */
#define IXGBE_RX_HDR_SIZE

/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IXGBE_RX_BUFFER_WRITE

#define IXGBE_RX_DMA_ATTR

/* Per-skb Tx flag bits (VLAN, checksum, TSO, ...) — enumerators elided. */
enum ixgbe_tx_flags {};

/* VLAN info */
#define IXGBE_TX_FLAGS_VLAN_MASK
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK
#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT
#define IXGBE_TX_FLAGS_VLAN_SHIFT

#define IXGBE_MAX_VF_MC_ENTRIES
#define IXGBE_MAX_VF_FUNCTIONS
#define IXGBE_MAX_VFTA_ENTRIES
#define MAX_EMULATION_MAC_ADDRS
#define IXGBE_MAX_PF_MACVLANS
#define VMDQ_P(p)
#define IXGBE_82599_VF_DEVICE_ID
#define IXGBE_X540_VF_DEVICE_ID

#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)

#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter)

/* Per-VF statistics counters (members elided in this stripped header). */
struct vf_stats {};

/* PF-side bookkeeping for one SR-IOV virtual function (members elided). */
struct vf_data_storage {};

/* Multicast/promiscuous cast modes requested by a VF over the mailbox. */
enum ixgbevf_xcast_modes {};

/* MAC/VLAN filter entry assigned to a VF (members elided). */
struct vf_macvlans {};

#define IXGBE_MAX_TXD_PWR
#define IXGBE_MAX_DATA_PER_TXD

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S)
#define DESC_NEEDED

/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer */
/* Per-descriptor Tx bookkeeping: skb pointer plus DMA handle (members
 * elided in this stripped header).
 */
struct ixgbe_tx_buffer {};

/* Per-descriptor Rx bookkeeping: page, DMA address, offset (members elided). */
struct ixgbe_rx_buffer {};

/* Generic packet/byte counters shared by Tx and Rx rings (members elided). */
struct ixgbe_queue_stats {};

/* Tx-specific ring statistics, e.g. restart/busy counts (members elided). */
struct ixgbe_tx_queue_stats {};

/* Rx-specific ring statistics, e.g. alloc failures (members elided). */
struct ixgbe_rx_queue_stats {};

#define IXGBE_TS_HDR_LEN

/* Bit positions for the ring->state bitmap (enumerators elided). */
enum ixgbe_ring_state_t {};

/* Test whether the ring uses build_skb Rx path (expansion elided). */
#define ring_uses_build_skb(ring)

/* State for a macvlan offload (L2 forwarding) accelerated netdev. */
struct ixgbe_fwd_adapter {};

#define check_for_tx_hang(ring)
#define set_check_for_tx_hang(ring)
#define clear_check_for_tx_hang(ring)
#define ring_is_rsc_enabled(ring)
#define set_ring_rsc_enabled(ring)
#define clear_ring_rsc_enabled(ring)
#define ring_is_xdp(ring)
#define set_ring_xdp(ring)
#define clear_ring_xdp(ring)
/* Descriptor ring (Tx, Rx, or XDP); node-aligned to avoid false sharing
 * across NUMA nodes (members elided in this stripped header).
 */
struct ixgbe_ring {} ____cacheline_internodealigned_in_smp;

/* Indices into the adapter's ring_feature[] array (enumerators elided). */
enum ixgbe_ring_f_enum {};

#define IXGBE_MAX_RSS_INDICES
#define IXGBE_MAX_RSS_INDICES_X550
#define IXGBE_MAX_VMDQ_INDICES
#define IXGBE_MAX_FDIR_INDICES
#define IXGBE_MAX_FCOE_INDICES
#define MAX_RX_QUEUES
#define MAX_TX_QUEUES
#define IXGBE_MAX_XDP_QS
#define IXGBE_MAX_L2A_QUEUES
#define IXGBE_BAD_L2A_QUEUE
#define IXGBE_MAX_MACVLANS

DECLARE_STATIC_KEY_FALSE(ixgbe_xdp_locking_key);

/* Per-feature ring accounting (limit/indices/mask/offset — members elided). */
struct ixgbe_ring_feature {} ____cacheline_internodealigned_in_smp;

#define IXGBE_82599_VMDQ_8Q_MASK
#define IXGBE_82599_VMDQ_4Q_MASK
#define IXGBE_82599_VMDQ_2Q_MASK

/*
 * FCoE requires that all Rx buffers be over 2200 bytes in length.  Since
 * this is twice the size of a half page we need to double the page order
 * for FCoE enabled Rx queues.
 */
/* Return the Rx buffer size in bytes for @ring.
 * NOTE(review): body elided — presumably selects 2K/3K based on ring state
 * (see FCoE comment above); confirm against the full source.
 */
static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
{}

/* Return the page allocation order for @ring's Rx buffers (body elided;
 * per the comment above, FCoE rings may need a higher order).
 */
static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
{}
/* Page size used for Rx buffers on @_ring (expansion elided). */
#define ixgbe_rx_pg_size(_ring)

#define IXGBE_ITR_ADAPTIVE_MIN_INC
#define IXGBE_ITR_ADAPTIVE_MIN_USECS
#define IXGBE_ITR_ADAPTIVE_MAX_USECS
#define IXGBE_ITR_ADAPTIVE_LATENCY
#define IXGBE_ITR_ADAPTIVE_BULK

/* Group of rings sharing one interrupt direction (Tx or Rx) within a
 * q_vector (members elided in this stripped header).
 */
struct ixgbe_ring_container {};

/* iterator for handling rings in ring container */
#define ixgbe_for_each_ring(pos, head)

#define MAX_RX_PACKET_BUFFERS
#define MAX_TX_PACKET_BUFFERS

/* MAX_Q_VECTORS of these are allocated,
 * but we only use one per queue-specific vector.
 */
/* Per-MSI-X-vector state: NAPI context plus Tx/Rx ring containers
 * (members elided in this stripped header).
 */
struct ixgbe_q_vector {};

#ifdef CONFIG_IXGBE_HWMON

#define IXGBE_HWMON_TYPE_LOC
#define IXGBE_HWMON_TYPE_TEMP
#define IXGBE_HWMON_TYPE_CAUTION
#define IXGBE_HWMON_TYPE_MAX

/* One hwmon sysfs attribute bound to a thermal sensor (members elided). */
struct hwmon_attr {};

/* Container for all hwmon attributes of an adapter (members elided). */
struct hwmon_buff {};
#endif /* CONFIG_IXGBE_HWMON */

/*
 * microsecond values for various ITR rates shifted by 2 to fit itr register
 * with the first 3 bits reserved 0
 */
#define IXGBE_MIN_RSC_ITR
#define IXGBE_100K_ITR
#define IXGBE_20K_ITR
#define IXGBE_12K_ITR

/* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */
/* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields.
 * Returns nonzero iff any of @stat_err_bits are set in @rx_desc
 * (body elided in this stripped header).
 */
static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
					const u32 stat_err_bits)
{}

/* Number of unused descriptors available on @ring (body elided;
 * NOTE(review): presumably derived from next_to_clean/next_to_use — confirm).
 */
static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
{}

#define IXGBE_RX_DESC(R, i)
#define IXGBE_TX_DESC(R, i)
#define IXGBE_TX_CTXTDESC(R, i)

#define IXGBE_MAX_JUMBO_FRAME_SIZE
#ifdef IXGBE_FCOE
/* Use 3K as the baby jumbo frame size for FCoE */
#define IXGBE_FCOE_JUMBO_FRAME_SIZE
#endif /* IXGBE_FCOE */

#define OTHER_VECTOR
#define NON_Q_VECTORS

#define MAX_MSIX_VECTORS_82599
#define MAX_Q_VECTORS_82599
#define MAX_MSIX_VECTORS_82598
#define MAX_Q_VECTORS_82598

/* Entry in the software MAC filter table; paired with the
 * IXGBE_MAC_STATE_* flags below (members elided).
 */
struct ixgbe_mac_addr {};

#define IXGBE_MAC_STATE_DEFAULT
#define IXGBE_MAC_STATE_MODIFIED
#define IXGBE_MAC_STATE_IN_USE

#define MAX_Q_VECTORS
#define MAX_MSIX_COUNT

#define MIN_MSIX_Q_VECTORS
#define MIN_MSIX_COUNT

/* default to trying for four seconds */
#define IXGBE_TRY_LINK_TIMEOUT
#define IXGBE_SFP_POLL_JIFFIES

#define IXGBE_PRIMARY_ABORT_LIMIT

/* board specific private data structure */
/* Board-specific private data structure: top-level per-device state for
 * the driver (members elided in this stripped header).
 */
struct ixgbe_adapter {};

/* Map @cpu to an XDP Tx queue index (body elided; NOTE(review): likely
 * interacts with ixgbe_xdp_locking_key when queues are shared — confirm).
 */
static inline int ixgbe_determine_xdp_q_idx(int cpu)
{}

/* Return the XDP Tx ring to use for the current CPU (body elided). */
static inline
struct ixgbe_ring *ixgbe_determine_xdp_ring(struct ixgbe_adapter *adapter)
{}

/* Maximum RSS indices supported by @adapter's MAC type (body elided). */
static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
{}

/* Software representation of one Flow Director filter (members elided). */
struct ixgbe_fdir_filter {};

/* Bit positions for the adapter->state bitmap (enumerators elided). */
enum ixgbe_state_t {};

/* Driver-private data stored in skb->cb (members elided). */
struct ixgbe_cb {};
/* Accessor for the ixgbe_cb area of @skb (expansion elided). */
#define IXGBE_CB(skb)

/* Board identifiers indexing the ixgbe_*_info tables below. */
enum ixgbe_boards {};

extern const struct ixgbe_info ixgbe_82598_info;
extern const struct ixgbe_info ixgbe_82599_info;
extern const struct ixgbe_info ixgbe_X540_info;
extern const struct ixgbe_info ixgbe_X550_info;
extern const struct ixgbe_info ixgbe_X550EM_x_info;
extern const struct ixgbe_info ixgbe_x550em_x_fw_info;
extern const struct ixgbe_info ixgbe_x550em_a_info;
extern const struct ixgbe_info ixgbe_x550em_a_fw_info;
#ifdef CONFIG_IXGBE_DCB
extern const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops;
#endif

extern char ixgbe_driver_name[];
#ifdef IXGBE_FCOE
extern char ixgbe_default_device_descr[];
#endif /* IXGBE_FCOE */

int ixgbe_open(struct net_device *netdev);
int ixgbe_close(struct net_device *netdev);
void ixgbe_up(struct ixgbe_adapter *adapter);
void ixgbe_down(struct ixgbe_adapter *adapter);
void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
void ixgbe_reset(struct ixgbe_adapter *adapter);
void ixgbe_set_ethtool_ops(struct net_device *netdev);
int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
int ixgbe_setup_tx_resources(struct ixgbe_ring *);
void ixgbe_free_rx_resources(struct ixgbe_ring *);
void ixgbe_free_tx_resources(struct ixgbe_ring *);
void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
void ixgbe_disable_rx(struct ixgbe_adapter *adapter);
void ixgbe_disable_tx(struct ixgbe_adapter *adapter);
void ixgbe_update_stats(struct ixgbe_adapter *adapter);
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
			 u16 subdevice_id);
#ifdef CONFIG_PCI_IOV
void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
#endif
int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
			 const u8 *addr, u16 queue);
int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
			 const u8 *addr, u16 queue);
void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid);
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *,
				  struct ixgbe_ring *);
void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
void ixgbe_write_eitr(struct ixgbe_q_vector *);
int ixgbe_poll(struct napi_struct *napi, int budget);
int ethtool_ioctl(struct ifreq *ifr);
int ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
int ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
int ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
int ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_hash_dword input,
					  union ixgbe_atr_hash_dword common,
					  u8 queue);
int ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
				    union ixgbe_atr_input *input_mask);
int ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id, u8 queue);
int ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id);
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
					  union ixgbe_atr_input *mask);
int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
				    struct ixgbe_fdir_filter *input,
				    u16 sw_idx);
void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
#endif
int ixgbe_setup_tc(struct net_device *dev, u8 tc);
void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
void ixgbe_do_reset(struct net_device *netdev);
#ifdef CONFIG_IXGBE_HWMON
void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
#endif /* CONFIG_IXGBE_HWMON */
#ifdef IXGBE_FCOE
void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
int ixgbe_fso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
	      u8 *hdr_len);
int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
		   union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb);
int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
		       struct scatterlist *sgl, unsigned int sgc);
int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
			  struct scatterlist *sgl, unsigned int sgc);
int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
int ixgbe_fcoe_enable(struct net_device *netdev);
int ixgbe_fcoe_disable(struct net_device *netdev);
int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
			   struct netdev_fcoe_hbainfo *info);
u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
#endif /* IXGBE_FCOE */
#ifdef CONFIG_DEBUG_FS
void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
void ixgbe_dbg_init(void);
void ixgbe_dbg_exit(void);
#else
/* No-op stubs when debugfs support is compiled out. */
static inline void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) {}
static inline void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) {}
static inline void ixgbe_dbg_init(void) {}
static inline void ixgbe_dbg_exit(void) {}
#endif /* CONFIG_DEBUG_FS */
/* Return the netdev_queue backing @ring (body elided; NOTE(review):
 * presumably netdev_get_tx_queue(ring->netdev, ring->queue_index) — confirm).
 */
static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
{}

void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter);
void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
void ixgbe_ptp_tx_hang(struct ixgbe_adapter *adapter);
void ixgbe_ptp_rx_pktstamp(struct ixgbe_q_vector *, struct sk_buff *);
void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *, struct sk_buff *skb);
/* Attach a hardware Rx timestamp to @skb if the descriptor indicates one
 * (body elided; NOTE(review): likely dispatches to the pktstamp/rgtstamp
 * helpers declared above — confirm against the full source).
 */
static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
					 union ixgbe_adv_rx_desc *rx_desc,
					 struct sk_buff *skb)
{}

int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter);
#ifdef CONFIG_PCI_IOV
void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter);
#endif

netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
				  struct ixgbe_adapter *adapter,
				  struct ixgbe_ring *tx_ring);
u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter);
void ixgbe_store_key(struct ixgbe_adapter *adapter);
void ixgbe_store_reta(struct ixgbe_adapter *adapter);
int ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
		       u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
#ifdef CONFIG_IXGBE_IPSEC
void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter);
void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter);
void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter);
void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
		    union ixgbe_adv_rx_desc *rx_desc,
		    struct sk_buff *skb);
int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
		   struct ixgbe_ipsec_tx_data *itd);
void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf);
int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *mbuf, u32 vf);
int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *mbuf, u32 vf);
#else
/* No-op / failing stubs when IPsec offload is compiled out. */
static inline void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter) { }
static inline void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter) { }
static inline void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) { }
static inline void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
				  union ixgbe_adv_rx_desc *rx_desc,
				  struct sk_buff *skb) { }
/* Tx offload unavailable: report success with no descriptor data added. */
static inline int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
				 struct ixgbe_tx_buffer *first,
				 struct ixgbe_ipsec_tx_data *itd) { return 0; }
static inline void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter,
					u32 vf) { }
/* VF SA requests are refused outright when IPsec support is absent. */
static inline int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter,
					u32 *mbuf, u32 vf) { return -EACCES; }
static inline int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter,
					u32 *mbuf, u32 vf) { return -EACCES; }
#endif /* CONFIG_IXGBE_IPSEC */

/* True if an XDP program is attached to @adapter (body elided in this
 * stripped header).
 */
static inline bool ixgbe_enabled_xdp_adapter(struct ixgbe_adapter *adapter)
{}

#endif /* _IXGBE_H_ */