linux/drivers/net/ethernet/intel/ice/ice.h

/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */

#ifndef _ICE_H_
#define _ICE_H_

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/netdevice.h>
#include <linux/compiler.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/cpumask.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/interrupt.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/bitmap.h>
#include <linux/log2.h>
#include <linux/ip.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/pkt_sched.h>
#include <linux/if_bridge.h>
#include <linux/ctype.h>
#include <linux/linkmode.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/auxiliary_bus.h>
#include <linux/avf/virtchnl.h>
#include <linux/cpu_rmap.h>
#include <linux/dim.h>
#include <linux/gnss.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_gact.h>
#include <net/ip.h>
#include <net/devlink.h>
#include <net/ipv6.h>
#include <net/xdp_sock.h>
#include <net/xdp_sock_drv.h>
#include <net/geneve.h>
#include <net/gre.h>
#include <net/udp_tunnel.h>
#include <net/vxlan.h>
#include <net/gtp.h>
#include <linux/ppp_defs.h>
#include "ice_devids.h"
#include "ice_type.h"
#include "ice_txrx.h"
#include "ice_dcb.h"
#include "ice_switch.h"
#include "ice_common.h"
#include "ice_flow.h"
#include "ice_sched.h"
#include "ice_idc_int.h"
#include "ice_sriov.h"
#include "ice_vf_mbx.h"
#include "ice_ptp.h"
#include "ice_fdir.h"
#include "ice_xsk.h"
#include "ice_arfs.h"
#include "ice_repr.h"
#include "ice_eswitch.h"
#include "ice_lag.h"
#include "ice_vsi_vlan_ops.h"
#include "ice_gnss.h"
#include "ice_irq.h"
#include "ice_dpll.h"
#include "ice_adapter.h"

#define ICE_BAR0
#define ICE_REQ_DESC_MULTIPLE
#define ICE_MIN_NUM_DESC
#define ICE_MAX_NUM_DESC
#define ICE_DFLT_MIN_RX_DESC
#define ICE_DFLT_NUM_TX_DESC
#define ICE_DFLT_NUM_RX_DESC

#define ICE_DFLT_TRAFFIC_CLASS
#define ICE_INT_NAME_STR_LEN
#define ICE_AQ_LEN
#define ICE_MBXSQ_LEN
#define ICE_SBQ_LEN
#define ICE_MIN_LAN_TXRX_MSIX
#define ICE_MIN_LAN_OICR_MSIX
#define ICE_MIN_MSIX
#define ICE_FDIR_MSIX
#define ICE_RDMA_NUM_AEQ_MSIX
#define ICE_MIN_RDMA_MSIX
#define ICE_ESWITCH_MSIX
#define ICE_NO_VSI
#define ICE_VSI_MAP_CONTIG
#define ICE_VSI_MAP_SCATTER
#define ICE_MAX_SCATTER_TXQS
#define ICE_MAX_SCATTER_RXQS
#define ICE_Q_WAIT_RETRY_LIMIT
#define ICE_Q_WAIT_MAX_RETRY
#define ICE_MAX_LG_RSS_QS
#define ICE_INVAL_Q_INDEX

#define ICE_MAX_RXQS_PER_TC

#define ICE_CHNL_START_TC

#define ICE_MAX_RESET_WAIT

#define ICE_VSIQF_HKEY_ARRAY_SIZE

#define ICE_DFLT_NETIF_M

#define ICE_MAX_MTU

#define ICE_MAX_TSO_SIZE

#define ICE_UP_TABLE_TRANSLATE(val, i)

#define ICE_TX_DESC(R, i)
#define ICE_RX_DESC(R, i)
#define ICE_TX_CTX_DESC(R, i)
#define ICE_TX_FDIRDESC(R, i)

/* Minimum BW limit is 500 Kbps for any scheduler node */
#define ICE_MIN_BW_LIMIT
/* The user can specify BW in Kbit/Mbit/Gbit and the OS converts it to bytes;
 * use this divisor to convert the user-specified BW limit into Kbps.
 */
#define ICE_BW_KBPS_DIVISOR
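
/* Illustrative sketch, not driver code: mqprio hands the driver a max rate in
 * bytes per second, and 1 Kbit/s equals 125 bytes/s, so dividing by 125 yields
 * Kbps; ICE_BW_KBPS_DIVISOR is presumed to encode that divisor. The helper
 * name below is hypothetical.
 */
static inline u64 ice_example_bytes_to_kbps(u64 bytes_per_sec)
{
	/* div_u64() from <linux/math64.h> handles the 64-bit division */
	return div_u64(bytes_per_sec, 125);
}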

/* Default recipes have priority 4 or lower, so priority values between 5 and 7
 * can be used as the filter priority for advanced switch filters (an advanced
 * switch filter needs a new recipe to be created for its specified extraction
 * sequence, because the default recipe extraction sequence does not represent
 * custom extraction).
 */
#define ICE_SWITCH_FLTR_PRIO_QUEUE
/* Priority 6 is reserved for future use (e.g. a switch filter matching L3
 * fields + optionally IP TOS/TTL + L4 fields + optionally TCP flags such as
 * SYN/FIN/RST).
 */
#define ICE_SWITCH_FLTR_PRIO_RSVD
#define ICE_SWITCH_FLTR_PRIO_VSI
#define ICE_SWITCH_FLTR_PRIO_QGRP
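
/* Illustrative sketch, not driver code: pick an advanced switch filter
 * priority based on where the filter forwards traffic, following the bands
 * described above. The helper name and its parameters are hypothetical.
 */
static inline u8 ice_example_adv_fltr_prio(bool fwd_to_queue, bool fwd_to_qgrp)
{
	if (fwd_to_queue)
		return ICE_SWITCH_FLTR_PRIO_QUEUE;
	if (fwd_to_qgrp)
		return ICE_SWITCH_FLTR_PRIO_QGRP;
	/* default: the filter forwards to a whole VSI */
	return ICE_SWITCH_FLTR_PRIO_VSI;
}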

/* Macro for each VSI in a PF */
#define ice_for_each_vsi(pf, i)

/* Macros for each Tx/Xdp/Rx ring in a VSI */
#define ice_for_each_txq(vsi, i)

#define ice_for_each_xdp_txq(vsi, i)

#define ice_for_each_rxq(vsi, i)

/* Macros for each allocated Tx/Rx ring whether used or not in a VSI */
#define ice_for_each_alloc_txq(vsi, i)

#define ice_for_each_alloc_rxq(vsi, i)

#define ice_for_each_q_vector(vsi, i)

#define ice_for_each_chnl_tc(i)
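
/* Illustrative usage sketch, not driver code: the iterators above are assumed
 * to expand to plain for loops over the VSI's ring/vector counts. The helper
 * name is hypothetical; tx_rings comes from the elided struct ice_vsi.
 */
static inline unsigned int ice_example_count_tx_rings(struct ice_vsi *vsi)
{
	unsigned int cnt = 0;
	int i;

	ice_for_each_txq(vsi, i)
		if (vsi->tx_rings[i])
			cnt++;

	return cnt;
}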

#define ICE_UCAST_PROMISC_BITS

#define ICE_UCAST_VLAN_PROMISC_BITS

#define ICE_MCAST_PROMISC_BITS

#define ICE_MCAST_VLAN_PROMISC_BITS

#define ice_pf_to_dev(pf)

#define ice_pf_src_tmr_owned(pf)

enum ice_feature {};

DECLARE_STATIC_KEY_FALSE(ice_xdp_locking_key);

struct ice_channel {};

struct ice_txq_meta {};

struct ice_tc_info {};

struct ice_tc_cfg {};

struct ice_qs_cfg {};

struct ice_sw {};

enum ice_pf_state {};

enum ice_vsi_state {};

struct ice_vsi_stats {};

/* struct that defines a VSI, associated with a dev */
struct ice_vsi {} ____cacheline_internodealigned_in_smp;

/* struct that defines an interrupt vector */
struct ice_q_vector {} ____cacheline_internodealigned_in_smp;

enum ice_pf_flags {};

enum ice_misc_thread_tasks {};

struct ice_eswitch {};

struct ice_agg_node {};

struct ice_pf {};

extern struct workqueue_struct *ice_lag_wq;

struct ice_netdev_priv {};

/**
 * ice_vector_ch_enabled - check if a q_vector is channel enabled
 * @qv: pointer to q_vector, can be NULL
 *
 * Returns true if the vector is channel enabled, otherwise false.
 */
static inline bool ice_vector_ch_enabled(struct ice_q_vector *qv)
{}
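
/* Minimal sketch of the check above, assuming a q_vector carries a non-NULL
 * channel pointer (ch) only when it has been moved to an ADQ channel; the
 * helper name is hypothetical and ch comes from the elided struct ice_q_vector.
 */
static inline bool ice_example_vector_ch_enabled(struct ice_q_vector *qv)
{
	/* a NULL q_vector is treated as not channel enabled */
	return qv && qv->ch;
}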

/**
 * ice_ptp_pf_handles_tx_interrupt - Check if PF handles Tx interrupt
 * @pf: Board private structure
 *
 * Return true if this PF should respond to the Tx timestamp interrupt
 * indication in the miscellaneous OICR interrupt handler.
 */
static inline bool ice_ptp_pf_handles_tx_interrupt(struct ice_pf *pf)
{}
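
/* Minimal sketch of the check above, assuming the PTP code records how Tx
 * timestamp interrupts are delivered in pf->ptp.tx_interrupt_mode (names taken
 * from ice_ptp.h); anything other than "none" means this PF should handle the
 * indication. The helper name is hypothetical.
 */
static inline bool ice_example_ptp_pf_handles_tx_interrupt(struct ice_pf *pf)
{
	return pf->ptp.tx_interrupt_mode != ICE_PTP_TX_INTERRUPT_NONE;
}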

/**
 * ice_irq_dynamic_ena - Enable default interrupt generation settings
 * @hw: pointer to HW struct
 * @vsi: pointer to VSI struct, can be NULL
 * @q_vector: pointer to q_vector, can be NULL
 */
static inline void
ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
		    struct ice_q_vector *q_vector)
{}

/**
 * ice_netdev_to_pf - Retrieve the PF struct associated with a netdev
 * @netdev: pointer to the netdev struct
 */
static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev)
{}
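
/* Minimal sketch, assuming the netdev private area holds an ice_netdev_priv
 * whose VSI points back to its owning PF via the back member of the elided
 * struct ice_vsi. The helper name is hypothetical.
 */
static inline struct ice_pf *ice_example_netdev_to_pf(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	return np->vsi->back;
}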

static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi)
{}

static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
{}

/**
 * ice_get_xp_from_qid - get ZC XSK buffer pool bound to a queue ID
 * @vsi: pointer to VSI
 * @qid: index of a queue to look at XSK buff pool presence
 *
 * Return: A pointer to xsk_buff_pool structure if there is a buffer pool
 * attached and configured as zero-copy, NULL otherwise.
 */
static inline struct xsk_buff_pool *ice_get_xp_from_qid(struct ice_vsi *vsi,
							u16 qid)
{}
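
/* Minimal sketch, assuming the pool is looked up with the core helper
 * xsk_get_pool_from_qid() from <net/xdp_sock_drv.h> and only reported when
 * XDP is enabled on the VSI. The helper name is hypothetical and the real
 * function may apply additional zero-copy checks.
 */
static inline struct xsk_buff_pool *
ice_example_get_xp_from_qid(struct ice_vsi *vsi, u16 qid)
{
	if (!ice_is_xdp_ena_vsi(vsi))
		return NULL;

	return xsk_get_pool_from_qid(vsi->netdev, qid);
}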

/**
 * ice_rx_xsk_pool - assign XSK buff pool to Rx ring
 * @ring: Rx ring to use
 *
 * Sets XSK buff pool pointer on Rx ring.
 */
static inline void ice_rx_xsk_pool(struct ice_rx_ring *ring)
{}
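
/* Minimal sketch: publish the queue's XSK pool (or NULL) on the Rx ring.
 * The vsi/q_index/xsk_pool members are assumed from the elided ring
 * definitions; WRITE_ONCE() pairs with lockless readers on the hot path.
 * The helper name is hypothetical.
 */
static inline void ice_example_rx_xsk_pool(struct ice_rx_ring *ring)
{
	struct ice_vsi *vsi = ring->vsi;
	u16 qid = ring->q_index;

	WRITE_ONCE(ring->xsk_pool, ice_get_xp_from_qid(vsi, qid));
}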

/**
 * ice_tx_xsk_pool - assign XSK buff pool to XDP ring
 * @vsi: pointer to VSI
 * @qid: index of a queue to look at XSK buff pool presence
 *
 * Sets XSK buff pool pointer on XDP ring.
 *
 * The XDP ring is picked from the Rx ring, and the Rx ring is picked based on
 * the provided queue ID. The reason is that a queue vector might have more
 * than one XDP ring assigned, e.g. when the user reduced the queue count on
 * the netdev; the Rx ring carries a pointer to one of these XDP rings for its
 * own purposes, such as handling the XDP_TX action, so we can piggyback here
 * on the rx_ring->xdp_ring assignment done during XDP ring initialization.
 */
static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
{}
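
/* Minimal sketch of the scheme described above: follow rx_ring->xdp_ring for
 * the given queue ID and publish the XSK pool on that XDP ring. Member names
 * are assumed from the elided ring definitions; the helper name is
 * hypothetical.
 */
static inline void ice_example_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
{
	struct ice_tx_ring *ring;

	ring = vsi->rx_rings[qid]->xdp_ring;
	if (!ring)
		return;

	WRITE_ONCE(ring->xsk_pool, ice_get_xp_from_qid(vsi, qid));
}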

/**
 * ice_get_main_vsi - Get the PF VSI
 * @pf: PF instance
 *
 * Returns pf->vsi[0], which by definition is the PF VSI
 */
static inline struct ice_vsi *ice_get_main_vsi(struct ice_pf *pf)
{}

/**
 * ice_get_netdev_priv_vsi - return VSI associated with netdev priv.
 * @np: private netdev structure
 */
static inline struct ice_vsi *ice_get_netdev_priv_vsi(struct ice_netdev_priv *np)
{}

/**
 * ice_get_ctrl_vsi - Get the control VSI
 * @pf: PF instance
 */
static inline struct ice_vsi *ice_get_ctrl_vsi(struct ice_pf *pf)
{}

/**
 * ice_find_vsi - Find the VSI from VSI ID
 * @pf: The PF pointer to search in
 * @vsi_num: The VSI ID to search for
 */
static inline struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num)
{}
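
/* Minimal sketch: walk the PF's VSI array with ice_for_each_vsi() and return
 * the entry whose hardware VSI number matches. The vsi_num member is assumed
 * from the elided struct ice_vsi; the helper name is hypothetical.
 */
static inline struct ice_vsi *
ice_example_find_vsi(struct ice_pf *pf, u16 vsi_num)
{
	int i;

	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num)
			return pf->vsi[i];

	return NULL;
}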

/**
 * ice_is_switchdev_running - check if switchdev is configured
 * @pf: pointer to PF structure
 *
 * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV
 * and switchdev is configured, false otherwise.
 */
static inline bool ice_is_switchdev_running(struct ice_pf *pf)
{}
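
/* Minimal sketch, assuming the PF tracks switchdev state in an is_running
 * flag inside its eswitch bookkeeping (struct ice_eswitch above is elided).
 * The helper name is hypothetical.
 */
static inline bool ice_example_is_switchdev_running(struct ice_pf *pf)
{
	return pf->eswitch.is_running;
}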

#define ICE_FD_STAT_CTR_BLOCK_COUNT
#define ICE_FD_STAT_PF_IDX(base_idx)
#define ICE_FD_SB_STAT_IDX(base_idx)
#define ICE_FD_STAT_CH
#define ICE_FD_CH_STAT_IDX(base_idx)

/**
 * ice_is_adq_active - any active ADQs
 * @pf: pointer to PF
 *
 * Returns true if any ADQ is configured, which is determined by looking at
 * the main VSI (type VSI_PF), its numtc, and the TC_MQPRIO flag; otherwise
 * returns false.
 */
static inline bool ice_is_adq_active(struct ice_pf *pf)
{}
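
/* Minimal sketch following the description above: look at the main (PF) VSI,
 * its number of traffic classes, and the TC_MQPRIO flag. ICE_FLAG_TC_MQPRIO
 * is assumed to live in the elided ice_pf_flags enum and pf->flags to be the
 * matching bitmap; the helper name is hypothetical.
 */
static inline bool ice_example_is_adq_active(struct ice_pf *pf)
{
	struct ice_vsi *vsi = ice_get_main_vsi(pf);

	if (!vsi)
		return false;

	return vsi->tc_cfg.numtc > ICE_CHNL_START_TC &&
	       test_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
}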

void ice_debugfs_fwlog_init(struct ice_pf *pf);
void ice_debugfs_pf_deinit(struct ice_pf *pf);
void ice_debugfs_init(void);
void ice_debugfs_exit(void);
void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module);

bool netif_is_ice(const struct net_device *dev);
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
int ice_vsi_open_ctrl(struct ice_vsi *vsi);
int ice_vsi_open(struct ice_vsi *vsi);
void ice_set_ethtool_ops(struct net_device *netdev);
void ice_set_ethtool_repr_ops(struct net_device *netdev);
void ice_set_ethtool_safe_mode_ops(struct net_device *netdev);
void ice_set_ethtool_sf_ops(struct net_device *netdev);
u16 ice_get_avail_txq_count(struct ice_pf *pf);
u16 ice_get_avail_rxq_count(struct ice_pf *pf);
int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked);
void ice_update_vsi_stats(struct ice_vsi *vsi);
void ice_update_pf_stats(struct ice_pf *pf);
void
ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
			     struct ice_q_stats stats, u64 *pkts, u64 *bytes);
int ice_up(struct ice_vsi *vsi);
int ice_down(struct ice_vsi *vsi);
int ice_down_up(struct ice_vsi *vsi);
int ice_vsi_cfg_lan(struct ice_vsi *vsi);
struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);

enum ice_xdp_cfg {};

int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
			  enum ice_xdp_cfg cfg_type);
int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type);
void ice_map_xdp_rings(struct ice_vsi *vsi);
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
	     u32 flags);
int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size);
int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size);
int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed);
int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed);
int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
int ice_plug_aux_dev(struct ice_pf *pf);
void ice_unplug_aux_dev(struct ice_pf *pf);
int ice_init_rdma(struct ice_pf *pf);
void ice_deinit_rdma(struct ice_pf *pf);
const char *ice_aq_str(enum ice_aq_err aq_err);
bool ice_is_wol_supported(struct ice_hw *hw);
void ice_fdir_del_all_fltrs(struct ice_vsi *vsi);
int
ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
		    bool is_tun);
void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena);
int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd);
int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd);
int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd);
int
ice_get_fdir_fltr_ids(struct ice_hw *hw, struct ethtool_rxnfc *cmd,
		      u32 *rule_locs);
void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx);
void ice_fdir_release_flows(struct ice_hw *hw);
void ice_fdir_replay_flows(struct ice_hw *hw);
void ice_fdir_replay_fltrs(struct ice_pf *pf);
int ice_fdir_create_dflt_rules(struct ice_pf *pf);

enum ice_aq_task_state {};

struct ice_aq_task {};

void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
			   u16 opcode);
int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
			  unsigned long timeout);
int ice_open(struct net_device *netdev);
int ice_open_internal(struct net_device *netdev);
int ice_stop(struct net_device *netdev);
void ice_service_task_schedule(struct ice_pf *pf);
int ice_load(struct ice_pf *pf);
void ice_unload(struct ice_pf *pf);
void ice_adv_lnk_speed_maps_init(void);
int ice_init_dev(struct ice_pf *pf);
void ice_deinit_dev(struct ice_pf *pf);
int ice_change_mtu(struct net_device *netdev, int new_mtu);
void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue);
int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp);
void ice_set_netdev_features(struct net_device *netdev);
int ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid);
int ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid);
void ice_get_stats64(struct net_device *netdev,
		     struct rtnl_link_stats64 *stats);

/**
 * ice_set_rdma_cap - enable RDMA support
 * @pf: PF struct
 */
static inline void ice_set_rdma_cap(struct ice_pf *pf)
{}

/**
 * ice_clear_rdma_cap - disable RDMA support
 * @pf: PF struct
 */
static inline void ice_clear_rdma_cap(struct ice_pf *pf)
{}
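
/* Minimal sketches, assuming RDMA support is tracked as a PF flag:
 * ICE_FLAG_RDMA_ENA is presumed to live in the elided ice_pf_flags enum and
 * pf->flags to be the matching bitmap. Helper names are hypothetical, and the
 * real functions may additionally check device capabilities and MSI-X
 * availability.
 */
static inline void ice_example_set_rdma_cap(struct ice_pf *pf)
{
	set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
}

static inline void ice_example_clear_rdma_cap(struct ice_pf *pf)
{
	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
}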

extern const struct xdp_metadata_ops ice_xdp_md_ops;
#endif /* _ICE_H_ */