/* linux/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c */

/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt)

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/bonding.h>
#include <linux/uaccess.h>
#include <linux/crash_dump.h>
#include <net/udp_tunnel.h>
#include <net/xfrm.h>
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
#include <net/tls.h>
#endif

#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4fw_version.h"
#include "cxgb4_dcb.h"
#include "srq.h"
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"
#include "smt.h"
#include "sched.h"
#include "cxgb4_tc_u32.h"
#include "cxgb4_tc_flower.h"
#include "cxgb4_tc_mqprio.h"
#include "cxgb4_tc_matchall.h"
#include "cxgb4_ptp.h"
#include "cxgb4_cudbg.h"

char cxgb4_driver_name[] =;

#define DRV_DESC

#define DFLT_MSG_ENABLE

/* Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
#define CXGB4_UNIFIED_PF

#define CH_PCI_DEVICE_ID_FUNCTION

/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
 * called for both.
 */
#define CH_PCI_DEVICE_ID_FUNCTION2

#define CH_PCI_ID_TABLE_ENTRY(devid)

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END

#include "t4_pci_id_tbl.h"

#define FW4_FNAME
#define FW5_FNAME
#define FW6_FNAME
#define FW4_CFNAME
#define FW5_CFNAME
#define FW6_CFNAME
#define PHY_AQ1202_FIRMWARE
#define PHY_BCM84834_FIRMWARE
#define PHY_AQ1202_DEVICEID
#define PHY_BCM84834_DEVICEID

MODULE_DESCRIPTION();
MODULE_AUTHOR();
MODULE_LICENSE();
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE();
MODULE_FIRMWARE();
MODULE_FIRMWARE();

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi =;

module_param(msi, int, 0644);
MODULE_PARM_DESC();

/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset =;

/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue. Select between the kernel provided function (select_queue=0) or user
 * cxgb_select_queue function (select_queue=1)
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC();

static struct dentry *cxgb4_debugfs_root;

LIST_HEAD();
DEFINE_MUTEX();
LIST_HEAD();

static int cfg_queues(struct adapter *adap);

/* Report a change in a port's link state via the kernel log.
 * NOTE(review): body stripped in this view — presumably logs link
 * up/down along with speed/duplex for @dev; confirm against full source.
 */
static void link_report(struct net_device *dev)
{}

#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{}

int cxgb4_dcb_enabled(const struct net_device *dev)
{}
#endif /* CONFIG_CHELSIO_T4_DCB */

void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{}

void t4_os_portmod_changed(struct adapter *adap, int port_id)
{}

int dbfifo_int_thresh =; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC();

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay =;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC();

static inline int cxgb4_set_addr_hash(struct port_info *pi)
{}

static int cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr)
{}

static int cxgb4_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
{}

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{}

/**
 *	cxgb4_change_mac - Update match filter for a MAC address.
 *	@pi: the port_info
 *	@viid: the VI id
 *	@tcam_idx: TCAM index of existing filter for old value of MAC address,
 *		   or -1
 *	@addr: the new MAC address value
 *	@persist: whether a new MAC allocation should be persistent
 *	@smt_idx: the destination to store the new SMT index.
 *
 *	Modifies an MPS filter and sets it to the new MAC address if
 *	@tcam_idx >= 0, or adds the MAC address to a new filter if
 *	@tcam_idx < 0. In the latter case the address is added persistently
 *	if @persist is %true.
 *	Addresses are programmed to hash region, if tcam runs out of entries.
 *
 */
int cxgb4_change_mac(struct port_info *pi, unsigned int viid,
		     int *tcam_idx, const u8 *addr, bool persist,
		     u8 *smt_idx)
{}

/*
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{}

#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{}
#endif /* CONFIG_CHELSIO_T4_DCB */

/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{}

/* Tear down the adapter's MSI/MSI-X interrupt configuration.
 * NOTE(review): body stripped in this view — presumably frees MSI-X
 * vector state and calls pci_disable_msix()/pci_disable_msi() as
 * appropriate; confirm against full source.
 */
static void disable_msi(struct adapter *adapter)
{}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{}

int cxgb4_set_msix_aff(struct adapter *adap, unsigned short vec,
		       cpumask_var_t *aff_mask, int idx)
{}

void cxgb4_clear_msix_aff(unsigned short vec, cpumask_var_t aff_mask)
{}

static int request_msix_queue_irqs(struct adapter *adap)
{}

static void free_msix_queue_irqs(struct adapter *adap)
{}

static int setup_ppod_edram(struct adapter *adap)
{}

static void adap_config_hpfilter(struct adapter *adapter)
{}

static int cxgb4_config_rss(const struct port_info *pi, u16 *rss,
			    u16 rss_size, u16 viid)
{}

/**
 *	cxgb4_write_rss - write the RSS table for a given port
 *	@pi: the port
 *	@queues: array of queue indices for RSS
 *
 *	Sets up the portion of the HW RSS table for the port's VI to distribute
 *	packets to the Rx queues in @queues.
 *	Should never be called before setting up sge eth rx queues
 */
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
{}

/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{}

/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{}

void cxgb4_quiesce_rx(struct sge_rspq *q)
{}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{}

/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{}

void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q)
{}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{}

static int setup_non_data_intr(struct adapter *adap)
{}

static int setup_fw_sge_queues(struct adapter *adap)
{}

/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{}

static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
			     struct net_device *sb_dev)
{}

/* Return the index of the SGE hold-off timer value in @s closest to @time.
 * NOTE(review): body stripped in this view — assumed to scan the
 * adapter's timer_val[] table for the nearest entry; confirm against
 * full source.
 */
static int closest_timer(const struct sge *s, int time)
{}

/* Return the index of the SGE interrupt packet-count threshold in @s
 * closest to @thres.
 * NOTE(review): body stripped in this view — assumed to scan the
 * adapter's counter_val[] table for the nearest entry; confirm against
 * full source.
 */
static int closest_thres(const struct sge *s, int thres)
{}

/**
 *	cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
 *	@q: the Rx queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *	one of the two needs to be enabled for the queue to generate interrupts.
 */
int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
			       unsigned int us, unsigned int cnt)
{}

static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{}

static int setup_debugfs(struct adapter *adap)
{}

static void cxgb4_port_mirror_free_rxq(struct adapter *adap,
				       struct sge_eth_rxq *mirror_rxq)
{}

static int cxgb4_port_mirror_alloc_queues(struct net_device *dev)
{}

static void cxgb4_port_mirror_free_queues(struct net_device *dev)
{}

static int cxgb4_port_mirror_start(struct net_device *dev)
{}

static void cxgb4_port_mirror_stop(struct net_device *dev)
{}

int cxgb4_port_mirror_alloc(struct net_device *dev)
{}

void cxgb4_port_mirror_free(struct net_device *dev)
{}

/*
 * upper-layer driver support
 */

/*
 * Allocate an active-open TID and set it to the supplied value.
 */
int cxgb4_alloc_atid(struct tid_info *t, void *data)
{}
EXPORT_SYMBOL();

/*
 * Release an active-open TID.
 */
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{}
EXPORT_SYMBOL();

/*
 * Allocate a server TID and set it to the supplied value.
 */
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
{}
EXPORT_SYMBOL();

/* Allocate a server filter TID and set it to the supplied value.
 */
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
{}
EXPORT_SYMBOL();

/* Release a server TID.
 */
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{}
EXPORT_SYMBOL();

/*
 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
			   unsigned int tid)
{}

/*
 * Queue a TID release request and if necessary schedule a work queue to
 * process it.
 */
static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
				    unsigned int tid)
{}

/*
 * Process the list of pending TID release requests.
 */
static void process_tid_release_list(struct work_struct *work)
{}

/*
 * Release a TID and inform HW.  If we are unable to allocate the release
 * message we defer to a work queue.
 */
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
		      unsigned short family)
{}
EXPORT_SYMBOL();

/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int tid_init(struct tid_info *t)
{}

/**
 *	cxgb4_create_server - create an IP server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IP address to bind server to
 *	@sport: the server's TCP port
 *	@vlan: the VLAN header information
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IP server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
			__be32 sip, __be16 sport, __be16 vlan,
			unsigned int queue)
{}
EXPORT_SYMBOL();

/*	cxgb4_create_server6 - create an IPv6 server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IPv6 address to bind server to
 *	@sport: the server's TCP port
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IPv6 server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
			 const struct in6_addr *sip, __be16 sport,
			 unsigned int queue)
{}
EXPORT_SYMBOL();

int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
			unsigned int queue, bool ipv6)
{}
EXPORT_SYMBOL();

/**
 *	cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
 *	@mtus: the HW MTU table
 *	@mtu: the target MTU
 *	@idx: index of selected entry in the MTU table
 *
 *	Returns the index and the value in the HW MTU table that is closest to
 *	but does not exceed @mtu, unless @mtu is smaller than any value in the
 *	table, in which case that smallest available value is selected.
 */
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
			    unsigned int *idx)
{}
EXPORT_SYMBOL();

/**
 *     cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
 *     @mtus: the HW MTU table
 *     @header_size: Header Size
 *     @data_size_max: maximum Data Segment Size
 *     @data_size_align: desired Data Segment Size Alignment (2^N)
 *     @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
 *
 *     Similar to cxgb4_best_mtu() but instead of searching the Hardware
 *     MTU Table based solely on a Maximum MTU parameter, we break that
 *     parameter up into a Header Size and Maximum Data Segment Size, and
 *     provide a desired Data Segment Size Alignment.  If we find an MTU in
 *     the Hardware MTU Table which will result in a Data Segment Size with
 *     the requested alignment _and_ that MTU isn't "too far" from the
 *     closest MTU, then we'll return that rather than the closest MTU.
 */
unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
				    unsigned short header_size,
				    unsigned short data_size_max,
				    unsigned short data_size_align,
				    unsigned int *mtu_idxp)
{}
EXPORT_SYMBOL();

/**
 *	cxgb4_port_chan - get the HW channel of a port
 *	@dev: the net device for the port
 *
 *	Return the HW Tx channel of the given port.
 */
unsigned int cxgb4_port_chan(const struct net_device *dev)
{}
EXPORT_SYMBOL();

/**
 *      cxgb4_port_e2cchan - get the HW c-channel of a port
 *      @dev: the net device for the port
 *
 *      Return the HW RX c-channel of the given port.
 */
unsigned int cxgb4_port_e2cchan(const struct net_device *dev)
{}
EXPORT_SYMBOL();

unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
{}
EXPORT_SYMBOL();

/**
 *	cxgb4_port_viid - get the VI id of a port
 *	@dev: the net device for the port
 *
 *	Return the VI id of the given port.
 */
unsigned int cxgb4_port_viid(const struct net_device *dev)
{}
EXPORT_SYMBOL();

/**
 *	cxgb4_port_idx - get the index of a port
 *	@dev: the net device for the port
 *
 *	Return the index of the given port.
 */
unsigned int cxgb4_port_idx(const struct net_device *dev)
{}
EXPORT_SYMBOL();

void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{}
EXPORT_SYMBOL();

void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
		      const unsigned int *pgsz_order)
{}
EXPORT_SYMBOL();

int cxgb4_flush_eq_cache(struct net_device *dev)
{}
EXPORT_SYMBOL();

/* Read the producer (@pidx) and consumer (@cidx) indices of the egress
 * queue with hardware id @qid.  Returns 0 on success or a negative errno.
 * NOTE(review): body stripped in this view — presumably reads the EQ
 * context via a memory window or firmware command; confirm against
 * full source.
 */
static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
{}

int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
			u16 size)
{}
EXPORT_SYMBOL();

int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
{}
EXPORT_SYMBOL();

u64 cxgb4_read_sge_timestamp(struct net_device *dev)
{}
EXPORT_SYMBOL();

int cxgb4_bar2_sge_qregs(struct net_device *dev,
			 unsigned int qid,
			 enum cxgb4_bar2_qtype qtype,
			 int user,
			 u64 *pbar2_qoffset,
			 unsigned int *pbar2_qid)
{}
EXPORT_SYMBOL();

static struct pci_driver cxgb4_driver;

static void check_neigh_update(struct neighbour *neigh)
{}

static int netevent_cb(struct notifier_block *nb, unsigned long event,
		       void *data)
{}

static bool netevent_registered;
static struct notifier_block cxgb4_netevent_nb =;

static void drain_db_fifo(struct adapter *adap, int usecs)
{}

static void disable_txq_db(struct sge_txq *q)
{}

static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
{}

static void disable_dbs(struct adapter *adap)
{}

static void enable_dbs(struct adapter *adap)
{}

static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
{}

static void process_db_full(struct work_struct *work)
{}

static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
{}

static void recover_all_queues(struct adapter *adap)
{}

static void process_db_drop(struct work_struct *work)
{}

void t4_db_full(struct adapter *adap)
{}

void t4_db_dropped(struct adapter *adap)
{}

void t4_register_netevent_notifier(void)
{}

static void detach_ulds(struct adapter *adap)
{}

static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
{}

#if IS_ENABLED(CONFIG_IPV6)
static int cxgb4_inet6addr_handler(struct notifier_block *this,
				   unsigned long event, void *data)
{}

static bool inet6addr_registered;
static struct notifier_block cxgb4_inet6addr_notifier =;

static void update_clip(const struct adapter *adap)
{}
#endif /* IS_ENABLED(CONFIG_IPV6) */

/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{}

static void cxgb_down(struct adapter *adapter)
{}

/*
 * net_device operations
 */
static int cxgb_open(struct net_device *dev)
{}

static int cxgb_close(struct net_device *dev)
{}

int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
		__be32 sip, __be16 sport, __be16 vlan,
		unsigned int queue, unsigned char port, unsigned char mask)
{}
EXPORT_SYMBOL();

int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
		unsigned int queue, bool ipv6)
{}
EXPORT_SYMBOL();

static void cxgb_get_stats(struct net_device *dev,
			   struct rtnl_link_stats64 *ns)
{}

static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{}

static void cxgb_set_rxmode(struct net_device *dev)
{}

static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{}

#ifdef CONFIG_PCI_IOV
static int cxgb4_mgmt_open(struct net_device *dev)
{}

/* Fill MAC address that will be assigned by the FW */
static void cxgb4_mgmt_fill_vf_station_mac_addr(struct adapter *adap)
{}

static int cxgb4_mgmt_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{}

static int cxgb4_mgmt_get_vf_config(struct net_device *dev,
				    int vf, struct ifla_vf_info *ivi)
{}

static int cxgb4_mgmt_get_phys_port_id(struct net_device *dev,
				       struct netdev_phys_item_id *ppid)
{}

static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
				  int min_tx_rate, int max_tx_rate)
{}

static int cxgb4_mgmt_set_vf_vlan(struct net_device *dev, int vf,
				  u16 vlan, u8 qos, __be16 vlan_proto)
{}

static int cxgb4_mgmt_set_vf_link_state(struct net_device *dev, int vf,
					int link)
{}
#endif /* CONFIG_PCI_IOV */

static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{}
#endif

static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{}

static int cxgb_setup_tc_flower(struct net_device *dev,
				struct flow_cls_offload *cls_flower)
{}

static int cxgb_setup_tc_cls_u32(struct net_device *dev,
				 struct tc_cls_u32_offload *cls_u32)
{}

static int cxgb_setup_tc_matchall(struct net_device *dev,
				  struct tc_cls_matchall_offload *cls_matchall,
				  bool ingress)
{}

static int cxgb_setup_tc_block_ingress_cb(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{}

static int cxgb_setup_tc_block_egress_cb(enum tc_setup_type type,
					 void *type_data, void *cb_priv)
{}

static int cxgb_setup_tc_mqprio(struct net_device *dev,
				struct tc_mqprio_qopt_offload *mqprio)
{}

static LIST_HEAD(cxgb_block_cb_list);

static int cxgb_setup_tc_block(struct net_device *dev,
			       struct flow_block_offload *f)
{}

static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
			 void *type_data)
{}

static int cxgb_udp_tunnel_unset_port(struct net_device *netdev,
				      unsigned int table, unsigned int entry,
				      struct udp_tunnel_info *ti)
{}

static int cxgb_udp_tunnel_set_port(struct net_device *netdev,
				    unsigned int table, unsigned int entry,
				    struct udp_tunnel_info *ti)
{}

static const struct udp_tunnel_nic_info cxgb_udp_tunnels =;

static netdev_features_t cxgb_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{}

static netdev_features_t cxgb_fix_features(struct net_device *dev,
					   netdev_features_t features)
{}

static const struct net_device_ops cxgb4_netdev_ops =;

#ifdef CONFIG_PCI_IOV
static const struct net_device_ops cxgb4_mgmt_netdev_ops =;

static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
				   struct ethtool_drvinfo *info)
{}

static const struct ethtool_ops cxgb4_mgmt_ethtool_ops =;
#endif

static void notify_fatal_err(struct work_struct *work)
{}

void t4_fatal_err(struct adapter *adap)
{}

static void setup_memwin(struct adapter *adap)
{}

static void setup_memwin_rdma(struct adapter *adap)
{}

/* HMA Definitions */

/* The maximum number of addresses that can be sent in a single FW cmd */
#define HMA_MAX_ADDR_IN_CMD

#define HMA_PAGE_SIZE

#define HMA_MAX_NO_FW_ADDRESS

#define HMA_PAGE_ORDER

/* The minimum and maximum possible HMA sizes that can be specified in the FW
 * configuration(in units of MB).
 */
#define HMA_MIN_TOTAL_SIZE
#define HMA_MAX_TOTAL_SIZE

static void adap_free_hma_mem(struct adapter *adapter)
{}

static int adap_config_hma(struct adapter *adapter)
{}

static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
{}

/*
 * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
 */
#define MAX_ATIDS

/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 *
 * If the firmware we're dealing with has Configuration File support, then
 * we use that to perform all configuration
 */

/*
 * Tweak configuration based on module parameters, etc.  Most of these have
 * defaults assigned to them by Firmware Configuration Files (if we're using
 * them) but need to be explicitly set if we're using hard-coded
 * initialization.  But even in the case of using Firmware Configuration
 * Files, we'd like to expose the ability to change these via module
 * parameters so these are essentially common tweaks/settings for
 * Configuration Files and hard-coded initialization ...
 */
static int adap_init0_tweaks(struct adapter *adapter)
{}

/* 10Gb/s-BT PHY Support. chip-external 10Gb/s-BT PHYs are complex chips
 * unto themselves and they contain their own firmware to perform their
 * tasks ...
 */
static int phy_aq1202_version(const u8 *phy_fw_data,
			      size_t phy_fw_size)
{}

static struct info_10gbt_phy_fw {} phy_info_array[] =;

static struct info_10gbt_phy_fw *find_phy_info(int devid)
{}

/* Handle updating of chip-external 10Gb/s-BT PHY firmware.  This needs to
 * happen after the FW_RESET_CMD but before the FW_INITIALIZE_CMD.  On error
 * we return a negative error number.  If we transfer new firmware we return 1
 * (from t4_load_phy_fw()).  If we don't do anything we return 0.
 */
static int adap_init0_phy(struct adapter *adap)
{}

/*
 * Attempt to initialize the adapter via a Firmware Configuration File.
 */
static int adap_init0_config(struct adapter *adapter, int reset)
{}

static struct fw_info fw_info_array[] =;

static struct fw_info *find_fw_info(int chip)
{}

/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 */
static int adap_init0(struct adapter *adap, int vpd_skip)
{}

/* EEH callbacks */

static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
					 pci_channel_state_t state)
{}

static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
{}

static void eeh_resume(struct pci_dev *pdev)
{}

static void eeh_reset_prepare(struct pci_dev *pdev)
{}

static void eeh_reset_done(struct pci_dev *pdev)
{}

static const struct pci_error_handlers cxgb4_eeh =;

/* Return true if the Link Configuration supports "High Speeds" (those greater
 * than 1Gb/s).
 */
static inline bool is_x_10g_port(const struct link_config *lc)
{}

/* Perform default configuration of DMA queues depending on the number and type
 * of ports we found and the number of available CPUs.  Most settings can be
 * modified by the admin prior to actual use.
 */
static int cfg_queues(struct adapter *adap)
{}

/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void reduce_ethqs(struct adapter *adap, int n)
{}

static int alloc_msix_info(struct adapter *adap, u32 num_vec)
{}

static void free_msix_info(struct adapter *adap)
{}

int cxgb4_get_msix_idx_from_bmap(struct adapter *adap)
{}

void cxgb4_free_msix_idx_in_bmap(struct adapter *adap,
				 unsigned int msix_idx)
{}

/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS

static int enable_msix(struct adapter *adap)
{}

#undef EXTRA_VECS

static int init_rss(struct adapter *adap)
{}

/* Dump basic information about the adapter */
static void print_adapter_info(struct adapter *adapter)
{}

static void print_port_info(const struct net_device *dev)
{}

/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */
static void free_some_resources(struct adapter *adapter)
{}

#define TSO_FLAGS
#define VLAN_FEAT
#define SEGMENT_SIZE

/* Map a PCI device-id version nibble (@ver) to a CHELSIO_CHIP_CODE()
 * chip type for @adap.  Returns the chip type or a negative errno for
 * an unsupported version.
 * NOTE(review): body stripped in this view — return-value convention
 * inferred from the int return type and kernel style; confirm against
 * full source.
 */
static int t4_get_chip_type(struct adapter *adap, int ver)
{}

#ifdef CONFIG_PCI_IOV
static void cxgb4_mgmt_setup(struct net_device *dev)
{}

static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
{}
#endif /* CONFIG_PCI_IOV */

#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE) || IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)

static int chcr_offload_state(struct adapter *adap,
			      enum cxgb4_netdev_tls_ops op_val)
{}

#endif /* CONFIG_CHELSIO_TLS_DEVICE || CONFIG_CHELSIO_IPSEC_INLINE */

#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)

static int cxgb4_ktls_dev_add(struct net_device *netdev, struct sock *sk,
			      enum tls_offload_ctx_dir direction,
			      struct tls_crypto_info *crypto_info,
			      u32 tcp_sn)
{}

static void cxgb4_ktls_dev_del(struct net_device *netdev,
			       struct tls_context *tls_ctx,
			       enum tls_offload_ctx_dir direction)
{}

static const struct tlsdev_ops cxgb4_ktls_ops =;
#endif /* CONFIG_CHELSIO_TLS_DEVICE */

#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)

static int cxgb4_xfrm_add_state(struct xfrm_state *x,
				struct netlink_ext_ack *extack)
{}

static void cxgb4_xfrm_del_state(struct xfrm_state *x)
{}

static void cxgb4_xfrm_free_state(struct xfrm_state *x)
{}

static bool cxgb4_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{}

static void cxgb4_advance_esn_state(struct xfrm_state *x)
{}

static const struct xfrmdev_ops cxgb4_xfrmdev_ops =;

#endif /* CONFIG_CHELSIO_IPSEC_INLINE */

static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{}

static void remove_one(struct pci_dev *pdev)
{}

/* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt
 * delivery.  This is essentially a stripped down version of the PCI remove()
 * function where we do the minimal amount of work necessary to shutdown any
 * further activity.
 */
static void shutdown_one(struct pci_dev *pdev)
{}

static struct pci_driver cxgb4_driver =;

static int __init cxgb4_init_module(void)
{}

static void __exit cxgb4_cleanup_module(void)
{}

module_init();
module_exit(cxgb4_cleanup_module);