/* linux/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c */

/* bnx2x_cmn.c: QLogic Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 * Copyright (c) 2014 QLogic Corporation
 * All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Ariel Elior <[email protected]>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/crash_dump.h>
#include <net/tcp.h>
#include <net/gro.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
{}

static void bnx2x_add_all_napi(struct bnx2x *bp)
{}

static int bnx2x_calc_num_queues(struct bnx2x *bp)
{}

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of the bp->fp[to].napi are kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then memcpying the entire
 * source onto the target. Txdata pointers and related content
 * are updated as well.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{}
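
/* Illustrative sketch (hypothetical helper, not part of the driver): the
 * napi-preserving move described in the comment above, shown on a generic
 * fastpath-like struct.  The destination's napi struct is staged into the
 * source first, so the subsequent whole-struct copy carries it back intact.
 */
struct example_fp {
	struct napi_struct napi;
	/* ... remaining fastpath state ... */
};

static inline void example_move_fp(struct example_fp *from,
				   struct example_fp *to)
{
	/* keep to->napi: stage it in the source before the bulk copy */
	memcpy(&from->napi, &to->napi, sizeof(from->napi));
	/* copy everything; to->napi is restored as a side effect */
	memcpy(to, from, sizeof(*to));
}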

/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp:        driver handle
 * @buf:       character buffer to fill with the fw name
 * @buf_len:   length of the above buffer
 *
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{}

/**
 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 *
 * @bp:	driver handle
 * @delta:	number of eth queues which were not allocated
 */
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{}

int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{}

int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{}

static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{}

/* Get the Toeplitz hash value for the skb from the CQE
 * (the hash itself is calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe,
			    enum pkt_hash_types *rxhash_type)
{}
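
/* Usage sketch (hypothetical caller name): the hash and hash type produced
 * above are expected to be handed to the stack via skb_set_hash(), which is
 * the standard kernel API for this.
 */
static inline void example_store_rxhash(struct sk_buff *skb, u32 rxhash,
					enum pkt_hash_types rxhash_type)
{
	skb_set_hash(skb, rxhash, rxhash_type);
}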

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{}

/* Timestamp option length allowed for TPA aggregation:
 *
 *	nop nop kind length echo val
 *	 1 + 1 +  1  +  1   + 4  + 4  = 12 bytes
 */
#define TPA_TSTAMP_OPT_LEN	12
/**
 * bnx2x_set_gro_params - compute GRO values
 *
 * @skb:		packet skb
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 * @pkt_len:		length of all segments
 * @num_of_coalesced_segs: count of segments
 *
 * Approximates the MSS for this aggregation using its first
 * packet, computes the number of coalesced segments, and sets
 * the gso_type.
 */
static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
				 u16 len_on_bd, unsigned int pkt_len,
				 u16 num_of_coalesced_segs)
{}
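
/* Sketch of the MSS/segment math described above (the skb_shared_info
 * fields are real kernel API; hdrs_len and the IPv4/TCP choice are
 * illustrative assumptions): the MSS is approximated as the first packet's
 * payload on the BD, i.e. len_on_bd minus the header bytes.
 */
static inline void example_set_gro_params(struct sk_buff *skb, u16 len_on_bd,
					  u16 num_of_coalesced_segs)
{
	u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr);

	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len; /* approx. MSS */
	skb_shinfo(skb)->gso_segs = num_of_coalesced_segs;
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;	  /* assumes IPv4 */
}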

static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      u16 index, gfp_t gfp_mask)
{}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{}

static struct sk_buff *
bnx2x_build_skb(const struct bnx2x_fastpath *fp, void *data)
{}

static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{}

static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
{}

#ifdef CONFIG_INET
static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
{}

static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
{}

static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
			    void (*gro_func)(struct bnx2x*, struct sk_buff*))
{}
#endif

static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb)
{}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{}

static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       u16 index, gfp_t gfp_mask)
{}

static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
				struct bnx2x_fastpath *fp,
				struct bnx2x_eth_q_stats *qstats)
{}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{}

/* calculates MF speed according to the current line speed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{}
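
/* Sketch of the MF (multi-function) speed scaling (the percentage-based
 * formula here is an assumption for illustration): in MF mode the reported
 * speed is the physical line speed scaled by the function's configured
 * maximum bandwidth share.
 */
static inline u16 example_mf_speed(u16 line_speed, u16 max_cfg_percent)
{
	return line_speed * max_cfg_percent / 100;
}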

/**
 * bnx2x_fill_report_data - fill the link report data to be reported
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
static void bnx2x_fill_report_data(struct bnx2x *bp,
				   struct bnx2x_link_report_data *data)
{}
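
/* Sketch of the non-atomic bit usage noted above: under the mutex the
 * cheaper double-underscore variants are safe.  The bit number stands in
 * for a real link-report flag.
 */
static inline void example_mark_link_down(unsigned long *link_report_flags)
{
	__set_bit(0 /* e.g. a LINK_DOWN flag bit */, link_report_flags);
}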

/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls __bnx2x_link_report() under the same locking scheme
 * as the link/PHY state managing code to ensure consistent
 * link reporting.
 */
void bnx2x_link_report(struct bnx2x *bp)
{}

/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{}

static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{}

static void bnx2x_free_tpa_pool(struct bnx2x *bp,
				struct bnx2x_fastpath *fp, int last)
{}

void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
{}

void bnx2x_init_rx_rings(struct bnx2x *bp)
{}

static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
{}

static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
{}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{}

static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
{}

static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
{}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{}

static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
{}

void bnx2x_free_skbs(struct bnx2x *bp)
{}

void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
{}

/**
 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
 *
 * @bp:		driver handle
 * @nvecs:	number of vectors to be released
 */
static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
{}

void bnx2x_free_irq(struct bnx2x *bp)
{}

int bnx2x_enable_msix(struct bnx2x *bp)
{}

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{}

int bnx2x_enable_msi(struct bnx2x *bp)
{}

static int bnx2x_req_irq(struct bnx2x *bp)
{}

static int bnx2x_setup_irqs(struct bnx2x *bp)
{}

static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
{}

static void bnx2x_napi_enable(struct bnx2x *bp)
{}

static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
{}

static void bnx2x_napi_disable(struct bnx2x *bp)
{}

void bnx2x_netif_start(struct bnx2x *bp)
{}

void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{}

u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
		       struct net_device *sb_dev)
{}

void bnx2x_set_num_queues(struct bnx2x *bp)
{}

/**
 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
 *
 * @bp:		Driver handle
 * @include_cnic: handle cnic case
 *
 * We currently support at most 16 Tx queues for each CoS, thus we will
 * allocate a multiple of 16 for ETH L2 rings according to the value of
 * bp->max_cos.
 *
 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
 * index after all ETH L2 indices.
 *
 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
 * 16..31,...) with indices that are not coupled with any real Tx queue.
 *
 * The proper configuration of skb->queue_mapping is handled by
 * bnx2x_select_queue() and __skb_tx_hash().
 *
 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
 */
static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
{}
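
/* Sketch of the stride-16 indexing described above (the constant and
 * helper are illustrative): the ring for (cos, queue) lives at
 * cos * 16 + queue, which leaves holes when a CoS has fewer than 16 real
 * Tx queues.
 */
#define EXAMPLE_MAX_TXQS_PER_COS	16

static inline u16 example_tx_ring_index(u8 cos, u16 queue)
{
	return cos * EXAMPLE_MAX_TXQS_PER_COS + queue;
}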

static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
{}

static int bnx2x_init_rss(struct bnx2x *bp)
{}

int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
	      bool config_hash, bool enable)
{}

static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{}

/*
 * Cleans the objects that have internal lists without sending
 * ramrods. Should be run when interrupts are disabled.
 */
void bnx2x_squeeze_objects(struct bnx2x *bp)
{}

#ifndef BNX2X_STOP_ON_ERROR
#define LOAD_ERROR_EXIT(bp, label) \
	do { (bp)->state = BNX2X_STATE_ERROR; goto label; } while (0)

#define LOAD_ERROR_EXIT_CNIC(bp, label) \
	do { (bp)->cnic_loaded = false; goto label; } while (0)
#else /*BNX2X_STOP_ON_ERROR*/
#define LOAD_ERROR_EXIT(bp, label) \
	do { (bp)->state = BNX2X_STATE_ERROR; (bp)->panic = 1; \
	     return -EBUSY; } while (0)
#define LOAD_ERROR_EXIT_CNIC(bp, label) \
	do { (bp)->cnic_spq_pending = 0; (bp)->panic = 1; \
	     return -EBUSY; } while (0)
#endif /*BNX2X_STOP_ON_ERROR*/

static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
{}

static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
{}

/* send load request to mcp and analyze response */
static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
{}

/* check whether another PF has already loaded FW to chip. In
 * virtualized environments a PF from another VM may have already
 * initialized the device, including loading the FW.
 */
int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
{}

/* returns the "mcp load_code" according to global load_count array */
static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
{}

/* mark PMF if applicable */
static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
{}

static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
{}

/**
 * bnx2x_bz_fp - zero content of the fastpath structure.
 *
 * @bp:		driver handle
 * @index:	fastpath index to be zeroed
 *
 * Makes sure the contents of the bp->fp[index].napi are kept
 * intact.
 */
static void bnx2x_bz_fp(struct bnx2x *bp, int index)
{}
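
/* Sketch of the zero-but-keep-napi contract above (reuses the hypothetical
 * struct example_fp from the bnx2x_move_fp() sketch): stash the napi
 * struct, wipe the fastpath, then put the napi struct back.
 */
static inline void example_bz_fp(struct example_fp *fp)
{
	struct napi_struct orig_napi = fp->napi;

	memset(fp, 0, sizeof(*fp));	/* zero everything ... */
	fp->napi = orig_napi;		/* ... except the napi struct */
}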

void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
{}

int bnx2x_load_cnic(struct bnx2x *bp)
{}

/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{}

int bnx2x_drain_tx_queues(struct bnx2x *bp)
{}

/* must be called with rtnl_lock */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
{}

int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{}

/*
 * net_device service functions
 */
static int bnx2x_poll(struct napi_struct *napi, int budget)
{}

/* We split the first BD into a header BD and a data BD to ease the pain
 * of our fellow microcode engineers; we use one mapping for both BDs.
 */
static u16 bnx2x_tx_split(struct bnx2x *bp,
			  struct bnx2x_fp_txdata *txdata,
			  struct sw_tx_bd *tx_buf,
			  struct eth_tx_start_bd **tx_bd, u16 hlen,
			  u16 bd_prod)
{}
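
/* Sketch of the single-mapping split described above (the struct and field
 * names are illustrative, not the real eth_tx_start_bd layout; U64_HI()
 * and U64_LO() are the driver's own address-split helpers): the header BD
 * keeps the original DMA address with nbytes = hlen, and the data BD
 * starts hlen bytes into the same mapping.
 */
struct example_tx_bd {
	__le32 addr_hi, addr_lo;
	__le16 nbytes;
};

static inline void example_tx_split(struct example_tx_bd *h_bd,
				    struct example_tx_bd *d_bd,
				    dma_addr_t mapping, u16 hlen, u16 total)
{
	h_bd->nbytes = cpu_to_le16(hlen);
	d_bd->addr_hi = cpu_to_le32(U64_HI(mapping + hlen));
	d_bd->addr_lo = cpu_to_le32(U64_LO(mapping + hlen));
	d_bd->nbytes = cpu_to_le16(total - hlen);
}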

#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{}

static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{}

/* VXLAN: 4 = 1 (for linear data BD) + 3 (2 for PBDs and last BD) */
#define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS	4

/* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
#define BNX2X_NUM_TSO_WIN_SUB_BDS	3

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
/* Check if the packet requires linearization (packet is too fragmented).
 * No need to check fragmentation if the page size > 8K (there will be no
 * violation of FW restrictions).
 */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{}
#endif
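
/* Simplified sketch of why linearization can be needed (an assumption-laden
 * illustration, not the driver's real windowed-MSS check): the linear part
 * and every page fragment each consume a BD, so a heavily fragmented packet
 * can exceed what the FW fetches per packet.
 */
static inline bool example_too_fragmented(const struct sk_buff *skb,
					  int max_fetch_bds)
{
	/* one BD for the linear part, one per page fragment */
	return 1 + skb_shinfo(skb)->nr_frags > max_fetch_bds;
}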

/**
 * bnx2x_set_pbd_gso - update PBD in GSO case.
 *
 * @skb:	packet skb
 * @pbd:	parse BD
 * @xmit_type:	xmit flags
 */
static void bnx2x_set_pbd_gso(struct sk_buff *skb,
			      struct eth_tx_parse_bd_e1x *pbd,
			      u32 xmit_type)
{}

/**
 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
 *
 * @bp:			driver handle
 * @skb:		packet skb
 * @parsing_data:	data to be updated
 * @xmit_type:		xmit flags
 *
 * 57712/578xx related, when skb has encapsulation
 */
static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
				 u32 *parsing_data, u32 xmit_type)
{}

/**
 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
 *
 * @bp:			driver handle
 * @skb:		packet skb
 * @parsing_data:	data to be updated
 * @xmit_type:		xmit flags
 *
 * 57712/578xx related
 */
static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
				u32 *parsing_data, u32 xmit_type)
{}

/* set FW indication according to inner or outer protocols if tunneled */
static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
			       struct eth_tx_start_bd *tx_start_bd,
			       u32 xmit_type)
{}

/**
 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
 *
 * @bp:		driver handle
 * @skb:	packet skb
 * @pbd:	parse BD to be updated
 * @xmit_type:	xmit flags
 */
static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
			     struct eth_tx_parse_bd_e1x *pbd,
			     u32 xmit_type)
{}

static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
				      struct eth_tx_parse_bd_e2 *pbd_e2,
				      struct eth_tx_parse_2nd_bd *pbd2,
				      u16 *global_data,
				      u32 xmit_type)
{}

static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
					 u32 xmit_type)
{}

/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{}

void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
{}

/**
 * bnx2x_setup_tc - routine to configure a net_device for multi-TC
 *
 * @dev: net device to configure
 * @num_tc: number of traffic classes to enable
 *
 * callback connected to the ndo_setup_tc function pointer
 */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
{}
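
/* Usage sketch of the generic netdev multi-TC plumbing such a callback
 * drives (the one-queue-per-TC count and stride-16 offsets are
 * assumptions): declare the TC count, then map each TC to a queue range.
 */
static inline int example_setup_tc(struct net_device *dev, u8 num_tc)
{
	int cos;

	if (!num_tc) {
		netdev_reset_tc(dev);
		return 0;
	}

	if (netdev_set_num_tc(dev, num_tc))
		return -EINVAL;

	for (cos = 0; cos < num_tc; cos++)
		netdev_set_tc_queue(dev, cos, 1, cos * 16);

	return 0;
}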

int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type,
		     void *type_data)
{}

/* called with rtnl_lock */
int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{}

static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
{}

static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
{}

void bnx2x_free_fp_mem(struct bnx2x *bp)
{}

static void set_sb_shortcuts(struct bnx2x *bp, int index)
{}

/* Returns the number of actually allocated BDs */
static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
			      int rx_ring_size)
{}

static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
{}

static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
{}

static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
{}

static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
{}

void bnx2x_free_mem_bp(struct bnx2x *bp)
{}

int bnx2x_alloc_mem_bp(struct bnx2x *bp)
{}

int bnx2x_reload_if_running(struct net_device *dev)
{}

int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
{}

int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{}

#ifdef NETDEV_FCOE_WWNN
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{}
#endif

/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{}

netdev_features_t bnx2x_fix_features(struct net_device *dev,
				     netdev_features_t features)
{}

int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{}

void bnx2x_tx_timeout(struct net_device *dev, unsigned int txqueue)
{}

static int __maybe_unused bnx2x_suspend(struct device *dev_d)
{}

static int __maybe_unused bnx2x_resume(struct device *dev_d)
{}

SIMPLE_DEV_PM_OPS(bnx2x_pm_ops, bnx2x_suspend, bnx2x_resume);

void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid)
{}

static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
				    u8 fw_sb_id, u8 sb_index,
				    u8 ticks)
{}

static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
				    u16 fw_sb_id, u8 sb_index,
				    u8 disable)
{}

void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec)
{}

void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
			    u32 verbose)
{}