linux/drivers/net/ethernet/qlogic/qede/qede_fp.c

// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/bpf_trace.h>
#include <net/udp_tunnel.h>
#include <linux/ip.h>
#include <net/gro.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>
#include "qede_ptp.h"

#include <linux/qed/qed_if.h>
#include "qede.h"
/*********************************
 * Content also used by slowpath *
 *********************************/

int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy)
{}
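
/* Illustrative sketch only (not the driver's actual body): the usual
 * page-based RX refill pattern such a helper follows. Ring bookkeeping
 * is omitted and the helper name/parameters below are hypothetical.
 */
static inline int example_alloc_rx_page(struct device *dev,
					struct page **page,
					dma_addr_t *mapping)
{
	/* Atomic allocation: refill may run from the NAPI softirq path. */
	*page = alloc_page(GFP_ATOMIC);
	if (unlikely(!*page))
		return -ENOMEM;

	/* Map the whole page for device writes before posting it. */
	*mapping = dma_map_page(dev, *page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, *mapping))) {
		__free_page(*page);
		return -ENOMEM;
	}

	return 0;
}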

/* Unmap the data and free skb */
int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len)
{}
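
/* Illustrative sketch: the generic unmap-and-free pattern behind a TX
 * completion helper like the one above. The helper name and parameters
 * are hypothetical; qede's own BD/ring bookkeeping is omitted.
 */
static inline void example_unmap_and_free_tx_skb(struct device *dev,
						 dma_addr_t mapping,
						 unsigned int len,
						 struct sk_buff *skb)
{
	/* Release the streaming mapping set up at transmit time... */
	dma_unmap_single(dev, mapping, len, DMA_TO_DEVICE);

	/* ...then return the skb; the _any() variant is safe from both
	 * IRQ and process context.
	 */
	dev_kfree_skb_any(skb);
}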

/* Unmap the data and free skb when mapping failed during start_xmit */
static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq,
				    struct eth_tx_1st_bd *first_bd,
				    int nbd, bool data_split)
{}

static u32 qede_xmit_type(struct sk_buff *skb, int *ipv6_ext)
{}
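
/* Sketch of the kind of classification a helper like qede_xmit_type()
 * performs on the skb before BDs are built. The EX_XMIT_* flags are
 * placeholders, not the driver's real flag values.
 */
#define EX_XMIT_PLAIN	0
#define EX_XMIT_L4_CSUM	BIT(0)
#define EX_XMIT_LSO	BIT(1)
#define EX_XMIT_ENC	BIT(2)

static inline u32 example_xmit_type(struct sk_buff *skb)
{
	u32 rc = EX_XMIT_PLAIN;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		rc |= EX_XMIT_L4_CSUM;

	if (skb_is_gso(skb))
		rc |= EX_XMIT_LSO;

	if (skb->encapsulation)
		rc |= EX_XMIT_ENC;

	return rc;
}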

static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
					 struct eth_tx_2nd_bd *second_bd,
					 struct eth_tx_3rd_bd *third_bd)
{}

static int map_frag_to_bd(struct qede_tx_queue *txq,
			  skb_frag_t *frag, struct eth_tx_bd *bd)
{}
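
/* Sketch of how an skb fragment is typically mapped for DMA before its
 * address and length are written into a TX BD. BD layout is firmware
 * specific and therefore omitted; names below are illustrative.
 */
static inline int example_map_frag(struct device *dev, skb_frag_t *frag,
				   dma_addr_t *addr, unsigned int *len)
{
	*len = skb_frag_size(frag);

	*addr = skb_frag_dma_map(dev, frag, 0, *len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, *addr)))
		return -ENOMEM;

	return 0;
}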

static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
{}
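
/* Sketch: the conventional way an LSO header length is computed, using
 * the generic skb helpers from <net/tcp.h>. For tunnelled packets the
 * inner headers are counted, otherwise the outer ones. Assumes a TCP
 * GSO skb; shown as a stand-alone example.
 */
static inline u16 example_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
{
	return is_encap_pkt ? skb_inner_tcp_all_headers(skb) :
			      skb_tcp_all_headers(skb);
}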

/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
static bool qede_pkt_req_lin(struct sk_buff *skb, u8 xmit_type)
{}
#endif
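
/* Sketch of the check the comment above describes: a non-LSO packet can
 * consume one BD per page fragment plus up to two header BDs, so if that
 * total exceeds the firmware limit the skb must be linearized before
 * transmit. LSO special-casing is left out of this example.
 */
static inline bool example_pkt_needs_linearize(struct sk_buff *skb)
{
	return skb_shinfo(skb)->nr_frags + 2 >
	       ETH_TX_MAX_BDS_PER_NON_LSO_PACKET;
}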

static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
{}
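
/* Illustrative doorbell pattern (parameters hypothetical): make the BD
 * updates in host memory visible before the new producer value reaches
 * the device.
 */
static inline void example_ring_tx_doorbell(void __iomem *db_addr, u32 db_val)
{
	/* Order the BD-ring writes ahead of the doorbell write. */
	wmb();
	writel(db_val, db_addr);
}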

static int qede_xdp_xmit(struct qede_tx_queue *txq, dma_addr_t dma, u16 pad,
			 u16 len, struct page *page, struct xdp_frame *xdpf)
{}

int qede_xdp_transmit(struct net_device *dev, int n_frames,
		      struct xdp_frame **frames, u32 flags)
{}

int qede_txq_has_work(struct qede_tx_queue *txq)
{}

static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
{}

static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
{}

bool qede_has_rx_work(struct qede_rx_queue *rxq)
{}
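
/* Sketch of the usual "has work" test (field/parameter names are
 * hypothetical): compare the hardware-written producer index against the
 * driver's consumer index, with a read barrier so descriptor contents
 * are not speculatively read before the index itself.
 */
static inline bool example_ring_has_work(__le16 *hw_prod_ptr, u16 sw_cons)
{
	u16 hw_prod = le16_to_cpu(READ_ONCE(*hw_prod_ptr));

	/* Read the index before any of the descriptors it covers. */
	rmb();

	return hw_prod != sw_cons;
}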

static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
{}

/* This function reuses the buffer (from an offset) at the consumer
 * index by reposting it at the producer index of the bd ring.
 */
static inline void qede_reuse_page(struct qede_rx_queue *rxq,
				   struct sw_rx_data *curr_cons)
{}

/* In case of allocation failures, reuse buffers from the consumer
 * index to replenish producer buffers for the firmware.
 */
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count)
{}

static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq,
					 struct sw_rx_data *curr_cons)
{}

void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
{}

static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash)
{}
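
/* Sketch: how an RSS hash reported in the CQE is usually attached to the
 * skb. Whether the hash is L3 or L4 depends on CQE bitfields; a fixed L4
 * type and a simple "valid" flag are assumed here for brevity.
 */
static inline void example_set_rxhash(struct sk_buff *skb, __le32 rss_hash,
				      bool hash_valid)
{
	if (hash_valid)
		skb_set_hash(skb, le32_to_cpu(rss_hash), PKT_HASH_TYPE_L4);
}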

static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
{}
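
/* Sketch (the boolean parameter is a simplification of the CQE checksum
 * flags): when the firmware has validated the L3/L4 checksums the skb is
 * marked so the stack skips re-verification.
 */
static inline void example_set_skb_csum(struct sk_buff *skb, bool csum_ok)
{
	skb->ip_summed = csum_ok ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
}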

static inline void qede_skb_receive(struct qede_dev *edev,
				    struct qede_fastpath *fp,
				    struct qede_rx_queue *rxq,
				    struct sk_buff *skb, u16 vlan_tag)
{}
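
/* Sketch of the typical hand-off to the stack done by a helper like the
 * one above: restore a hardware-stripped VLAN tag, if any, and feed the
 * skb through NAPI GRO. The napi pointer is passed directly here for
 * illustration.
 */
static inline void example_skb_receive(struct napi_struct *napi,
				       struct sk_buff *skb, u16 vlan_tag)
{
	if (vlan_tag)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

	napi_gro_receive(napi, skb);
}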

static void qede_set_gro_params(struct qede_dev *edev,
				struct sk_buff *skb,
				struct eth_fast_path_rx_tpa_start_cqe *cqe)
{}

static int qede_fill_frag_skb(struct qede_dev *edev,
			      struct qede_rx_queue *rxq,
			      u8 tpa_agg_index, u16 len_on_bd)
{}

static bool qede_tunn_exist(u16 flag)
{}

static u8 qede_check_tunn_csum(u16 flag)
{}

static inline struct sk_buff *
qede_build_skb(struct qede_rx_queue *rxq,
	       struct sw_rx_data *bd, u16 len, u16 pad)
{}

static struct sk_buff *
qede_tpa_rx_build_skb(struct qede_dev *edev,
		      struct qede_rx_queue *rxq,
		      struct sw_rx_data *bd, u16 len, u16 pad,
		      bool alloc_skb)
{}

static struct sk_buff *
qede_rx_build_skb(struct qede_dev *edev,
		  struct qede_rx_queue *rxq,
		  struct sw_rx_data *bd, u16 len, u16 pad)
{}

static void qede_tpa_start(struct qede_dev *edev,
			   struct qede_rx_queue *rxq,
			   struct eth_fast_path_rx_tpa_start_cqe *cqe)
{}

#ifdef CONFIG_INET
static void qede_gro_ip_csum(struct sk_buff *skb)
{}

static void qede_gro_ipv6_csum(struct sk_buff *skb)
{}
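
/* Sketch of what the IPv4 variant above typically does for an aggregated
 * TPA skb: point the transport header just past the IP header (skb->data
 * is assumed to start at the IP header), rebuild the TCP pseudo-header
 * checksum over the aggregated length, and let tcp_gro_complete() finish
 * the skb.
 */
static inline void example_gro_ip_csum(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);

	tcp_gro_complete(skb);
}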
#endif

static void qede_gro_receive(struct qede_dev *edev,
			     struct qede_fastpath *fp,
			     struct sk_buff *skb,
			     u16 vlan_tag)
{}

static inline void qede_tpa_cont(struct qede_dev *edev,
				 struct qede_rx_queue *rxq,
				 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
{}

static int qede_tpa_end(struct qede_dev *edev,
			struct qede_fastpath *fp,
			struct eth_fast_path_rx_tpa_end_cqe *cqe)
{}

static u8 qede_check_notunn_csum(u16 flag)
{}

static u8 qede_check_csum(u16 flag)
{}

static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
				      u16 flag)
{}

/* Return true iff packet is to be passed to stack */
static bool qede_rx_xdp(struct qede_dev *edev,
			struct qede_fastpath *fp,
			struct qede_rx_queue *rxq,
			struct bpf_prog *prog,
			struct sw_rx_data *bd,
			struct eth_fast_path_rx_reg_cqe *cqe,
			u16 *data_offset, u16 *len)
{}
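
/* Sketch of the canonical XDP run sequence a helper like the one above
 * implements (queue wiring, recycling and the XDP_TX/XDP_REDIRECT paths
 * are omitted): build an xdp_buff over the page data, run the program,
 * reflect any head/tail adjustment back, and only let XDP_PASS frames
 * continue to the regular skb path. Names are illustrative.
 */
static inline bool example_run_xdp(struct bpf_prog *prog,
				   struct xdp_rxq_info *xdp_rxq,
				   void *hard_start, u16 *data_offset,
				   u16 *len)
{
	struct xdp_buff xdp;
	u32 act;

	xdp_init_buff(&xdp, PAGE_SIZE, xdp_rxq);
	xdp_prepare_buff(&xdp, hard_start, *data_offset, *len, false);

	act = bpf_prog_run_xdp(prog, &xdp);

	/* The program may have moved data/data_end; propagate that. */
	*data_offset = xdp.data - xdp.data_hard_start;
	*len = xdp.data_end - xdp.data;

	/* Anything other than XDP_PASS is handled (or dropped) by the
	 * driver and never reaches the stack.
	 */
	return act == XDP_PASS;
}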

static int qede_rx_build_jumbo(struct qede_dev *edev,
			       struct qede_rx_queue *rxq,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_reg_cqe *cqe,
			       u16 first_bd_len)
{}

static int qede_rx_process_tpa_cqe(struct qede_dev *edev,
				   struct qede_fastpath *fp,
				   struct qede_rx_queue *rxq,
				   union eth_rx_cqe *cqe,
				   enum eth_rx_cqe_type type)
{}

static int qede_rx_process_cqe(struct qede_dev *edev,
			       struct qede_fastpath *fp,
			       struct qede_rx_queue *rxq)
{}

static int qede_rx_int(struct qede_fastpath *fp, int budget)
{}

static bool qede_poll_is_more_work(struct qede_fastpath *fp)
{}

/*********************
 * NDO & API related *
 *********************/
int qede_poll(struct napi_struct *napi, int budget)
{}
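
/* Sketch of the standard NAPI contract qede_poll() follows: do at most
 * "budget" worth of RX work and, only once everything is drained, report
 * completion and re-arm the device interrupt. The two callbacks stand in
 * for the driver's own completion and IRQ-enable routines.
 */
static inline int example_napi_poll(struct napi_struct *napi, int budget,
				    int (*do_rx)(struct napi_struct *, int),
				    void (*irq_enable)(struct napi_struct *))
{
	int work_done = do_rx(napi, budget);

	if (work_done < budget && napi_complete_done(napi, work_done))
		irq_enable(napi);	/* no pending work: unmask the IRQ */

	return work_done;
}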

irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
{}
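
/* Sketch of the usual fastpath MSI-X handler shape: the hard IRQ only
 * schedules NAPI and all real work runs in the poll routine. The cookie
 * is assumed to carry the fastpath's napi context for illustration.
 */
static inline irqreturn_t example_msix_fp_int(int irq, void *cookie)
{
	struct napi_struct *napi = cookie;

	napi_schedule_irqoff(napi);

	return IRQ_HANDLED;
}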

/* Main transmit function */
netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{}
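
/* Sketch of one building block of the transmit path: the conventional
 * queue flow-control check made after posting a packet. The free-BD
 * accessor and thresholds are placeholders; the barrier pairs with one
 * in the completion path.
 */
static inline void example_tx_flow_control(struct netdev_queue *ndev_txq,
					   u16 (*get_free_bds)(void *),
					   void *txq, u16 stop_thresh,
					   u16 wake_thresh)
{
	if (unlikely(get_free_bds(txq) < stop_thresh)) {
		netif_tx_stop_queue(ndev_txq);

		/* Close the race with BDs being freed concurrently by
		 * the completion path.
		 */
		smp_mb();

		if (get_free_bds(txq) >= wake_thresh)
			netif_tx_wake_queue(ndev_txq);
	}
}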

u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
		      struct net_device *sb_dev)
{}

/* 8B UDP header + 8B base tunnel header + 32B option length */
#define QEDE_MAX_TUN_HDR_LEN 48

netdev_features_t qede_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features)
{}
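
/* Sketch of the kind of decision a features_check() callback makes with
 * the limit defined above: if an encapsulated skb's outer-UDP-plus-tunnel
 * headers exceed what the hardware can parse, checksum and GSO offloads
 * are dropped for that skb and the stack falls back to software. The
 * header-length math is simplified for illustration.
 */
static inline netdev_features_t
example_features_check(struct sk_buff *skb, netdev_features_t features)
{
	if (skb->encapsulation) {
		unsigned int hdrlen = skb_inner_mac_header(skb) -
				      skb_transport_header(skb);

		if (unlikely(hdrlen > QEDE_MAX_TUN_HDR_LEN))
			features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
	}

	return features;
}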