// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <linux/etherdevice.h>
#include <net/ip.h>
#include <net/tso.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/ip6_checksum.h>

#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_struct.h"
#include "otx2_txrx.h"
#include "otx2_ptp.h"
#include "cn10k.h"

#define CQE_ADDR(CQ, idx)	((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
#define PTP_PORT	        0x13F
/* The PTPv2 header's Original Timestamp starts at byte offset 34 and
 * contains a 6-byte seconds field followed by a 4-byte nanoseconds field.
 */
#define PTP_SYNC_SEC_OFFSET	34

static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
				     struct bpf_prog *prog,
				     struct nix_cqe_rx_s *cqe,
				     struct otx2_cq_queue *cq,
				     bool *need_xdp_flush);

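/* Read NIX_LF_CQ_OP_STATUS for this CQ, fail on a hardware CQ error and
 * record the number of pending CQEs in cq->pend_cqe.
 */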
static int otx2_nix_cq_op_status(struct otx2_nic *pfvf,
				 struct otx2_cq_queue *cq)
{}

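/* Return the next CQE to be processed and advance cq->cq_head, or NULL
 * if no new completion is pending.
 */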
static struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq)
{}

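/* Map a segment index to the order in which segments are packed inside
 * an SG subdescriptor; only differs from 'i' on big-endian kernels.
 */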
static unsigned int frag_num(unsigned int i)
{}

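/* DMA map one skb segment (the linear area for seg 0, page frags after
 * that) and return its IOVA; the segment length is returned via *len.
 */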
static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
					struct sk_buff *skb, int seg, int *len)
{}

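/* Undo otx2_dma_map_skb_frag() for every segment recorded in 'sg' */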
static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
{}

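/* TX completion for the XDP send queue: unmap and release the page that
 * backed the transmitted XDP frame.
 */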
static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf,
				     struct otx2_snd_queue *sq,
				     struct nix_cqe_tx_s *cqe)
{}

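/* TX completion for a regular send queue: pick up the skb saved in
 * sq->sg[], fill in a TX timestamp if one was requested, update the
 * packet/byte counters and free the skb.
 */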
static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
				 struct otx2_cq_queue *cq,
				 struct otx2_snd_queue *sq,
				 struct nix_cqe_tx_s *cqe,
				 int budget, int *tx_pkts, int *tx_bytes)
{}

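/* Convert the hardware timestamp prepended to the received packet into
 * skb_hwtstamps when PTP RX timestamping is enabled.
 */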
static void otx2_set_rxtstamp(struct otx2_nic *pfvf,
			      struct sk_buff *skb, void *data)
{}

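/* Attach one received buffer (iova/len) to the skb as a page fragment;
 * the first segment may carry a prepended hardware timestamp.
 */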
static bool otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
			      u64 iova, int len, struct nix_rx_parse_s *parse,
			      int qidx)
{}

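/* Set the skb RSS hash from the CQE when NETIF_F_RXHASH is enabled */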
static void otx2_set_rxhash(struct otx2_nic *pfvf,
			    struct nix_cqe_rx_s *cqe, struct sk_buff *skb)
{}

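/* Return the buffer pointers of a multi-segment CQE to the pool when
 * the frame is dropped without building an skb.
 */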
static void otx2_free_rcv_seg(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe,
			      int qidx)
{}

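/* Inspect the parse errors reported in an RX CQE, bump the relevant
 * error counters and return true if the frame must be dropped.
 */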
static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
				  struct nix_cqe_rx_s *cqe, int qidx)
{}

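/* Process one RX CQE: run the XDP program if one is attached, build an
 * skb from the segments, set hash/timestamp/checksum and hand it to GRO.
 */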
static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
				 struct napi_struct *napi,
				 struct otx2_cq_queue *cq,
				 struct nix_cqe_rx_s *cqe, bool *need_xdp_flush)
{}

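/* Process up to 'budget' pending RX CQEs on this queue and count how
 * many buffer pointers need to be refilled into the pool.
 */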
static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
				struct napi_struct *napi,
				struct otx2_cq_queue *cq, int budget)
{}

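/* Try to refill the receive buffer pool with cq->pool_ptrs freshly
 * allocated buffers.
 */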
int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
{}

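/* Process pending TX completion CQEs, then update and, if necessary,
 * wake the corresponding netdev TX queue.
 */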
static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
				struct otx2_cq_queue *cq, int budget)
{}

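/* Feed packet/byte counters to net_dim() so adaptive interrupt
 * coalescing can pick a new moderation profile.
 */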
static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_poll *cq_poll)
{}

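/* NAPI poll handler: service the TX and RX CQs mapped to this interrupt
 * and re-arm the interrupt once all work is done.
 */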
int otx2_napi_handler(struct napi_struct *napi, int budget)
{}

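/* Push the SQE(s) just built in sq->sqe_base to hardware via LMTST and
 * advance the SQ head.
 */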
void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
		    int size, int qidx)
{}

#define MAX_SEGS_PER_SG	3
/* Add SQE scatter/gather subdescriptor structure */
static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
			    struct sk_buff *skb, int num_segs, int *offset)
{}

/* Add SQE extended header subdescriptor */
static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
			     struct sk_buff *skb, int *offset)
{}

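/* Add a MEM subdescriptor, used to capture (or, for one-step PTP sync,
 * patch) the transmit timestamp at 'iova'.
 */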
static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
			     int alg, u64 iova, int ptp_offset,
			     u64 base_ns, bool udp_csum_crt)
{}

/* Add SQE header subdescriptor structure */
static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
			     struct nix_sqe_hdr_s *sqe_hdr,
			     struct sk_buff *skb, u16 qidx)
{}

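/* DMA map the whole skb for software TSO and remember the mappings in
 * the SQE's sg_list so they can be unmapped on completion.
 */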
static int otx2_dma_map_tso_skb(struct otx2_nic *pfvf,
				struct otx2_snd_queue *sq,
				struct sk_buff *skb, int sqe, int hdr_len)
{}

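/* Translate a TSO segment's data pointer into the DMA address saved by
 * otx2_dma_map_tso_skb().
 */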
static u64 otx2_tso_frag_dma_addr(struct otx2_snd_queue *sq,
				  struct sk_buff *skb, int seg,
				  u64 seg_addr, int hdr_len, int sqe)
{}

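/* Build SG subdescriptors for one software-TSO segment from 'list' */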
static void otx2_sqe_tso_add_sg(struct otx2_snd_queue *sq,
				struct sg_list *list, int *offset)
{}

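/* Software TSO fallback: segment the skb with the net/tso helpers and
 * post one SQE (header + SG) per resulting segment.
 */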
static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
			       struct sk_buff *skb, u16 qidx)
{}

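/* Check whether hardware TSO can be used for this skb (feature enabled
 * and no silicon errata restrict the header length).
 */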
static bool is_hw_tso_supported(struct otx2_nic *pfvf,
				struct sk_buff *skb)
{}

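/* Number of SQEs this skb will consume: 1 normally, or one per GSO
 * segment when falling back to software TSO.
 */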
static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb)
{}

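/* For PTP over UDP: check that the packet is UDP and that both source
 * and destination ports are the PTP event port.
 */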
static bool otx2_validate_network_transport(struct sk_buff *skb)
{}

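/* Return true if the skb is a PTP SYNC message (over L2 or UDP); on
 * success *offset points at the origin timestamp to be patched.
 */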
static bool otx2_ptp_is_sync(struct sk_buff *skb, int *offset, bool *udp_csum_crt)
{}

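/* Request a TX timestamp for PTP packets: one-step SYNC messages get
 * their origin timestamp patched via a MEM subdescriptor, everything
 * else is marked for a regular two-step timestamp.
 */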
static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb,
			      struct otx2_snd_queue *sq, int *offset)
{}

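/* Main transmit path: check for SQE room, build the header, extended
 * header and SG subdescriptors (or take the TSO path) and flush the
 * descriptor to hardware. Returns false if the queue is full.
 */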
bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
			struct sk_buff *skb, u16 qidx)
{}
EXPORT_SYMBOL(otx2_sq_append_skb);

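/* Drain pending RX CQEs and free their buffers when the queue is torn down */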
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx)
{}

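/* Drain pending TX CQEs and free the associated skbs on teardown */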
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
{}

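/* Ask the AF over the mailbox to start or stop NIX RX/TX for this LF */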
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
{}

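/* Free skbs still referenced by SQEs that never completed, after the
 * queues have been stopped.
 */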
void otx2_free_pending_sqe(struct otx2_nic *pfvf)
{}

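/* Build the single SG subdescriptor for an XDP frame */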
static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
				int len, int *offset)
{}

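/* Post one XDP frame to the XDP send queue; returns false if no SQE is
 * available.
 */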
bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
{}

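/* Run the attached XDP program on a received frame and act on the
 * verdict: XDP_PASS falls through to the normal stack, XDP_TX and
 * XDP_REDIRECT resend the buffer, anything else drops it.
 */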
static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
				     struct bpf_prog *prog,
				     struct nix_cqe_rx_s *cqe,
				     struct otx2_cq_queue *cq,
				     bool *need_xdp_flush)
{}