/* linux/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h */

/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/* Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016-2022 NXP
 */

#ifndef __DPAA2_ETH_H
#define __DPAA2_ETH_H

#include <linux/dcbnl.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/fsl/mc.h>
#include <linux/net_tstamp.h>
#include <net/devlink.h>
#include <net/xdp.h>

#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
#include "dpni.h"
#include "dpni-cmd.h"

#include "dpaa2-eth-trace.h"
#include "dpaa2-eth-debugfs.h"
#include "dpaa2-mac.h"

#define DPAA2_WRIOP_VERSION(x, y, z)

#define DPAA2_ETH_STORE_SIZE

/* Maximum number of scatter-gather entries in an ingress frame,
 * considering the maximum receive frame size is 64K
 */
#define DPAA2_ETH_MAX_SG_ENTRIES

/* Maximum acceptable MTU value. It is in direct relation with the hardware
 * enforced Max Frame Length (currently 10k).
 */
#define DPAA2_ETH_MFL
#define DPAA2_ETH_MAX_MTU
/* Convert L3 MTU to L2 MFL */
#define DPAA2_ETH_L2_MAX_FRM(mtu)

/* Set the taildrop threshold (in bytes) to allow the enqueue of a large
 * enough number of jumbo frames in the Rx queues (length of the current
 * frame is not taken into account when making the taildrop decision)
 */
#define DPAA2_ETH_FQ_TAILDROP_THRESH

/* Maximum burst size value for Tx shaping */
#define DPAA2_ETH_MAX_BURST_SIZE

/* Maximum number of Tx confirmation frames to be processed
 * in a single NAPI call
 */
#define DPAA2_ETH_TXCONF_PER_NAPI

/* Maximum number of Tx frames to be processed in a single NAPI
 * call when AF_XDP is running. Bind it to DPAA2_ETH_TXCONF_PER_NAPI
 * to maximize the throughput.
 */
#define DPAA2_ETH_TX_ZC_PER_NAPI

/* Buffer quota per channel. We want to keep in check the number of ingress frames
 * in flight: for small sized frames, congestion group taildrop may kick in
 * first; for large sizes, Rx FQ taildrop threshold will ensure only a
 * reasonable number of frames will be pending at any given time.
 * Ingress frame drop due to buffer pool depletion should be a corner case only
 */
#define DPAA2_ETH_NUM_BUFS
#define DPAA2_ETH_REFILL_THRESH

/* Congestion group taildrop threshold: number of frames allowed to accumulate
 * at any moment in a group of Rx queues belonging to the same traffic class.
 * Choose value such that we don't risk depleting the buffer pool before the
 * taildrop kicks in
 */
#define DPAA2_ETH_CG_TAILDROP_THRESH(priv)

/* Congestion group notification threshold: when this many frames accumulate
 * on the Rx queues belonging to the same TC, the MAC is instructed to send
 * PFC frames for that TC.
 * When number of pending frames drops below exit threshold transmission of
 * PFC frames is stopped.
 */
#define DPAA2_ETH_CN_THRESH_ENTRY(priv)
#define DPAA2_ETH_CN_THRESH_EXIT(priv)

/* Maximum number of buffers that can be acquired/released through a single
 * QBMan command
 */
#define DPAA2_ETH_BUFS_PER_CMD

/* Hardware requires alignment for ingress/egress buffer addresses */
#define DPAA2_ETH_TX_BUF_ALIGN

#define DPAA2_ETH_RX_BUF_RAW_SIZE
#define DPAA2_ETH_RX_BUF_TAILROOM
#define DPAA2_ETH_RX_BUF_SIZE

/* Hardware annotation area in RX/TX buffers */
#define DPAA2_ETH_RX_HWA_SIZE
#define DPAA2_ETH_TX_HWA_SIZE

/* PTP nominal frequency 1GHz */
#define DPAA2_PTP_CLK_PERIOD_NS

/* Due to a limitation in WRIOP 1.0.0, the RX buffer data must be aligned
 * to 256B. For newer revisions, the requirement is only for 64B alignment
 */
#define DPAA2_ETH_RX_BUF_ALIGN_REV1
#define DPAA2_ETH_RX_BUF_ALIGN

/* The firmware allows assigning multiple buffer pools to a single DPNI -
 * maximum 8 DPBP objects. By default, only the first DPBP (idx 0) is used for
 * all queues. Thus, when enabling AF_XDP we must accommodate up to 9 DPBP
 * objects: the default and 8 other distinct buffer pools, one for each queue.
 */
#define DPAA2_ETH_DEFAULT_BP_IDX
#define DPAA2_ETH_MAX_BPS

/* We are accommodating a skb backpointer and some S/G info
 * in the frame's software annotation. The hardware
 * options are either 0 or 64, so we choose the latter.
 */
#define DPAA2_ETH_SWA_SIZE

/* We store different information in the software annotation area of a Tx frame
 * based on what type of frame it is
 */
enum dpaa2_eth_swa_type {}; /* NOTE(review): enumerators stripped in this view — restore from upstream header */

/* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */
struct dpaa2_eth_swa {}; /* NOTE(review): members stripped in this view — restore from upstream header */

/* Annotation valid bits in FD FRC */
#define DPAA2_FD_FRC_FASV
#define DPAA2_FD_FRC_FAEADV
#define DPAA2_FD_FRC_FAPRV
#define DPAA2_FD_FRC_FAIADV
#define DPAA2_FD_FRC_FASWOV
#define DPAA2_FD_FRC_FAICFDV

/* Error bits in FD CTRL */
#define DPAA2_FD_RX_ERR_MASK
#define DPAA2_FD_TX_ERR_MASK

/* Annotation bits in FD CTRL */
#define DPAA2_FD_CTRL_ASAL

/* Frame annotation status */
struct dpaa2_fas {}; /* NOTE(review): FAS word layout stripped in this view — restore from upstream header */

/* Frame annotation status word is located in the first 8 bytes
 * of the buffer's hardware annotation area
 */
#define DPAA2_FAS_OFFSET
#define DPAA2_FAS_SIZE

/* Timestamp is located in the next 8 bytes of the buffer's
 * hardware annotation area
 */
#define DPAA2_TS_OFFSET

/* Frame annotation parse results */
struct dpaa2_fapr {}; /* NOTE(review): parse-result layout stripped in this view — restore from upstream header */

#define DPAA2_FAPR_OFFSET
#define DPAA2_FAPR_SIZE

/* Frame annotation egress action descriptor */
#define DPAA2_FAEAD_OFFSET

struct dpaa2_faead {}; /* NOTE(review): egress action descriptor layout stripped in this view — restore from upstream header */

#define DPAA2_FAEAD_A2V
#define DPAA2_FAEAD_A4V
#define DPAA2_FAEAD_UPDV
#define DPAA2_FAEAD_EBDDV
#define DPAA2_FAEAD_UPD

struct ptp_tstamp {}; /* NOTE(review): members stripped in this view — restore from upstream header */

/* Presumably splits a nanosecond value into the sec/nsec fields of
 * struct ptp_tstamp — NOTE(review): body stripped in this view; restore
 * from upstream and confirm.
 */
static inline void ns_to_ptp_tstamp(struct ptp_tstamp *tstamp, u64 ns)
{}

/* Accessors for the hardware annotation fields that we use */
/* Return a pointer to the hardware annotation (HWA) area of a buffer;
 * @swa presumably indicates whether a software annotation precedes the HWA.
 * NOTE(review): body stripped in this view — restore from upstream header.
 */
static inline void *dpaa2_get_hwa(void *buf_addr, bool swa)
{}

/* Return the frame annotation status word, located in the first 8 bytes of
 * the hardware annotation area (see DPAA2_FAS_OFFSET above).
 * NOTE(review): body stripped in this view — restore from upstream header.
 */
static inline struct dpaa2_fas *dpaa2_get_fas(void *buf_addr, bool swa)
{}

/* Return the hardware timestamp, located in the 8 bytes following the FAS
 * in the hardware annotation area (see DPAA2_TS_OFFSET above).
 * NOTE(review): body stripped in this view — restore from upstream header.
 */
static inline __le64 *dpaa2_get_ts(void *buf_addr, bool swa)
{}

/* Return the frame annotation parse results, located at DPAA2_FAPR_OFFSET
 * within the hardware annotation area.
 * NOTE(review): body stripped in this view — restore from upstream header.
 */
static inline struct dpaa2_fapr *dpaa2_get_fapr(void *buf_addr, bool swa)
{}

/* Return the frame annotation egress action descriptor, located at
 * DPAA2_FAEAD_OFFSET within the hardware annotation area.
 * NOTE(review): body stripped in this view — restore from upstream header.
 */
static inline struct dpaa2_faead *dpaa2_get_faead(void *buf_addr, bool swa)
{}

/* Error and status bits in the frame annotation status word */
/* Debug frame, otherwise supposed to be discarded */
#define DPAA2_FAS_DISC
/* MACSEC frame */
#define DPAA2_FAS_MS
#define DPAA2_FAS_PTP
/* Ethernet multicast frame */
#define DPAA2_FAS_MC
/* Ethernet broadcast frame */
#define DPAA2_FAS_BC
#define DPAA2_FAS_KSE
#define DPAA2_FAS_EOFHE
#define DPAA2_FAS_MNLE
#define DPAA2_FAS_TIDE
#define DPAA2_FAS_PIEE
/* Frame length error */
#define DPAA2_FAS_FLE
/* Frame physical error */
#define DPAA2_FAS_FPE
#define DPAA2_FAS_PTE
#define DPAA2_FAS_ISP
#define DPAA2_FAS_PHE
#define DPAA2_FAS_BLE
/* L3 csum validation performed */
#define DPAA2_FAS_L3CV
/* L3 csum error */
#define DPAA2_FAS_L3CE
/* L4 csum validation performed */
#define DPAA2_FAS_L4CV
/* L4 csum error */
#define DPAA2_FAS_L4CE
/* Possible errors on the ingress path */
#define DPAA2_FAS_RX_ERR_MASK

/* Time in milliseconds between link state updates */
#define DPAA2_ETH_LINK_STATE_REFRESH

/* Number of times to retry a frame enqueue before giving up.
 * Value determined empirically, in order to minimize the number
 * of frames dropped on Tx
 */
#define DPAA2_ETH_ENQUEUE_RETRIES

/* Number of times to retry DPIO portal operations while waiting
 * for portal to finish executing current command and become
 * available. We want to avoid being stuck in a while loop in case
 * hardware becomes unresponsive, but not give up too easily if
 * the portal really is busy for valid reasons
 */
#define DPAA2_ETH_SWP_BUSY_RETRIES

/* Driver statistics, other than those in struct rtnl_link_stats64.
 * These are usually collected per-CPU and aggregated by ethtool.
 */
struct dpaa2_eth_drv_stats {}; /* NOTE(review): counters stripped in this view — restore from upstream header */

/* Per-FQ statistics */
struct dpaa2_eth_fq_stats {}; /* NOTE(review): counters stripped in this view — restore from upstream header */

/* Per-channel statistics */
struct dpaa2_eth_ch_stats {}; /* NOTE(review): counters stripped in this view — restore from upstream header */

#define DPAA2_ETH_CH_STATS

/* Maximum number of queues associated with a DPNI */
#define DPAA2_ETH_MAX_TCS
#define DPAA2_ETH_MAX_RX_QUEUES_PER_TC
#define DPAA2_ETH_MAX_RX_QUEUES
#define DPAA2_ETH_MAX_TX_QUEUES
#define DPAA2_ETH_MAX_RX_ERR_QUEUES
#define DPAA2_ETH_MAX_QUEUES
#define DPAA2_ETH_MAX_NETDEV_QUEUES

#define DPAA2_ETH_MAX_DPCONS

enum dpaa2_eth_fq_type {}; /* NOTE(review): frame-queue type enumerators stripped in this view — restore from upstream header */

struct dpaa2_eth_priv;
struct dpaa2_eth_channel;
struct dpaa2_eth_fq;

struct dpaa2_eth_xdp_fds {}; /* NOTE(review): members stripped in this view — restore from upstream header */

/* NOTE(review): this bare statement is not valid C — it looks like a
 * stripped callback typedef (the frame consume callback referenced by
 * struct dpaa2_eth_fq); restore the full typedef from the upstream header.
 */
dpaa2_eth_consume_cb_t;

struct dpaa2_eth_fq {}; /* NOTE(review): members stripped in this view — restore from upstream header */

struct dpaa2_eth_ch_xdp {}; /* NOTE(review): members stripped in this view — restore from upstream header */

struct dpaa2_eth_bp {}; /* NOTE(review): members stripped in this view — restore from upstream header */

struct dpaa2_eth_channel {}; /* NOTE(review): members stripped in this view — restore from upstream header */

struct dpaa2_eth_dist_fields {}; /* NOTE(review): members stripped in this view — restore from upstream header */

struct dpaa2_eth_cls_rule {}; /* NOTE(review): members stripped in this view — restore from upstream header */

#define DPAA2_ETH_SGT_CACHE_SIZE
struct dpaa2_eth_sgt_cache {}; /* NOTE(review): members stripped in this view — restore from upstream header */

struct dpaa2_eth_trap_item {}; /* NOTE(review): members stripped in this view — restore from upstream header */

struct dpaa2_eth_trap_data {}; /* NOTE(review): members stripped in this view — restore from upstream header */

#define DPAA2_ETH_SG_ENTRIES_MAX

#define DPAA2_ETH_DEFAULT_COPYBREAK

#define DPAA2_ETH_ENQUEUE_MAX_FDS
struct dpaa2_eth_fds {}; /* NOTE(review): members stripped in this view — restore from upstream header */

/* Driver private data */
struct dpaa2_eth_priv {}; /* NOTE(review): members stripped in this view — restore from upstream header */

struct dpaa2_eth_devlink_priv {}; /* NOTE(review): members stripped in this view — restore from upstream header */

#define TX_TSTAMP
#define TX_TSTAMP_ONESTEP_SYNC

#define DPAA2_RXH_SUPPORTED

/* default Rx hash options, set during probing */
#define DPAA2_RXH_DEFAULT

#define dpaa2_eth_hash_enabled(priv)

/* Required by struct dpni_rx_tc_dist_cfg::key_cfg_iova */
#define DPAA2_CLASSIFIER_DMA_SIZE

extern const struct ethtool_ops dpaa2_ethtool_ops;
extern int dpaa2_phc_index;
extern struct ptp_qoriq *dpaa2_ptp;

/* Compare the DPNI firmware API version against (@ver_major, @ver_minor);
 * presumably returns negative/zero/positive like a comparison function.
 * NOTE(review): body stripped in this view — restore from upstream header
 * and confirm the return convention.
 */
static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv,
					 u16 ver_major, u16 ver_minor)
{}

/* Minimum firmware version that supports a more flexible API
 * for configuring the Rx flow hash key
 */
#define DPNI_RX_DIST_KEY_VER_MAJOR
#define DPNI_RX_DIST_KEY_VER_MINOR

#define dpaa2_eth_has_legacy_dist(priv)

#define dpaa2_eth_fs_enabled(priv)

#define dpaa2_eth_fs_mask_enabled(priv)

#define dpaa2_eth_fs_count(priv)

#define dpaa2_eth_tc_count(priv)

/* We have exactly one {Rx, Tx conf} queue per channel */
#define dpaa2_eth_queue_count(priv)

enum dpaa2_eth_rx_dist {}; /* NOTE(review): Rx distribution type enumerators stripped in this view — restore from upstream header */

/* Unique IDs for the supported Rx classification header fields */
#define DPAA2_ETH_DIST_ETHDST
#define DPAA2_ETH_DIST_ETHSRC
#define DPAA2_ETH_DIST_ETHTYPE
#define DPAA2_ETH_DIST_VLAN
#define DPAA2_ETH_DIST_IPSRC
#define DPAA2_ETH_DIST_IPDST
#define DPAA2_ETH_DIST_IPPROTO
#define DPAA2_ETH_DIST_L4SRC
#define DPAA2_ETH_DIST_L4DST
#define DPAA2_ETH_DIST_ALL

#define DPNI_PTP_ONESTEP_VER_MAJOR
#define DPNI_PTP_ONESTEP_VER_MINOR
#define DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT
#define DPAA2_PTP_SINGLE_STEP_ENABLE
#define DPAA2_PTP_SINGLE_STEP_CH
#define DPAA2_PTP_SINGLE_CORRECTION_OFF(v)

#define DPNI_PAUSE_VER_MAJOR
#define DPNI_PAUSE_VER_MINOR
#define dpaa2_eth_has_pause_support(priv)

/* Presumably test the Tx/Rx pause-frame bits in @link_options (the DPNI
 * link option flags) — NOTE(review): bodies stripped in this view; restore
 * from upstream header and confirm.
 */
static inline bool dpaa2_eth_tx_pause_enabled(u64 link_options)
{}

static inline bool dpaa2_eth_rx_pause_enabled(u64 link_options)
{}

/* Headroom the Tx path needs in front of @skb's data — presumably software
 * annotation plus DPAA2_ETH_TX_BUF_ALIGN alignment. NOTE(review): body
 * stripped in this view — restore from upstream header and confirm.
 */
static inline unsigned int dpaa2_eth_needed_headroom(struct sk_buff *skb)
{}

/* Extra headroom space requested to hardware, in order to make sure there's
 * no realloc'ing in forwarding scenarios
 */
/* NOTE(review): body stripped in this view — restore from upstream header. */
static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
{}

/* Presumably report whether the DPNI is connected to a MAC object and, for
 * dpaa2_eth_is_type_phy(), whether that MAC is managed by this driver.
 * NOTE(review): bodies stripped in this view — restore from upstream header
 * and confirm the exact conditions.
 */
static inline bool dpaa2_eth_is_type_phy(struct dpaa2_eth_priv *priv)
{}

static inline bool dpaa2_eth_has_mac(struct dpaa2_eth_priv *priv)
{}

int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
int dpaa2_eth_set_cls(struct net_device *net_dev, u64 key);
int dpaa2_eth_cls_key_size(u64 key);
int dpaa2_eth_cls_fld_off(int prot, int field);
void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields);

void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
			       bool tx_pause, bool pfc);

extern const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops;

int dpaa2_eth_dl_alloc(struct dpaa2_eth_priv *priv);
void dpaa2_eth_dl_free(struct dpaa2_eth_priv *priv);

void dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv);
void dpaa2_eth_dl_unregister(struct dpaa2_eth_priv *priv);

int dpaa2_eth_dl_port_add(struct dpaa2_eth_priv *priv);
void dpaa2_eth_dl_port_del(struct dpaa2_eth_priv *priv);

int dpaa2_eth_dl_traps_register(struct dpaa2_eth_priv *priv);
void dpaa2_eth_dl_traps_unregister(struct dpaa2_eth_priv *priv);

struct dpaa2_eth_trap_item *dpaa2_eth_dl_get_trap(struct dpaa2_eth_priv *priv,
						  struct dpaa2_fapr *fapr);

struct dpaa2_eth_bp *dpaa2_eth_allocate_dpbp(struct dpaa2_eth_priv *priv);
void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv, struct dpaa2_eth_bp *bp);

struct sk_buff *dpaa2_eth_alloc_skb(struct dpaa2_eth_priv *priv,
				    struct dpaa2_eth_channel *ch,
				    const struct dpaa2_fd *fd, u32 fd_length,
				    void *fd_vaddr);

void dpaa2_eth_receive_skb(struct dpaa2_eth_priv *priv,
			   struct dpaa2_eth_channel *ch,
			   const struct dpaa2_fd *fd, void *vaddr,
			   struct dpaa2_eth_fq *fq,
			   struct rtnl_link_stats64 *percpu_stats,
			   struct sk_buff *skb);

void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
		  struct dpaa2_eth_channel *ch,
		  const struct dpaa2_fd *fd,
		  struct dpaa2_eth_fq *fq);

/* Duplicate prototypes of dpaa2_eth_allocate_dpbp() and dpaa2_eth_free_dpbp()
 * removed — both functions are already declared verbatim earlier in this
 * header (next to the other buffer-pool helpers).
 */

void *dpaa2_iova_to_virt(struct iommu_domain *domain, dma_addr_t iova_addr);
void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
			   struct dpaa2_eth_channel *ch,
			   dma_addr_t addr);

void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
			   struct dpaa2_eth_channel *ch,
			   struct dpaa2_fd *fd,
			   void *buf_start, u16 queue_id);

int dpaa2_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags);
int dpaa2_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16 qid);

void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
			  struct dpaa2_eth_channel *ch,
			  struct dpaa2_eth_fq *fq,
			  const struct dpaa2_fd *fd, bool in_napi);
bool dpaa2_xsk_tx(struct dpaa2_eth_priv *priv,
		  struct dpaa2_eth_channel *ch);

/* SGT (Scatter-Gather Table) cache management */
void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv);

void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf);

#endif	/* __DPAA2_ETH_H */