// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016-2022 NXP
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/iommu.h>
#include <linux/fsl/mc.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/fsl/ptp_qoriq.h>
#include <linux/ptp_classify.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
#include <net/tso.h>
#include <net/xdp_sock_drv.h>

#include "dpaa2-eth.h"

/* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa2 files
 * using trace events only need to #include the relevant trace header file
 */
#define CREATE_TRACE_POINTS
#include "dpaa2-eth-trace.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");

struct ptp_qoriq *dpaa2_ptp;
EXPORT_SYMBOL(dpaa2_ptp);

static void dpaa2_eth_detect_features(struct dpaa2_eth_priv *priv)
{}

static void dpaa2_update_ptp_onestep_indirect(struct dpaa2_eth_priv *priv,
					      u32 offset, u8 udp)
{}

static void dpaa2_update_ptp_onestep_direct(struct dpaa2_eth_priv *priv,
					    u32 offset, u8 udp)
{}

static void dpaa2_ptp_onestep_reg_update_method(struct dpaa2_eth_priv *priv)
{}

void *dpaa2_iova_to_virt(struct iommu_domain *domain,
			 dma_addr_t iova_addr)
{}

static void dpaa2_eth_validate_rx_csum(struct dpaa2_eth_priv *priv,
				       u32 fd_status,
				       struct sk_buff *skb)
{}

/* Free a received FD.
 * Not to be used for Tx conf FDs or on any other paths.
 */
static void dpaa2_eth_free_rx_fd(struct dpaa2_eth_priv *priv,
				 const struct dpaa2_fd *fd,
				 void *vaddr)
{}

/* Build a linear skb based on a single-buffer frame descriptor */
static struct sk_buff *dpaa2_eth_build_linear_skb(struct dpaa2_eth_channel *ch,
						  const struct dpaa2_fd *fd,
						  void *fd_vaddr)
{}
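
/* Illustrative sketch only (not the driver's implementation): the generic
 * build_skb() pattern a single-buffer frame descriptor maps onto. The helper
 * name and the buf_size parameter are hypothetical; the FD accessors come
 * from <soc/fsl/dpaa2-fd.h>.
 */
static struct sk_buff *dpaa2_eth_linear_skb_sketch(const struct dpaa2_fd *fd,
						   void *fd_vaddr,
						   unsigned int buf_size)
{
	struct sk_buff *skb;

	/* Wrap the existing Rx buffer instead of copying it */
	skb = build_skb(fd_vaddr, buf_size);
	if (unlikely(!skb))
		return NULL;

	/* Skip the HW annotation/headroom area, then expose the frame data */
	skb_reserve(skb, dpaa2_fd_get_offset(fd));
	skb_put(skb, dpaa2_fd_get_len(fd));

	return skb;
}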

/* Build a non-linear (fragmented) skb based on an S/G table */
static struct sk_buff *dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv *priv,
						struct dpaa2_eth_channel *ch,
						struct dpaa2_sg_entry *sgt)
{}

/* Free buffers acquired from the buffer pool or which were meant to
 * be released in the pool
 */
static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array,
				int count, bool xsk_zc)
{}

void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
			   struct dpaa2_eth_channel *ch,
			   dma_addr_t addr)
{}

static int dpaa2_eth_xdp_flush(struct dpaa2_eth_priv *priv,
			       struct dpaa2_eth_fq *fq,
			       struct dpaa2_eth_xdp_fds *xdp_fds)
{}

static void dpaa2_eth_xdp_tx_flush(struct dpaa2_eth_priv *priv,
				   struct dpaa2_eth_channel *ch,
				   struct dpaa2_eth_fq *fq)
{}

void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
			   struct dpaa2_eth_channel *ch,
			   struct dpaa2_fd *fd,
			   void *buf_start, u16 queue_id)
{}

static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
			     struct dpaa2_eth_channel *ch,
			     struct dpaa2_eth_fq *rx_fq,
			     struct dpaa2_fd *fd, void *vaddr)
{}

struct sk_buff *dpaa2_eth_alloc_skb(struct dpaa2_eth_priv *priv,
				    struct dpaa2_eth_channel *ch,
				    const struct dpaa2_fd *fd, u32 fd_length,
				    void *fd_vaddr)
{}

static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch,
					   const struct dpaa2_fd *fd,
					   void *fd_vaddr)
{}

void dpaa2_eth_receive_skb(struct dpaa2_eth_priv *priv,
			   struct dpaa2_eth_channel *ch,
			   const struct dpaa2_fd *fd, void *vaddr,
			   struct dpaa2_eth_fq *fq,
			   struct rtnl_link_stats64 *percpu_stats,
			   struct sk_buff *skb)
{}
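
/* Illustrative sketch of the final hand-off to the network stack that the Rx
 * path performs; not the driver's code (hashing, timestamping and per-FQ
 * statistics are omitted, and ch->napi is assumed to be the channel's NAPI
 * context).
 */
static void dpaa2_eth_pass_to_stack_sketch(struct dpaa2_eth_channel *ch,
					   struct net_device *net_dev,
					   struct sk_buff *skb,
					   struct rtnl_link_stats64 *percpu_stats)
{
	/* Account the frame before eth_type_trans() pulls the Ethernet header */
	percpu_stats->rx_packets++;
	percpu_stats->rx_bytes += skb->len;

	skb->protocol = eth_type_trans(skb, net_dev);
	napi_gro_receive(&ch->napi, skb);
}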

/* Main Rx frame processing routine */
void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
		  struct dpaa2_eth_channel *ch,
		  const struct dpaa2_fd *fd,
		  struct dpaa2_eth_fq *fq)
{}

/* Processing of Rx frames received on the error FQ
 * We check and print the error bits and then free the frame
 */
static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
			     struct dpaa2_eth_channel *ch,
			     const struct dpaa2_fd *fd,
			     struct dpaa2_eth_fq *fq __always_unused)
{}

/* Consume all frames pull-dequeued into the store. This is the simplest way to
 * make sure we don't accidentally issue another volatile dequeue which would
 * overwrite (leak) frames already in the store.
 *
 * Observance of NAPI budget is not our concern, leaving that to the caller.
 */
static int dpaa2_eth_consume_frames(struct dpaa2_eth_channel *ch,
				    struct dpaa2_eth_fq **src)
{}
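
/* Illustrative sketch of the store-draining loop described above; not the
 * driver's code. ch->store is assumed to be the channel's dpaa2_io_store,
 * the owning FQ is recovered from the dequeue entry's FQD context, and
 * process_one_fd() is a hypothetical per-frame callback.
 */
static int dpaa2_eth_drain_store_sketch(struct dpaa2_eth_channel *ch,
					void (*process_one_fd)(const struct dpaa2_fd *fd,
							       struct dpaa2_eth_fq *fq))
{
	const struct dpaa2_dq *dq;
	int cleaned = 0, is_last;

	do {
		/* NULL means the next DQRR entry has not been written back yet */
		dq = dpaa2_io_store_next(ch->store, &is_last);
		if (!dq)
			continue;

		process_one_fd(dpaa2_dq_fd(dq),
			       (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq));
		cleaned++;
	} while (!is_last);

	return cleaned;
}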

static int dpaa2_eth_ptp_parse(struct sk_buff *skb,
			       u8 *msgtype, u8 *twostep, u8 *udp,
			       u16 *correction_offset,
			       u16 *origintimestamp_offset)
{}

/* Configure the egress frame annotation for timestamp update */
static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv,
				       struct dpaa2_fd *fd,
				       void *buf_start,
				       struct sk_buff *skb)
{}

void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv)
{}

void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf)
{}

/* Create a frame descriptor based on a fragmented skb */
static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
				 struct sk_buff *skb,
				 struct dpaa2_fd *fd,
				 void **swa_addr)
{}

/* Create an SG frame descriptor based on a linear skb.
 *
 * This function is used on the Tx path when the skb headroom is not large
 * enough for the HW requirements, so instead of reallocating the skb we
 * create an SG frame descriptor with only one entry.
 */
static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
					    struct sk_buff *skb,
					    struct dpaa2_fd *fd,
					    void **swa_addr)
{}
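
/* Illustrative sketch of the one-entry S/G table idea explained above; not
 * the driver's implementation (allocation, DMA mapping and the software
 * annotation are omitted). sgt_buf/sgt_addr and data_addr are assumed to be
 * already allocated and DMA-mapped by the caller.
 */
static void dpaa2_eth_single_entry_sgt_sketch(struct dpaa2_fd *fd,
					      void *sgt_buf, dma_addr_t sgt_addr,
					      dma_addr_t data_addr, u32 data_len,
					      u16 sgt_offset)
{
	struct dpaa2_sg_entry *sgt = sgt_buf;

	/* A single S/G entry pointing at the linear skb data */
	dpaa2_sg_set_addr(&sgt[0], data_addr);
	dpaa2_sg_set_len(&sgt[0], data_len);
	dpaa2_sg_set_final(&sgt[0], true);

	/* The FD points at the S/G table rather than at the data itself */
	dpaa2_fd_set_addr(fd, sgt_addr);
	dpaa2_fd_set_offset(fd, sgt_offset);
	dpaa2_fd_set_format(fd, dpaa2_fd_sg);
	dpaa2_fd_set_len(fd, data_len);
}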

/* Create a frame descriptor based on a linear skb */
static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
				     struct sk_buff *skb,
				     struct dpaa2_fd *fd,
				     void **swa_addr)
{}

/* FD freeing routine on the Tx path
 *
 * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
 * back-pointed to is also freed.
 * This can be called either from dpaa2_eth_tx_conf() or on the error path of
 * dpaa2_eth_tx().
 */
void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
			  struct dpaa2_eth_channel *ch,
			  struct dpaa2_eth_fq *fq,
			  const struct dpaa2_fd *fd, bool in_napi)
{}

static int dpaa2_eth_build_gso_fd(struct dpaa2_eth_priv *priv,
				  struct sk_buff *skb, struct dpaa2_fd *fd,
				  int *num_fds, u32 *total_fds_len)
{}

static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
				  struct net_device *net_dev)
{}

static void dpaa2_eth_tx_onestep_tstamp(struct work_struct *work)
{}

static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
{}

/* Tx confirmation frame processing routine */
static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
			      struct dpaa2_eth_channel *ch,
			      const struct dpaa2_fd *fd,
			      struct dpaa2_eth_fq *fq)
{}

static int dpaa2_eth_set_rx_vlan_filtering(struct dpaa2_eth_priv *priv,
					   bool enable)
{}

static int dpaa2_eth_set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
{}

static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
{}

/* Perform a single release command to add buffers
 * to the specified buffer pool
 */
static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
			      struct dpaa2_eth_channel *ch)
{}
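
/* Illustrative sketch of the single release command mentioned above; not the
 * driver's code. buf_array is assumed to already hold DMA-mapped buffer
 * addresses, and DPAA2_ETH_BUFS_PER_CMD (from dpaa2-eth.h) bounds how many
 * buffers one command can carry. The retry bound is arbitrary.
 */
static int dpaa2_eth_release_bufs_sketch(struct dpaa2_io *io, u16 bpid,
					 const u64 *buf_array,
					 unsigned int count)
{
	unsigned int retries = 10;
	int err;

	if (WARN_ON(count > DPAA2_ETH_BUFS_PER_CMD))
		return -EINVAL;

	/* The release can transiently fail while the QBMan portal is busy */
	do {
		err = dpaa2_io_service_release(io, bpid, buf_array, count);
	} while (err == -EBUSY && --retries);

	return err;
}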

static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv,
			       struct dpaa2_eth_channel *ch)
{}

static void dpaa2_eth_seed_pools(struct dpaa2_eth_priv *priv)
{}

/*
 * Drain the specified number of buffers from one of the DPNI's private buffer
 * pools.
 * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
 */
static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int bpid,
				 int count)
{}
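
/* Illustrative counterpart to the release sketch above: pull buffers back out
 * of the pool with dpaa2_io_service_acquire(), which returns how many buffers
 * were actually acquired (or a negative error). Unmapping and freeing of the
 * returned addresses is omitted here.
 */
static void dpaa2_eth_acquire_bufs_sketch(struct dpaa2_io *io, u16 bpid,
					  unsigned int count)
{
	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
	int ret;

	if (WARN_ON(count > DPAA2_ETH_BUFS_PER_CMD))
		return;

	do {
		ret = dpaa2_io_service_acquire(io, bpid, buf_array, count);
		if (ret < 0)
			return;
		/* ... unmap and free the 'ret' buffers in buf_array ... */
	} while (ret);
}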

static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv, int bpid)
{}

static void dpaa2_eth_drain_pools(struct dpaa2_eth_priv *priv)
{}

/* This function is called from softirq context only, so we don't need to
 * guard accesses to the percpu count.
 */
static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv,
				 struct dpaa2_eth_channel *ch)
{}

static void dpaa2_eth_sgt_cache_drain(struct dpaa2_eth_priv *priv)
{}

static int dpaa2_eth_pull_channel(struct dpaa2_eth_channel *ch)
{}

/* NAPI poll routine
 *
 * Frames are dequeued from the QMan channel associated with this NAPI context.
 * Rx, Tx confirmation and (if configured) Rx error frames all count
 * towards the NAPI budget.
 */
static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
{}
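
/* Illustrative sketch of the NAPI budget contract the comment above refers
 * to, not the driver's poll loop: keep consuming while a full budget's worth
 * of work remains, and only complete NAPI (re-arming notifications) when less
 * than the budget was used. consume_frames() is a hypothetical stand-in for
 * the per-channel dequeue work.
 */
static int dpaa2_eth_poll_sketch(struct napi_struct *napi, int budget,
				 int (*consume_frames)(int budget))
{
	int cleaned = consume_frames(budget);

	if (cleaned >= budget)
		return budget;	/* more work pending, stay scheduled */

	napi_complete_done(napi, cleaned);
	return cleaned;
}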

static void dpaa2_eth_enable_ch_napi(struct dpaa2_eth_priv *priv)
{}

static void dpaa2_eth_disable_ch_napi(struct dpaa2_eth_priv *priv)
{}

void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
			       bool tx_pause, bool pfc)
{}

static int dpaa2_eth_link_state_update(struct dpaa2_eth_priv *priv)
{}

static int dpaa2_eth_open(struct net_device *net_dev)
{}

/* Total number of in-flight frames on ingress queues */
static u32 dpaa2_eth_ingress_fq_count(struct dpaa2_eth_priv *priv)
{}
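
/* Illustrative sketch for the in-flight frame count mentioned above: sum the
 * frame counts of the Rx FQs via dpaa2_io_query_fq_count(). Iterating
 * priv->fq[] and checking fq->type against DPAA2_RX_FQ mirrors how the
 * driver's FQ table is laid out, but treat those fields as assumptions here.
 */
static u32 dpaa2_eth_count_rx_frames_sketch(struct dpaa2_eth_priv *priv)
{
	u32 fcnt = 0, bcnt = 0, total = 0;
	int i;

	for (i = 0; i < priv->num_fqs; i++) {
		if (priv->fq[i].type != DPAA2_RX_FQ)
			continue;

		if (dpaa2_io_query_fq_count(NULL, priv->fq[i].fqid,
					    &fcnt, &bcnt))
			continue;
		total += fcnt;
	}

	return total;
}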

static void dpaa2_eth_wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv)
{}

#define DPNI_TX_PENDING_VER_MAJOR
#define DPNI_TX_PENDING_VER_MINOR
static void dpaa2_eth_wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv)
{}

static int dpaa2_eth_stop(struct net_device *net_dev)
{}

static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
{}

/* Fill in counters maintained by the GPP driver. These may be different from
 * the hardware counters obtained by ethtool.
 */
static void dpaa2_eth_get_stats(struct net_device *net_dev,
				struct rtnl_link_stats64 *stats)
{}

/* Copy MAC unicast addresses from @net_dev to @priv.
 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
 */
static void dpaa2_eth_add_uc_hw_addr(const struct net_device *net_dev,
				     struct dpaa2_eth_priv *priv)
{}

/* Copy MAC multicast addresses from @net_dev to @priv.
 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
 */
static void dpaa2_eth_add_mc_hw_addr(const struct net_device *net_dev,
				     struct dpaa2_eth_priv *priv)
{}

static int dpaa2_eth_rx_add_vid(struct net_device *net_dev,
				__be16 vlan_proto, u16 vid)
{}

static int dpaa2_eth_rx_kill_vid(struct net_device *net_dev,
				 __be16 vlan_proto, u16 vid)
{}

static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
{}

static int dpaa2_eth_set_features(struct net_device *net_dev,
				  netdev_features_t features)
{}

static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{}

static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{}

static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
{}

static int dpaa2_eth_set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp)
{}

static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
{}

static int dpaa2_eth_update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
{}

static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog)
{}

static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{}

static int dpaa2_eth_xdp_create_fd(struct net_device *net_dev,
				   struct xdp_frame *xdpf,
				   struct dpaa2_fd *fd)
{}

static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
			      struct xdp_frame **frames, u32 flags)
{}

static int update_xps(struct dpaa2_eth_priv *priv)
{}

static int dpaa2_eth_setup_mqprio(struct net_device *net_dev,
				  struct tc_mqprio_qopt *mqprio)
{}

#define bps_to_mbits(rate)

static int dpaa2_eth_setup_tbf(struct net_device *net_dev, struct tc_tbf_qopt_offload *p)
{}

static int dpaa2_eth_setup_tc(struct net_device *net_dev,
			      enum tc_setup_type type, void *type_data)
{}

static const struct net_device_ops dpaa2_eth_ops =;

static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
{}

/* Allocate and configure a DPCON object */
static struct fsl_mc_device *dpaa2_eth_setup_dpcon(struct dpaa2_eth_priv *priv)
{}

static void dpaa2_eth_free_dpcon(struct dpaa2_eth_priv *priv,
				 struct fsl_mc_device *dpcon)
{}

static struct dpaa2_eth_channel *dpaa2_eth_alloc_channel(struct dpaa2_eth_priv *priv)
{}

static void dpaa2_eth_free_channel(struct dpaa2_eth_priv *priv,
				   struct dpaa2_eth_channel *channel)
{}

/* DPIO setup: allocate and configure QBMan channels, setup core affinity
 * and register data availability notifications
 */
static int dpaa2_eth_setup_dpio(struct dpaa2_eth_priv *priv)
{}

static void dpaa2_eth_free_dpio(struct dpaa2_eth_priv *priv)
{}

static struct dpaa2_eth_channel *dpaa2_eth_get_affine_channel(struct dpaa2_eth_priv *priv,
							      int cpu)
{}

static void dpaa2_eth_set_fq_affinity(struct dpaa2_eth_priv *priv)
{}

static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv *priv)
{}

/* Allocate and configure a buffer pool */
struct dpaa2_eth_bp *dpaa2_eth_allocate_dpbp(struct dpaa2_eth_priv *priv)
{}

static int dpaa2_eth_setup_default_dpbp(struct dpaa2_eth_priv *priv)
{}

void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv, struct dpaa2_eth_bp *bp)
{}

static void dpaa2_eth_free_dpbps(struct dpaa2_eth_priv *priv)
{}

static int dpaa2_eth_set_buffer_layout(struct dpaa2_eth_priv *priv)
{}

#define DPNI_ENQUEUE_FQID_VER_MAJOR
#define DPNI_ENQUEUE_FQID_VER_MINOR

static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
				       struct dpaa2_eth_fq *fq,
				       struct dpaa2_fd *fd, u8 prio,
				       u32 num_frames __always_unused,
				       int *frames_enqueued)
{}

static inline int dpaa2_eth_enqueue_fq_multiple(struct dpaa2_eth_priv *priv,
						struct dpaa2_eth_fq *fq,
						struct dpaa2_fd *fd,
						u8 prio, u32 num_frames,
						int *frames_enqueued)
{}

static void dpaa2_eth_set_enqueue_mode(struct dpaa2_eth_priv *priv)
{}

static int dpaa2_eth_set_pause(struct dpaa2_eth_priv *priv)
{}

static void dpaa2_eth_update_tx_fqids(struct dpaa2_eth_priv *priv)
{}

/* Configure ingress classification based on VLAN PCP */
static int dpaa2_eth_set_vlan_qos(struct dpaa2_eth_priv *priv)
{}

/* Configure the DPNI object this interface is associated with */
static int dpaa2_eth_setup_dpni(struct fsl_mc_device *ls_dev)
{}

static void dpaa2_eth_free_dpni(struct dpaa2_eth_priv *priv)
{}

static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv *priv,
				   struct dpaa2_eth_fq *fq)
{}

static int dpaa2_eth_setup_tx_flow(struct dpaa2_eth_priv *priv,
				   struct dpaa2_eth_fq *fq)
{}

static int setup_rx_err_flow(struct dpaa2_eth_priv *priv,
			     struct dpaa2_eth_fq *fq)
{}

/* Supported header fields for Rx hash distribution key */
static const struct dpaa2_eth_dist_fields dist_fields[] =;

/* Configure the Rx hash key using the legacy API */
static int dpaa2_eth_config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{}

/* Configure the Rx hash key using the new API */
static int dpaa2_eth_config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{}

/* Configure the Rx flow classification key */
static int dpaa2_eth_config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{}

/* Size of the Rx flow classification key */
int dpaa2_eth_cls_key_size(u64 fields)
{}

/* Offset of header field in Rx classification key */
int dpaa2_eth_cls_fld_off(int prot, int field)
{}

/* Prune unused fields from the classification rule.
 * Used when masking is not supported
 */
void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
{}

/* Set Rx distribution (hash or flow classification) key
 * flags is a combination of RXH_ bits
 */
static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
				  enum dpaa2_eth_rx_dist type, u64 flags)
{}

int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
{}

int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
{}

static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
{}

/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
 * frame queues and channels
 */
static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv)
{}

/* Allocate rings for storing incoming frame descriptors */
static int dpaa2_eth_alloc_rings(struct dpaa2_eth_priv *priv)
{}
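
/* Illustrative sketch of the per-channel ring (dequeue store) allocation
 * described above; not the driver's code. The store depth constant is
 * hypothetical and ch->store is assumed to be the channel's store pointer.
 */
#define DPAA2_ETH_SKETCH_STORE_FRAMES	16

static int dpaa2_eth_alloc_ring_sketch(struct device *dev,
				       struct dpaa2_eth_channel *ch)
{
	ch->store = dpaa2_io_store_create(DPAA2_ETH_SKETCH_STORE_FRAMES, dev);

	return ch->store ? 0 : -ENOMEM;
}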

static void dpaa2_eth_free_rings(struct dpaa2_eth_priv *priv)
{}

static int dpaa2_eth_set_mac_addr(struct dpaa2_eth_priv *priv)
{}

static int dpaa2_eth_netdev_init(struct net_device *net_dev)
{}

static int dpaa2_eth_poll_link_state(void *arg)
{}

static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
{}

static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv)
{}

static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
{}

static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev)
{}

static void dpaa2_eth_add_ch_napi(struct dpaa2_eth_priv *priv)
{}

static void dpaa2_eth_del_ch_napi(struct dpaa2_eth_priv *priv)
{}

static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
{}

static void dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
{}

static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] =;
MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);

static struct fsl_mc_driver dpaa2_eth_driver =;

static int __init dpaa2_eth_driver_init(void)
{}

static void __exit dpaa2_eth_driver_exit(void)
{}

module_init(dpaa2_eth_driver_init);
module_exit(dpaa2_eth_driver_exit);