#define pr_fmt(fmt) …
#include <linux/init.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/io.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/icmp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/platform_device.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/percpu.h>
#include <linux/dma-mapping.h>
#include <linux/sort.h>
#include <linux/phy_fixed.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <soc/fsl/bman.h>
#include <soc/fsl/qman.h>
#include "fman.h"
#include "fman_port.h"
#include "mac.h"
#include "dpaa_eth.h"
#define CREATE_TRACE_POINTS
#include "dpaa_eth_trace.h"
static int debug = …;
module_param(debug, int, 0444);
MODULE_PARM_DESC(…) …;
static u16 tx_timeout = …;
module_param(tx_timeout, ushort, 0444);
MODULE_PARM_DESC(…) …;
#define FM_FD_STAT_RX_ERRORS …
#define FM_FD_STAT_TX_ERRORS …
#define DPAA_MSG_DEFAULT …
#define DPAA_INGRESS_CS_THRESHOLD …
#define DPAA_FQ_TD …
#define DPAA_CS_THRESHOLD_1G …
#define DPAA_CS_THRESHOLD_10G …
#define FSL_QMAN_MAX_OAL …
#ifdef CONFIG_DPAA_ERRATUM_A050385
#define DPAA_FD_DATA_ALIGNMENT …
#define DPAA_A050385_ALIGN …
#define DPAA_FD_RX_DATA_ALIGNMENT …
#else
#define DPAA_FD_DATA_ALIGNMENT …
#define DPAA_FD_RX_DATA_ALIGNMENT …
#endif
#define DPAA_SGT_SIZE …
#define FM_L3_PARSE_RESULT_IPV4 …
#define FM_L3_PARSE_RESULT_IPV6 …
#define FM_L4_PARSE_RESULT_UDP …
#define FM_L4_PARSE_RESULT_TCP …
#define FM_FD_STAT_L4CV …
#define DPAA_SGT_MAX_ENTRIES …
#define DPAA_BUFF_RELEASE_MAX …
#define FSL_DPAA_BPID_INV …
#define FSL_DPAA_ETH_MAX_BUF_COUNT …
#define FSL_DPAA_ETH_REFILL_THRESHOLD …
#define DPAA_TX_PRIV_DATA_SIZE …
#define DPAA_PARSE_RESULTS_SIZE …
#define DPAA_TIME_STAMP_SIZE …
#define DPAA_HASH_RESULTS_SIZE …
#define DPAA_HWA_SIZE …
#define DPAA_RX_PRIV_DATA_DEFAULT_SIZE …
#ifdef CONFIG_DPAA_ERRATUM_A050385
#define DPAA_RX_PRIV_DATA_A050385_SIZE …
#define DPAA_RX_PRIV_DATA_SIZE …
#else
#define DPAA_RX_PRIV_DATA_SIZE …
#endif
#define DPAA_ETH_PCD_RXQ_NUM …
#define DPAA_ENQUEUE_RETRIES …
enum port_type { … };
struct fm_port_fqs { … };
static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];
#define DPAA_BP_RAW_SIZE …
#ifdef CONFIG_DPAA_ERRATUM_A050385
#define dpaa_bp_size …
#else
#define dpaa_bp_size(raw_size) …
#endif
static int dpaa_max_frm;
static int dpaa_rx_extra_headroom;
#define dpaa_get_max_mtu() …
static void dpaa_eth_cgr_set_speed(struct mac_device *mac_dev, int speed);
static int dpaa_netdev_init(struct net_device *net_dev,
const struct net_device_ops *dpaa_ops,
u16 tx_timeout)
{ … }
static int dpaa_stop(struct net_device *net_dev)
{ … }
static void dpaa_tx_timeout(struct net_device *net_dev, unsigned int txqueue)
{ … }
static void dpaa_get_stats64(struct net_device *net_dev,
struct rtnl_link_stats64 *s)
{ … }
static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
void *type_data)
{ … }
static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev)
{ … }
static int dpaa_set_mac_address(struct net_device *net_dev, void *addr)
{ … }
static void dpaa_set_rx_mode(struct net_device *net_dev)
{ … }
static struct dpaa_bp *dpaa_bpid2pool(int bpid)
{ … }
static bool dpaa_bpid2pool_use(int bpid)
{ … }
static void dpaa_bpid2pool_map(int bpid, struct dpaa_bp *dpaa_bp)
{ … }
static int dpaa_bp_alloc_pool(struct dpaa_bp *dpaa_bp)
{ … }
static void dpaa_bp_drain(struct dpaa_bp *bp)
{ … }
static void dpaa_bp_free(struct dpaa_bp *dpaa_bp)
{ … }
static void dpaa_bps_free(struct dpaa_priv *priv)
{ … }
static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
{ … }
static struct dpaa_fq *dpaa_fq_alloc(struct device *dev,
u32 start, u32 count,
struct list_head *list,
enum dpaa_fq_type fq_type)
{ … }
static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
struct fm_port_fqs *port_fqs)
{ … }
static u32 rx_pool_channel;
static DEFINE_SPINLOCK(rx_pool_channel_init);
static int dpaa_get_channel(void)
{ … }
static void dpaa_release_channel(void)
{ … }
static void dpaa_eth_add_channel(u16 channel, struct device *dev)
{ … }
static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
int congested)
{ … }
static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
{ … }
static void dpaa_eth_cgr_set_speed(struct mac_device *mac_dev, int speed)
{ … }
static inline void dpaa_setup_ingress(const struct dpaa_priv *priv,
struct dpaa_fq *fq,
const struct qman_fq *template)
{ … }
static inline void dpaa_setup_egress(const struct dpaa_priv *priv,
struct dpaa_fq *fq,
struct fman_port *port,
const struct qman_fq *template)
{ … }
static int dpaa_fq_setup(struct dpaa_priv *priv,
const struct dpaa_fq_cbs *fq_cbs,
struct fman_port *tx_port)
{ … }
static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
struct qman_fq *tx_fq)
{ … }
static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
{ … }
static int dpaa_fq_free_entry(struct device *dev, struct qman_fq *fq)
{ … }
static int dpaa_fq_free(struct device *dev, struct list_head *list)
{ … }
static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
struct dpaa_fq *defq,
struct dpaa_buffer_layout *buf_layout)
{ … }
static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp *bp,
struct dpaa_fq *errq,
struct dpaa_fq *defq, struct dpaa_fq *pcdq,
struct dpaa_buffer_layout *buf_layout)
{ … }
static int dpaa_eth_init_ports(struct mac_device *mac_dev,
struct dpaa_bp *bp,
struct fm_port_fqs *port_fqs,
struct dpaa_buffer_layout *buf_layout,
struct device *dev)
{ … }
static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp,
struct bm_buffer *bmb, int cnt)
{ … }
static void dpaa_release_sgt_members(struct qm_sg_entry *sgt)
{ … }
static void dpaa_fd_release(const struct net_device *net_dev,
const struct qm_fd *fd)
{ … }
static void count_ern(struct dpaa_percpu_priv *percpu_priv,
const union qm_mr_entry *msg)
{ … }
static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
struct sk_buff *skb,
struct qm_fd *fd,
void *parse_results)
{ … }
static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
{ … }
static int dpaa_bp_seed(struct dpaa_bp *dpaa_bp)
{ … }
static int dpaa_eth_refill_bpool(struct dpaa_bp *dpaa_bp, int *countptr)
{ … }
static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
{ … }
static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
const struct qm_fd *fd, bool ts)
{ … }
static u8 rx_csum_offload(const struct dpaa_priv *priv, const struct qm_fd *fd)
{ … }
#define PTR_IS_ALIGNED(x, a) …
static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
const struct qm_fd *fd)
{ … }
static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
const struct qm_fd *fd)
{ … }
static int skb_to_contig_fd(struct dpaa_priv *priv,
struct sk_buff *skb, struct qm_fd *fd,
int *offset)
{ … }
static int skb_to_sg_fd(struct dpaa_priv *priv,
struct sk_buff *skb, struct qm_fd *fd)
{ … }
static inline int dpaa_xmit(struct dpaa_priv *priv,
struct rtnl_link_stats64 *percpu_stats,
int queue,
struct qm_fd *fd)
{ … }
#ifdef CONFIG_DPAA_ERRATUM_A050385
/* Erratum A050385 workaround for skb TX: ensure the frame data, and every
 * scatter/gather segment boundary, is DPAA_A050385_ALIGN-aligned. If the skb
 * already satisfies the constraints it is left untouched; otherwise the
 * payload is linearized into a freshly allocated, properly aligned skb which
 * replaces *s. Returns 0 on success, -ENOMEM if the replacement allocation
 * fails (the original skb is preserved in that case).
 */
static int dpaa_a050385_wa_skb(struct net_device *net_dev, struct sk_buff **s)
{
	struct dpaa_priv *priv = netdev_priv(net_dev);
	struct sk_buff *copy, *orig = *s;
	unsigned char *aligned_start;
	unsigned char frag_idx;

	if (!PTR_IS_ALIGNED(orig->data, DPAA_A050385_ALIGN))
		goto realign;

	/* An aligned linear skb needs no further checks */
	if (!skb_is_nonlinear(orig))
		return 0;

	/* For S/G frames the linear part and each fragment must also start
	 * on an aligned boundary
	 */
	if (!IS_ALIGNED(skb_headlen(orig), DPAA_A050385_ALIGN))
		goto realign;

	for (frag_idx = 0; frag_idx < skb_shinfo(orig)->nr_frags; frag_idx++) {
		skb_frag_t *frag = &skb_shinfo(orig)->frags[frag_idx];

		if (!IS_ALIGNED(skb_frag_off(frag), DPAA_A050385_ALIGN))
			goto realign;

		/* Only the last fragment may have an unaligned length */
		if (frag_idx < skb_shinfo(orig)->nr_frags - 1 &&
		    !IS_ALIGNED(skb_frag_size(frag), DPAA_A050385_ALIGN))
			goto realign;
	}

	return 0;

realign:
	/* Room for the payload, worst-case alignment slack and TX headroom */
	copy = netdev_alloc_skb(net_dev, orig->len + DPAA_A050385_ALIGN - 1 +
				priv->tx_headroom);
	if (!copy)
		return -ENOMEM;

	/* netdev_alloc_skb() already reserved NET_SKB_PAD; top it up to the
	 * driver's TX headroom, then round the data pointer up to alignment
	 */
	skb_reserve(copy, priv->tx_headroom - NET_SKB_PAD);
	aligned_start = PTR_ALIGN(copy->data, DPAA_A050385_ALIGN);
	if (aligned_start - copy->data)
		skb_reserve(copy, aligned_start - copy->data);

	skb_put(copy, orig->len);
	skb_copy_bits(orig, 0, copy->data, orig->len);
	skb_copy_header(copy, orig);
	copy->dev = orig->dev;

	/* Carry the timestamping request over so TX completion can still
	 * deliver the timestamp to the right socket
	 */
	if (priv->tx_tstamp) {
		skb_shinfo(copy)->tx_flags = skb_shinfo(orig)->tx_flags;
		skb_shinfo(copy)->hwtstamps = skb_shinfo(orig)->hwtstamps;
		skb_shinfo(copy)->tskey = skb_shinfo(orig)->tskey;
		if (orig->sk)
			skb_set_owner_w(copy, orig->sk);
	}

	skb_set_network_header(copy, skb_network_offset(orig));
	skb_set_transport_header(copy, skb_transport_offset(orig));

	dev_kfree_skb(orig);
	*s = copy;

	return 0;
}
/* Erratum A050385 workaround for XDP TX: make the frame's data pointer
 * DPAA_FD_DATA_ALIGNMENT-aligned with at least tx_headroom bytes in front
 * of it. Tries, in order: (1) accept the frame as-is, (2) shift the payload
 * down within its existing buffer, (3) copy it into a new order-0 page.
 * Returns 0 on success, -ENOMEM if the frame cannot be made to fit.
 */
static int dpaa_a050385_wa_xdpf(struct dpaa_priv *priv,
				struct xdp_frame **init_xdpf)
{
	struct xdp_frame *xdpf = *init_xdpf, *copy;
	void *buf, *aligned_data;
	struct page *page;
	u32 shift;
	int min_headroom;

	/* Case 1: already aligned and with enough headroom for the driver's
	 * TX private area — just trim the headroom down to what we need
	 */
	if (PTR_IS_ALIGNED(xdpf->data, DPAA_FD_DATA_ALIGNMENT) &&
	    xdpf->headroom >= priv->tx_headroom) {
		xdpf->headroom = priv->tx_headroom;
		return 0;
	}

	/* Case 2: move the data down to the previous aligned address if the
	 * headroom can absorb both the shift and the TX headroom
	 */
	aligned_data = PTR_ALIGN_DOWN(xdpf->data, DPAA_FD_DATA_ALIGNMENT);
	shift = xdpf->data - aligned_data;
	if (xdpf->headroom >= shift + priv->tx_headroom) {
		memmove(aligned_data, xdpf->data, xdpf->len);
		xdpf->data = aligned_data;
		xdpf->headroom = priv->tx_headroom;
		return 0;
	}

	/* Case 3: rebuild the frame in a fresh page. The new headroom hosts
	 * the xdp_frame metadata plus the TX headroom, rounded up so the
	 * data lands on an aligned address.
	 */
	min_headroom = ALIGN(sizeof(*copy) + priv->tx_headroom,
			     DPAA_FD_DATA_ALIGNMENT);

	/* Payload plus headroom must still fit a buffer-pool sized buffer,
	 * leaving room for the trailing skb_shared_info
	 */
	if (min_headroom + xdpf->len > DPAA_BP_RAW_SIZE -
	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
		return -ENOMEM;

	page = dev_alloc_pages(0);
	if (unlikely(!page))
		return -ENOMEM;

	buf = page_address(page);
	memcpy(buf + min_headroom, xdpf->data, xdpf->len);

	/* The xdp_frame struct lives at the start of the new buffer */
	copy = buf;
	copy->data = buf + min_headroom;
	copy->len = xdpf->len;
	copy->headroom = priv->tx_headroom;
	copy->frame_sz = DPAA_BP_RAW_SIZE;
	copy->mem.type = MEM_TYPE_PAGE_ORDER0;

	/* Release the original frame back to its memory model */
	xdp_return_frame_rx_napi(xdpf);
	*init_xdpf = copy;

	return 0;
}
#endif
static netdev_tx_t
dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
{ … }
static void dpaa_rx_error(struct net_device *net_dev,
const struct dpaa_priv *priv,
struct dpaa_percpu_priv *percpu_priv,
const struct qm_fd *fd,
u32 fqid)
{ … }
static void dpaa_tx_error(struct net_device *net_dev,
const struct dpaa_priv *priv,
struct dpaa_percpu_priv *percpu_priv,
const struct qm_fd *fd,
u32 fqid)
{ … }
static int dpaa_eth_poll(struct napi_struct *napi, int budget)
{ … }
static void dpaa_tx_conf(struct net_device *net_dev,
const struct dpaa_priv *priv,
struct dpaa_percpu_priv *percpu_priv,
const struct qm_fd *fd,
u32 fqid)
{ … }
static inline int dpaa_eth_napi_schedule(struct dpaa_percpu_priv *percpu_priv,
struct qman_portal *portal, bool sched_napi)
{ … }
static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal,
struct qman_fq *fq,
const struct qm_dqrr_entry *dq,
bool sched_napi)
{ … }
static int dpaa_xdp_xmit_frame(struct net_device *net_dev,
struct xdp_frame *xdpf)
{ … }
static u32 dpaa_run_xdp(struct dpaa_priv *priv, struct qm_fd *fd, void *vaddr,
struct dpaa_fq *dpaa_fq, unsigned int *xdp_meta_len)
{ … }
static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
struct qman_fq *fq,
const struct qm_dqrr_entry *dq,
bool sched_napi)
{ … }
static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal,
struct qman_fq *fq,
const struct qm_dqrr_entry *dq,
bool sched_napi)
{ … }
static enum qman_cb_dqrr_result conf_dflt_dqrr(struct qman_portal *portal,
struct qman_fq *fq,
const struct qm_dqrr_entry *dq,
bool sched_napi)
{ … }
static void egress_ern(struct qman_portal *portal,
struct qman_fq *fq,
const union qm_mr_entry *msg)
{ … }
static const struct dpaa_fq_cbs dpaa_fq_cbs = …;
static void dpaa_eth_napi_enable(struct dpaa_priv *priv)
{ … }
static void dpaa_eth_napi_disable(struct dpaa_priv *priv)
{ … }
static int dpaa_open(struct net_device *net_dev)
{ … }
static int dpaa_eth_stop(struct net_device *net_dev)
{ … }
static bool xdp_validate_mtu(struct dpaa_priv *priv, int mtu)
{ … }
static int dpaa_change_mtu(struct net_device *net_dev, int new_mtu)
{ … }
static int dpaa_setup_xdp(struct net_device *net_dev, struct netdev_bpf *bpf)
{ … }
static int dpaa_xdp(struct net_device *net_dev, struct netdev_bpf *xdp)
{ … }
static int dpaa_xdp_xmit(struct net_device *net_dev, int n,
struct xdp_frame **frames, u32 flags)
{ … }
static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{ … }
static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
{ … }
static const struct net_device_ops dpaa_ops = …;
static int dpaa_napi_add(struct net_device *net_dev)
{ … }
static void dpaa_napi_del(struct net_device *net_dev)
{ … }
static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
struct bm_buffer *bmb)
{ … }
static struct dpaa_bp *dpaa_bp_alloc(struct device *dev)
{ … }
static int dpaa_ingress_cgr_init(struct dpaa_priv *priv)
{ … }
static u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl,
enum port_type port)
{ … }
static int dpaa_eth_probe(struct platform_device *pdev)
{ … }
static void dpaa_remove(struct platform_device *pdev)
{ … }
static const struct platform_device_id dpaa_devtype[] = …;
MODULE_DEVICE_TABLE(platform, dpaa_devtype);
static struct platform_driver dpaa_driver = …;
static int __init dpaa_load(void)
{ … }
module_init(…) …;
static void __exit dpaa_unload(void)
{ … }
module_exit(dpaa_unload);
MODULE_LICENSE(…) …;
MODULE_DESCRIPTION(…) …;