linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c

// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
 * Copyright 2008 - 2016 Freescale Semiconductor Inc.
 * Copyright 2020 NXP
 */

#define pr_fmt(fmt)

#include <linux/init.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/io.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/icmp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/platform_device.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/percpu.h>
#include <linux/dma-mapping.h>
#include <linux/sort.h>
#include <linux/phy_fixed.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <soc/fsl/bman.h>
#include <soc/fsl/qman.h>
#include "fman.h"
#include "fman_port.h"
#include "mac.h"
#include "dpaa_eth.h"

/* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa files
 * using these trace events only need to #include "dpaa_eth_trace.h".
 */
#define CREATE_TRACE_POINTS
#include "dpaa_eth_trace.h"

static int debug = -1;
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "Module/Driver verbosity level (0=none, ..., 16=all)");

static u16 tx_timeout = 1000;
module_param(tx_timeout, ushort, 0444);
MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");

#define FM_FD_STAT_RX_ERRORS

#define FM_FD_STAT_TX_ERRORS

#define DPAA_MSG_DEFAULT

#define DPAA_INGRESS_CS_THRESHOLD
/* Ingress congestion threshold on FMan ports
 * The size in bytes of the ingress tail-drop threshold. Traffic piling up
 * above this value will be rejected by QMan and discarded by FMan.
 */

/* Size in bytes of the FQ taildrop threshold */
#define DPAA_FQ_TD

#define DPAA_CS_THRESHOLD_1G
/* Egress congestion threshold on 1G ports, range 0x1000 .. 0x10000000
 * The size in bytes of the egress Congestion State notification threshold on
 * 1G ports. The 1G dTSECs can quite easily be flooded by cores doing Tx in a
 * tight loop (e.g. by sending UDP datagrams at "while(1) speed"),
 * and the larger the frame size, the more acute the problem.
 * So we have to find a balance between these factors:
 * - avoiding the device staying congested for a prolonged time (risking
 *   that the netdev watchdog fires - see also the tx_timeout module param);
 * - affecting performance of protocols such as TCP, which otherwise
 *   behave well under the congestion notification mechanism;
 * - preventing the Tx cores from tightly-looping (as if the congestion
 *   threshold was too low to be effective);
 * - running out of memory if the CS threshold is set too high.
 */

#define DPAA_CS_THRESHOLD_10G
/* The size in bytes of the egress Congestion State notification threshold on
 * 10G ports, range 0x1000 .. 0x10000000
 */

/* Largest value that the FQD's OAL field can hold */
#define FSL_QMAN_MAX_OAL

/* Default alignment for start of data in an Rx FD */
#ifdef CONFIG_DPAA_ERRATUM_A050385
/* aligning data start to 64 avoids DMA transaction splits, unless the buffer
 * is crossing a 4k page boundary
 */
#define DPAA_FD_DATA_ALIGNMENT
/* aligning to 256 avoids DMA transaction splits caused by 4k page boundary
 * crossings; also, all SG fragments except the last must have a size multiple
 * of 256 to avoid DMA transaction splits
 */
#define DPAA_A050385_ALIGN
#define DPAA_FD_RX_DATA_ALIGNMENT
#else
#define DPAA_FD_DATA_ALIGNMENT
#define DPAA_FD_RX_DATA_ALIGNMENT
#endif

/* The DPAA requires 256 bytes reserved and mapped for the SGT */
#define DPAA_SGT_SIZE

/* Values for the L3R field of the FM Parse Results
 */
/* L3 Type field: First IP Present IPv4 */
#define FM_L3_PARSE_RESULT_IPV4
/* L3 Type field: First IP Present IPv6 */
#define FM_L3_PARSE_RESULT_IPV6
/* Values for the L4R field of the FM Parse Results */
/* L4 Type field: UDP */
#define FM_L4_PARSE_RESULT_UDP
/* L4 Type field: TCP */
#define FM_L4_PARSE_RESULT_TCP

/* FD status field indicating whether the FM Parser has attempted to validate
 * the L4 csum of the frame.
 * Note that having this bit set doesn't necessarily imply that the checksum
 * is valid. One would have to check the parse results to find that out.
 */
#define FM_FD_STAT_L4CV
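
/* Illustrative sketch (not the driver's implementation): how the L4CV status
 * bit is typically combined with the RXCSUM feature flag when deciding the
 * checksum status of a received frame. It assumes that errored frames were
 * already steered to the Rx error queue, and that struct qm_fd exposes the
 * status word as fd->status.
 */
static inline u8 dpaa_rx_csum_sketch(const struct net_device *net_dev,
				     const struct qm_fd *fd)
{
	if ((net_dev->features & NETIF_F_RXCSUM) &&
	    (be32_to_cpu(fd->status) & FM_FD_STAT_L4CV))
		return CHECKSUM_UNNECESSARY;

	return CHECKSUM_NONE;
}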

#define DPAA_SGT_MAX_ENTRIES
#define DPAA_BUFF_RELEASE_MAX

#define FSL_DPAA_BPID_INV
#define FSL_DPAA_ETH_MAX_BUF_COUNT
#define FSL_DPAA_ETH_REFILL_THRESHOLD

#define DPAA_TX_PRIV_DATA_SIZE
#define DPAA_PARSE_RESULTS_SIZE
#define DPAA_TIME_STAMP_SIZE
#define DPAA_HASH_RESULTS_SIZE
#define DPAA_HWA_SIZE
#define DPAA_RX_PRIV_DATA_DEFAULT_SIZE
#ifdef CONFIG_DPAA_ERRATUM_A050385
#define DPAA_RX_PRIV_DATA_A050385_SIZE
#define DPAA_RX_PRIV_DATA_SIZE
#else
#define DPAA_RX_PRIV_DATA_SIZE
#endif

#define DPAA_ETH_PCD_RXQ_NUM

#define DPAA_ENQUEUE_RETRIES

enum port_type {};

struct fm_port_fqs {};

/* All the dpa bps in use at any moment */
static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];

#define DPAA_BP_RAW_SIZE

#ifdef CONFIG_DPAA_ERRATUM_A050385
#define dpaa_bp_size(raw_size)
#else
#define dpaa_bp_size(raw_size)
#endif

static int dpaa_max_frm;

static int dpaa_rx_extra_headroom;

#define dpaa_get_max_mtu()

static void dpaa_eth_cgr_set_speed(struct mac_device *mac_dev, int speed);

static int dpaa_netdev_init(struct net_device *net_dev,
			    const struct net_device_ops *dpaa_ops,
			    u16 tx_timeout)
{}

static int dpaa_stop(struct net_device *net_dev)
{}

static void dpaa_tx_timeout(struct net_device *net_dev, unsigned int txqueue)
{}

/* Calculates the statistics for the given device by adding the statistics
 * collected by each CPU.
 */
static void dpaa_get_stats64(struct net_device *net_dev,
			     struct rtnl_link_stats64 *s)
{}
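
/* Illustrative sketch of the per-CPU aggregation described above, assuming one
 * struct rtnl_link_stats64 per possible CPU (the __percpu parameter stands in
 * for the driver's per-CPU private area). All counters are u64, so they can be
 * summed field by field.
 */
static void dpaa_sum_stats_sketch(struct rtnl_link_stats64 __percpu *pcpu_stats,
				  struct rtnl_link_stats64 *s)
{
	int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
	u64 *netstats = (u64 *)s;
	int cpu, i;

	for_each_possible_cpu(cpu) {
		u64 *cpustats = (u64 *)per_cpu_ptr(pcpu_stats, cpu);

		for (i = 0; i < numstats; i++)
			netstats[i] += cpustats[i];
	}
}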

static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
			 void *type_data)
{}

static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev)
{}

static int dpaa_set_mac_address(struct net_device *net_dev, void *addr)
{}

static void dpaa_set_rx_mode(struct net_device *net_dev)
{}

static struct dpaa_bp *dpaa_bpid2pool(int bpid)
{}

/* checks if this bpool is already allocated */
static bool dpaa_bpid2pool_use(int bpid)
{}

/* called only once per bpid by dpaa_bp_alloc_pool() */
static void dpaa_bpid2pool_map(int bpid, struct dpaa_bp *dpaa_bp)
{}
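
/* Illustrative sketch of the bpid bookkeeping above: the lookup is a direct
 * index into dpaa_bp_array, and reuse of an already-mapped bpid only bumps a
 * reference count. The "refs" field name is an assumption about struct
 * dpaa_bp.
 */
static bool dpaa_bpid2pool_use_sketch(int bpid)
{
	struct dpaa_bp *bp = dpaa_bpid2pool(bpid);

	if (!bp)
		return false;	/* not mapped yet; dpaa_bpid2pool_map() will */

	refcount_inc(&bp->refs);
	return true;
}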

static int dpaa_bp_alloc_pool(struct dpaa_bp *dpaa_bp)
{}

/* remove and free all the buffers from the given buffer pool */
static void dpaa_bp_drain(struct dpaa_bp *bp)
{}

static void dpaa_bp_free(struct dpaa_bp *dpaa_bp)
{}

static void dpaa_bps_free(struct dpaa_priv *priv)
{}

/* Use multiple WQs for FQ assignment:
 *	- Tx Confirmation queues go to WQ1.
 *	- Rx Error and Tx Error queues go to WQ5 (giving them a better chance
 *	  to be scheduled, in case there are many more FQs in WQ6).
 *	- Rx Default goes to WQ6.
 *	- Tx queues go to different WQs depending on their priority. Equal
 *	  chunks of NR_CPUS queues go to WQ6 (lowest priority), WQ2, WQ1 and
 *	  WQ0 (highest priority).
 * This ensures that Tx-confirmed buffers are released in a timely manner. In
 * particular, it avoids congestion on the Tx Confirm FQs, which can pile up
 * PFDRs if they are greatly outnumbered by other FQs in the system, while
 * dequeue scheduling is round-robin.
 */
static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
{}
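
/* Illustrative sketch of the WQ mapping described above. The fq_type constants
 * and the wq field are assumed to match enum dpaa_fq_type / struct dpaa_fq in
 * dpaa_eth.h; the Tx case walks equal chunks of NR_CPUS queues from lowest
 * (WQ6) to highest (WQ0) priority.
 */
static inline void dpaa_assign_wq_sketch(struct dpaa_fq *fq, int idx)
{
	switch (fq->fq_type) {
	case FQ_TYPE_TX_CONFIRM:
	case FQ_TYPE_TX_CONF_MQ:
		fq->wq = 1;
		break;
	case FQ_TYPE_RX_ERROR:
	case FQ_TYPE_TX_ERROR:
		fq->wq = 5;
		break;
	case FQ_TYPE_RX_DEFAULT:
	case FQ_TYPE_RX_PCD:
		fq->wq = 6;
		break;
	case FQ_TYPE_TX:
		switch (idx / NR_CPUS) {
		case 0:
			fq->wq = 6;
			break;
		case 1:
			fq->wq = 2;
			break;
		case 2:
			fq->wq = 1;
			break;
		default:
			fq->wq = 0;
			break;
		}
		break;
	default:
		WARN(1, "Invalid FQ type %d!\n", fq->fq_type);
	}
}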

static struct dpaa_fq *dpaa_fq_alloc(struct device *dev,
				     u32 start, u32 count,
				     struct list_head *list,
				     enum dpaa_fq_type fq_type)
{}

static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
			      struct fm_port_fqs *port_fqs)
{}

static u32 rx_pool_channel;
static DEFINE_SPINLOCK(rx_pool_channel_init);

static int dpaa_get_channel(void)
{}

static void dpaa_release_channel(void)
{}

static void dpaa_eth_add_channel(u16 channel, struct device *dev)
{}

/* Congestion group state change notification callback.
 * Stops the device's egress queues while they are congested and
 * wakes them upon exiting congested state.
 * Also updates some CGR-related stats.
 */
static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
			   int congested)
{}
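
/* Illustrative sketch of the stop/wake behaviour described above. The net_dev
 * back-pointer in struct dpaa_priv is an assumption, and the CGR-related stats
 * updates are left out.
 */
static void dpaa_eth_cgscn_sketch(struct dpaa_priv *priv, int congested)
{
	if (congested)
		netif_tx_stop_all_queues(priv->net_dev);
	else
		netif_tx_wake_all_queues(priv->net_dev);
}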

static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
{}

static void dpaa_eth_cgr_set_speed(struct mac_device *mac_dev, int speed)
{}

static inline void dpaa_setup_ingress(const struct dpaa_priv *priv,
				      struct dpaa_fq *fq,
				      const struct qman_fq *template)
{}

static inline void dpaa_setup_egress(const struct dpaa_priv *priv,
				     struct dpaa_fq *fq,
				     struct fman_port *port,
				     const struct qman_fq *template)
{}

static int dpaa_fq_setup(struct dpaa_priv *priv,
			 const struct dpaa_fq_cbs *fq_cbs,
			 struct fman_port *tx_port)
{}

static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
				   struct qman_fq *tx_fq)
{}

static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
{}

static int dpaa_fq_free_entry(struct device *dev, struct qman_fq *fq)
{}

static int dpaa_fq_free(struct device *dev, struct list_head *list)
{}

static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
				 struct dpaa_fq *defq,
				 struct dpaa_buffer_layout *buf_layout)
{}

static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp *bp,
				 struct dpaa_fq *errq,
				 struct dpaa_fq *defq, struct dpaa_fq *pcdq,
				 struct dpaa_buffer_layout *buf_layout)
{}

static int dpaa_eth_init_ports(struct mac_device *mac_dev,
			       struct dpaa_bp *bp,
			       struct fm_port_fqs *port_fqs,
			       struct dpaa_buffer_layout *buf_layout,
			       struct device *dev)
{}

static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp,
			     struct bm_buffer *bmb, int cnt)
{}

static void dpaa_release_sgt_members(struct qm_sg_entry *sgt)
{}

static void dpaa_fd_release(const struct net_device *net_dev,
			    const struct qm_fd *fd)
{}

static void count_ern(struct dpaa_percpu_priv *percpu_priv,
		      const union qm_mr_entry *msg)
{}

/* Turn on HW checksum computation for this outgoing frame.
 * If the current protocol is not something we support in this regard
 * (or if the stack has already computed the SW checksum), we do nothing.
 *
 * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
 * otherwise.
 *
 * Note that this function may modify the fd->cmd field and the skb data buffer
 * (the Parse Results area).
 */
static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
			       struct sk_buff *skb,
			       struct qm_fd *fd,
			       void *parse_results)
{}

static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
{}

static int dpaa_bp_seed(struct dpaa_bp *dpaa_bp)
{}

/* Add buffers (pages) for Rx processing whenever the bpool count falls below
 * REFILL_THRESHOLD.
 */
static int dpaa_eth_refill_bpool(struct dpaa_bp *dpaa_bp, int *countptr)
{}
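
/* Illustrative sketch of the refill loop: top the pool up in batches of eight
 * buffers (dpaa_bp_add_8_bufs() above) until MAX_BUF_COUNT is reached, bailing
 * out on allocation failure so the next NAPI cycle can retry.
 */
static int dpaa_eth_refill_bpool_sketch(struct dpaa_bp *dpaa_bp, int *countptr)
{
	int count = *countptr;
	int new_bufs;

	if (likely(count >= FSL_DPAA_ETH_REFILL_THRESHOLD))
		return 0;

	do {
		new_bufs = dpaa_bp_add_8_bufs(dpaa_bp);
		if (unlikely(!new_bufs))
			break;	/* temporarily out of memory */
		count += new_bufs;
	} while (count < FSL_DPAA_ETH_MAX_BUF_COUNT);

	*countptr = count;

	return count < FSL_DPAA_ETH_MAX_BUF_COUNT ? -ENOMEM : 0;
}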

static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
{}

/* Cleanup function for outgoing frame descriptors that were built on Tx path,
 * either contiguous frames or scatter/gather ones.
 * Skb freeing is not handled here.
 *
 * This function may be called on error paths in the Tx function, so guard
 * against cases where not all relevant FD fields were filled in. To avoid
 * reading an invalid transmission timestamp on those error paths, callers set
 * ts to false.
 *
 * Return the skb backpointer, since for S/G frames the buffer containing it
 * gets freed here.
 *
 * No skb backpointer is set when transmitting XDP frames. In that case, clean
 * up the buffer and return NULL.
 */
static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
					  const struct qm_fd *fd, bool ts)
{}

static u8 rx_csum_offload(const struct dpaa_priv *priv, const struct qm_fd *fd)
{}

#define PTR_IS_ALIGNED(x, a)

/* Build a linear skb around the received buffer.
 * We are guaranteed there is enough room at the end of the data buffer to
 * accommodate the shared info area of the skb.
 */
static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
					const struct qm_fd *fd)
{}
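
/* Illustrative sketch of the linear-skb build: wrap the buffer with
 * build_skb(), then position the data using the FD's offset and length. The
 * vaddr parameter (the buffer's virtual address) and the "size" member of
 * struct dpaa_bp are assumptions made for the example.
 */
static struct sk_buff *contig_fd_to_skb_sketch(const struct dpaa_bp *dpaa_bp,
					       const struct qm_fd *fd,
					       void *vaddr)
{
	struct sk_buff *skb;

	skb = build_skb(vaddr, dpaa_bp->size +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (WARN_ONCE(!skb, "Build skb failure on Rx\n"))
		return NULL;

	skb_reserve(skb, qm_fd_get_offset(fd));
	skb_put(skb, qm_fd_get_length(fd));

	return skb;
}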

/* Build an skb with the data of the first S/G entry in the linear portion and
 * the rest of the frame as skb fragments.
 *
 * The page fragment holding the S/G Table is recycled here.
 */
static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
				    const struct qm_fd *fd)
{}

static int skb_to_contig_fd(struct dpaa_priv *priv,
			    struct sk_buff *skb, struct qm_fd *fd,
			    int *offset)
{}

static int skb_to_sg_fd(struct dpaa_priv *priv,
			struct sk_buff *skb, struct qm_fd *fd)
{}

static inline int dpaa_xmit(struct dpaa_priv *priv,
			    struct rtnl_link_stats64 *percpu_stats,
			    int queue,
			    struct qm_fd *fd)
{}

#ifdef CONFIG_DPAA_ERRATUM_A050385
static int dpaa_a050385_wa_skb(struct net_device *net_dev, struct sk_buff **s)
{
	struct dpaa_priv *priv = netdev_priv(net_dev);
	struct sk_buff *new_skb, *skb = *s;
	unsigned char *start, i;

	/* check linear buffer alignment */
	if (!PTR_IS_ALIGNED(skb->data, DPAA_A050385_ALIGN))
		goto workaround;

	/* linear buffers just need to have an aligned start */
	if (!skb_is_nonlinear(skb))
		return 0;

	/* linear data size for nonlinear skbs needs to be aligned */
	if (!IS_ALIGNED(skb_headlen(skb), DPAA_A050385_ALIGN))
		goto workaround;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		/* all fragments need to have aligned start addresses */
		if (!IS_ALIGNED(skb_frag_off(frag), DPAA_A050385_ALIGN))
			goto workaround;

		/* all but last fragment need to have aligned sizes */
		if (!IS_ALIGNED(skb_frag_size(frag), DPAA_A050385_ALIGN) &&
		    (i < skb_shinfo(skb)->nr_frags - 1))
			goto workaround;
	}

	return 0;

workaround:
	/* copy all the skb content into a new linear buffer */
	new_skb = netdev_alloc_skb(net_dev, skb->len + DPAA_A050385_ALIGN - 1 +
						priv->tx_headroom);
	if (!new_skb)
		return -ENOMEM;

	/* NET_SKB_PAD bytes already reserved, adding up to tx_headroom */
	skb_reserve(new_skb, priv->tx_headroom - NET_SKB_PAD);

	/* Workaround for DPAA_A050385 requires data start to be aligned */
	start = PTR_ALIGN(new_skb->data, DPAA_A050385_ALIGN);
	if (start - new_skb->data)
		skb_reserve(new_skb, start - new_skb->data);

	skb_put(new_skb, skb->len);
	skb_copy_bits(skb, 0, new_skb->data, skb->len);
	skb_copy_header(new_skb, skb);
	new_skb->dev = skb->dev;

	/* Copy relevant timestamp info from the old skb to the new */
	if (priv->tx_tstamp) {
		skb_shinfo(new_skb)->tx_flags = skb_shinfo(skb)->tx_flags;
		skb_shinfo(new_skb)->hwtstamps = skb_shinfo(skb)->hwtstamps;
		skb_shinfo(new_skb)->tskey = skb_shinfo(skb)->tskey;
		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
	}

	/* We move the headroom when we align it so we have to reset the
	 * network and transport header offsets relative to the new data
	 * pointer. The checksum offload relies on these offsets.
	 */
	skb_set_network_header(new_skb, skb_network_offset(skb));
	skb_set_transport_header(new_skb, skb_transport_offset(skb));

	dev_kfree_skb(skb);
	*s = new_skb;

	return 0;
}

static int dpaa_a050385_wa_xdpf(struct dpaa_priv *priv,
				struct xdp_frame **init_xdpf)
{
	struct xdp_frame *new_xdpf, *xdpf = *init_xdpf;
	void *new_buff, *aligned_data;
	struct page *p;
	u32 data_shift;
	int headroom;

	/* Check the data alignment and make sure the headroom is large
	 * enough to store the xdpf backpointer. Use an aligned headroom
	 * value.
	 *
	 * Due to alignment constraints, we give XDP access to the full 256
	 * byte frame headroom. If the XDP program uses all of it, copy the
	 * data to a new buffer and make room for storing the backpointer.
	 */
	if (PTR_IS_ALIGNED(xdpf->data, DPAA_FD_DATA_ALIGNMENT) &&
	    xdpf->headroom >= priv->tx_headroom) {
		xdpf->headroom = priv->tx_headroom;
		return 0;
	}

	/* Try to move the data inside the buffer just enough to align it and
	 * store the xdpf backpointer. If the available headroom isn't large
	 * enough, resort to allocating a new buffer and copying the data.
	 */
	aligned_data = PTR_ALIGN_DOWN(xdpf->data, DPAA_FD_DATA_ALIGNMENT);
	data_shift = xdpf->data - aligned_data;

	/* The XDP frame's headroom needs to be large enough to accommodate
	 * shifting the data as well as storing the xdpf backpointer.
	 */
	if (xdpf->headroom >= data_shift + priv->tx_headroom) {
		memmove(aligned_data, xdpf->data, xdpf->len);
		xdpf->data = aligned_data;
		xdpf->headroom = priv->tx_headroom;
		return 0;
	}

	/* The new xdp_frame is stored in the new buffer. Reserve enough space
	 * in the headroom for storing it along with the driver's private
	 * info. The headroom needs to be aligned to DPAA_FD_DATA_ALIGNMENT to
	 * guarantee the data's alignment in the buffer.
	 */
	headroom = ALIGN(sizeof(*new_xdpf) + priv->tx_headroom,
			 DPAA_FD_DATA_ALIGNMENT);

	/* Ensure the extended headroom and data don't overflow the buffer,
	 * while maintaining the mandatory tailroom.
	 */
	if (headroom + xdpf->len > DPAA_BP_RAW_SIZE -
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
		return -ENOMEM;

	p = dev_alloc_pages(0);
	if (unlikely(!p))
		return -ENOMEM;

	/* Copy the data to the new buffer at a properly aligned offset */
	new_buff = page_address(p);
	memcpy(new_buff + headroom, xdpf->data, xdpf->len);

	/* Create an XDP frame around the new buffer in a similar fashion
	 * to xdp_convert_buff_to_frame.
	 */
	new_xdpf = new_buff;
	new_xdpf->data = new_buff + headroom;
	new_xdpf->len = xdpf->len;
	new_xdpf->headroom = priv->tx_headroom;
	new_xdpf->frame_sz = DPAA_BP_RAW_SIZE;
	new_xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;

	/* Release the initial buffer */
	xdp_return_frame_rx_napi(xdpf);

	*init_xdpf = new_xdpf;
	return 0;
}
#endif

static netdev_tx_t
dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
{}

static void dpaa_rx_error(struct net_device *net_dev,
			  const struct dpaa_priv *priv,
			  struct dpaa_percpu_priv *percpu_priv,
			  const struct qm_fd *fd,
			  u32 fqid)
{}

static void dpaa_tx_error(struct net_device *net_dev,
			  const struct dpaa_priv *priv,
			  struct dpaa_percpu_priv *percpu_priv,
			  const struct qm_fd *fd,
			  u32 fqid)
{}

static int dpaa_eth_poll(struct napi_struct *napi, int budget)
{}

static void dpaa_tx_conf(struct net_device *net_dev,
			 const struct dpaa_priv *priv,
			 struct dpaa_percpu_priv *percpu_priv,
			 const struct qm_fd *fd,
			 u32 fqid)
{}

static inline int dpaa_eth_napi_schedule(struct dpaa_percpu_priv *percpu_priv,
					 struct qman_portal *portal, bool sched_napi)
{}

static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal,
					      struct qman_fq *fq,
					      const struct qm_dqrr_entry *dq,
					      bool sched_napi)
{}

static int dpaa_xdp_xmit_frame(struct net_device *net_dev,
			       struct xdp_frame *xdpf)
{}

static u32 dpaa_run_xdp(struct dpaa_priv *priv, struct qm_fd *fd, void *vaddr,
			struct dpaa_fq *dpaa_fq, unsigned int *xdp_meta_len)
{}

static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
						struct qman_fq *fq,
						const struct qm_dqrr_entry *dq,
						bool sched_napi)
{}

static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal,
						struct qman_fq *fq,
						const struct qm_dqrr_entry *dq,
						bool sched_napi)
{}

static enum qman_cb_dqrr_result conf_dflt_dqrr(struct qman_portal *portal,
					       struct qman_fq *fq,
					       const struct qm_dqrr_entry *dq,
					       bool sched_napi)
{}

static void egress_ern(struct qman_portal *portal,
		       struct qman_fq *fq,
		       const union qm_mr_entry *msg)
{}

static const struct dpaa_fq_cbs dpaa_fq_cbs = {};

static void dpaa_eth_napi_enable(struct dpaa_priv *priv)
{}

static void dpaa_eth_napi_disable(struct dpaa_priv *priv)
{}

static int dpaa_open(struct net_device *net_dev)
{}

static int dpaa_eth_stop(struct net_device *net_dev)
{}

static bool xdp_validate_mtu(struct dpaa_priv *priv, int mtu)
{}

static int dpaa_change_mtu(struct net_device *net_dev, int new_mtu)
{}

static int dpaa_setup_xdp(struct net_device *net_dev, struct netdev_bpf *bpf)
{}

static int dpaa_xdp(struct net_device *net_dev, struct netdev_bpf *xdp)
{}

static int dpaa_xdp_xmit(struct net_device *net_dev, int n,
			 struct xdp_frame **frames, u32 flags)
{}

static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{}

static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
{}

static const struct net_device_ops dpaa_ops = {};

static int dpaa_napi_add(struct net_device *net_dev)
{}

static void dpaa_napi_del(struct net_device *net_dev)
{}

static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
				   struct bm_buffer *bmb)
{}

/* Alloc the dpaa_bp struct and configure default values */
static struct dpaa_bp *dpaa_bp_alloc(struct device *dev)
{}

/* Place all ingress FQs (Rx Default, Rx Error) in a dedicated CGR.
 * We won't be sending congestion notifications to FMan; for now, we just use
 * this CGR to generate enqueue rejections to FMan in order to drop the frames
 * before they reach our ingress queues and eat up memory.
 */
static int dpaa_ingress_cgr_init(struct dpaa_priv *priv)
{}
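
/* Illustrative sketch of the CGR setup described above: allocate a CGR id,
 * program only the Congestion State threshold and create the CGR with its
 * initial options. The additional qm_mcc_initcgr fields needed to enable
 * tail drop are omitted; field and helper names follow soc/fsl/qman.h as a
 * best-effort assumption.
 */
static int dpaa_ingress_cgr_init_sketch(struct qman_cgr *cgr)
{
	struct qm_mcc_initcgr initcgr;
	int err;

	err = qman_alloc_cgrid(&cgr->cgrid);
	if (err < 0)
		return err;

	memset(&initcgr, 0, sizeof(initcgr));
	initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
	qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres,
			      DPAA_INGRESS_CS_THRESHOLD, 1);

	return qman_create_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &initcgr);
}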

static u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl,
			     enum port_type port)
{}

static int dpaa_eth_probe(struct platform_device *pdev)
{}

static void dpaa_remove(struct platform_device *pdev)
{}

static const struct platform_device_id dpaa_devtype[] = {};
MODULE_DEVICE_TABLE(platform, dpaa_devtype);

static struct platform_driver dpaa_driver = {};

static int __init dpaa_load(void)
{}
module_init(dpaa_load);

static void __exit dpaa_unload(void)
{}
module_exit(dpaa_unload);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("FSL DPAA Ethernet driver");