linux/drivers/net/ethernet/fungible/funeth/funeth_main.c

// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)

#include <linux/bpf.h>
#include <linux/crash_dump.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/filter.h>
#include <linux/idr.h>
#include <linux/if_vlan.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/rtnetlink.h>
#include <linux/inetdevice.h>

#include "funeth.h"
#include "funeth_devlink.h"
#include "funeth_ktls.h"
#include "fun_port.h"
#include "fun_queue.h"
#include "funeth_txrx.h"

#define ADMIN_SQ_DEPTH
#define ADMIN_CQ_DEPTH
#define ADMIN_RQ_DEPTH

/* Default number of Tx/Rx queues. */
#define FUN_DFLT_QUEUES

enum {};

static const struct pci_device_id funeth_id_table[] =;

/* Issue a port write admin command with @n key/value pairs. */
static int fun_port_write_cmds(struct funeth_priv *fp, unsigned int n,
			       const int *keys, const u64 *data)
{}

int fun_port_write_cmd(struct funeth_priv *fp, int key, u64 data)
{}

/* Issue a port read admin command with @n key/value pairs. */
static int fun_port_read_cmds(struct funeth_priv *fp, unsigned int n,
			      const int *keys, u64 *data)
{}

int fun_port_read_cmd(struct funeth_priv *fp, int key, u64 *data)
{}

static void fun_report_link(struct net_device *netdev)
{}

static int fun_adi_write(struct fun_dev *fdev, enum fun_admin_adi_attr attr,
			 unsigned int adi_id, const struct fun_adi_param *param)
{}

/* Configure RSS for the given port. @op determines whether a new RSS context
 * is to be created or whether an existing one should be reconfigured. The
 * remaining parameters specify the hashing algorithm, key, and indirection
 * table.
 *
 * This initiates packet delivery to the Rx queues set in the indirection
 * table.
 */
int fun_config_rss(struct net_device *dev, int algo, const u8 *key,
		   const u32 *qtable, u8 op)
{}

/* Destroy the HW RSS context associated with the given port. This also stops
 * all packet delivery to our Rx queues.
 */
static void fun_destroy_rss(struct funeth_priv *fp)
{}

static void fun_irq_aff_notify(struct irq_affinity_notify *notify,
			       const cpumask_t *mask)
{}

static void fun_irq_aff_release(struct kref __always_unused *ref)
{}

/* Allocate an IRQ structure, assign an MSI-X index and initial affinity to it,
 * and add it to the IRQ XArray.
 */
static struct fun_irq *fun_alloc_qirq(struct funeth_priv *fp, unsigned int idx,
				      int node, unsigned int xa_idx_offset)
{}

static void fun_free_qirq(struct funeth_priv *fp, struct fun_irq *irq)
{}

/* Release the IRQs reserved for Tx/Rx queues that aren't being used. */
static void fun_prune_queue_irqs(struct net_device *dev)
{}

/* Reserve IRQs, one per queue, to accommodate the requested queue numbers @ntx
 * and @nrx. IRQs are added incrementally to those we already have.
 * We hold on to allocated IRQs until garbage collection of unused IRQs is
 * separately requested.
 */
static int fun_alloc_queue_irqs(struct net_device *dev, unsigned int ntx,
				unsigned int nrx)
{}

static void free_txqs(struct funeth_txq **txqs, unsigned int nqs,
		      unsigned int start, int state)
{}

static int alloc_txqs(struct net_device *dev, struct funeth_txq **txqs,
		      unsigned int nqs, unsigned int depth, unsigned int start,
		      int state)
{}

static void free_rxqs(struct funeth_rxq **rxqs, unsigned int nqs,
		      unsigned int start, int state)
{}

static int alloc_rxqs(struct net_device *dev, struct funeth_rxq **rxqs,
		      unsigned int nqs, unsigned int ncqe, unsigned int nrqe,
		      unsigned int start, int state)
{}

static void free_xdpqs(struct funeth_txq **xdpqs, unsigned int nqs,
		       unsigned int start, int state)
{}

static struct funeth_txq **alloc_xdpqs(struct net_device *dev, unsigned int nqs,
				       unsigned int depth, unsigned int start,
				       int state)
{}

static void fun_free_rings(struct net_device *netdev, struct fun_qset *qset)
{}

static int fun_alloc_rings(struct net_device *netdev, struct fun_qset *qset)
{}

/* Take queues to the next level. Presently this means creating them on the
 * device.
 */
static int fun_advance_ring_state(struct net_device *dev, struct fun_qset *qset)
{}

static int fun_port_create(struct net_device *netdev)
{}

static int fun_port_destroy(struct net_device *netdev)
{}

static int fun_eth_create(struct funeth_priv *fp)
{}

static int fun_vi_create(struct funeth_priv *fp)
{}

/* Helper to create an ETH flow and bind an SQ to it.
 * Returns the ETH id (>= 0) on success or a negative error.
 */
int fun_create_and_bind_tx(struct funeth_priv *fp, u32 sqid)
{}

static irqreturn_t fun_queue_irq_handler(int irq, void *data)
{}

static int fun_enable_irqs(struct net_device *dev)
{}

static void fun_disable_one_irq(struct fun_irq *irq)
{}

static void fun_disable_irqs(struct net_device *dev)
{}

static void fun_down(struct net_device *dev, struct fun_qset *qset)
{}

static int fun_up(struct net_device *dev, struct fun_qset *qset)
{}

static int funeth_open(struct net_device *netdev)
{}

static int funeth_close(struct net_device *netdev)
{}

static void fun_get_stats64(struct net_device *netdev,
			    struct rtnl_link_stats64 *stats)
{}

static int fun_change_mtu(struct net_device *netdev, int new_mtu)
{}

static int fun_set_macaddr(struct net_device *netdev, void *addr)
{}

static int fun_get_port_attributes(struct net_device *netdev)
{}

static int fun_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{}

static int fun_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{}

static int fun_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{}

/* Prepare the queues for XDP. */
static int fun_enter_xdp(struct net_device *dev, struct bpf_prog *prog)
{}

/* Set the queues for non-XDP operation. */
static void fun_end_xdp(struct net_device *dev)
{}

#define XDP_MAX_MTU

static int fun_xdp_setup(struct net_device *dev, struct netdev_bpf *xdp)
{}

static int fun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{}

static int fun_init_vports(struct fun_ethdev *ed, unsigned int n)
{}

static void fun_free_vports(struct fun_ethdev *ed)
{}

static struct fun_vport_info *fun_get_vport(struct fun_ethdev *ed,
					    unsigned int vport)
{}

static int fun_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{}

static int fun_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
			   __be16 vlan_proto)
{}

static int fun_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
			   int max_tx_rate)
{}

static int fun_get_vf_config(struct net_device *dev, int vf,
			     struct ifla_vf_info *ivi)
{}

static void fun_uninit(struct net_device *dev)
{}

static const struct net_device_ops fun_netdev_ops =;

#define GSO_ENCAP_FLAGS
#define TSO_FLAGS
#define VLAN_FEAT

static void fun_dflt_rss_indir(struct funeth_priv *fp, unsigned int nrx)
{}

/* Reset the RSS indirection table to equal distribution across the current
 * number of Rx queues. Called at init time and whenever the number of Rx
 * queues changes subsequently. Note that this may also resize the indirection
 * table.
 */
static void fun_reset_rss_indir(struct net_device *dev, unsigned int nrx)
{}

/* Update the RSS LUT to contain only queues in [0, nrx). Normally this will
 * update the LUT to an equal distribution among nrx queues. If @only_if_needed
 * is set the LUT is left unchanged if it already does not reference any queues
 * >= nrx.
 */
static int fun_rss_set_qnum(struct net_device *dev, unsigned int nrx,
			    bool only_if_needed)
{}

/* Allocate the DMA area for the RSS configuration commands to the device, and
 * initialize the hash, hash key, indirection table size and its entries to
 * their defaults. The indirection table defaults to equal distribution across
 * the Rx queues.
 */
static int fun_init_rss(struct net_device *dev)
{}

static void fun_free_rss(struct funeth_priv *fp)
{}

void fun_set_ring_count(struct net_device *netdev, unsigned int ntx,
			unsigned int nrx)
{}

static int fun_init_stats_area(struct funeth_priv *fp)
{}

static void fun_free_stats_area(struct funeth_priv *fp)
{}

static int fun_dl_port_register(struct net_device *netdev)
{}

/* Determine the max Tx/Rx queues for a port. */
static int fun_max_qs(struct fun_ethdev *ed, unsigned int *ntx,
		      unsigned int *nrx)
{}

static void fun_queue_defaults(struct net_device *dev, unsigned int nsqs)
{}

/* Replace the existing Rx/Tx/XDP queues with equal number of queues with
 * different settings, e.g. depth. This is a disruptive replacement that
 * temporarily shuts down the data path and should be limited to changes that
 * can't be applied to live queues. The old queues are always discarded.
 */
int fun_replace_queues(struct net_device *dev, struct fun_qset *newqs,
		       struct netlink_ext_ack *extack)
{}

/* Change the number of Rx/Tx queues of a device while it is up. This is done
 * by incrementally adding/removing queues to meet the new requirements while
 * handling ongoing traffic.
 */
int fun_change_num_queues(struct net_device *dev, unsigned int ntx,
			  unsigned int nrx)
{}

static int fun_create_netdev(struct fun_ethdev *ed, unsigned int portid)
{}

static void fun_destroy_netdev(struct net_device *netdev)
{}

static int fun_create_ports(struct fun_ethdev *ed, unsigned int nports)
{}

static void fun_destroy_ports(struct fun_ethdev *ed)
{}

static void fun_update_link_state(const struct fun_ethdev *ed,
				  const struct fun_admin_port_notif *notif)
{}

/* handler for async events delivered through the admin CQ */
static void fun_event_cb(struct fun_dev *fdev, void *entry)
{}

/* handler for pending work managed by the service task */
static void fun_service_cb(struct fun_dev *fdev)
{}

static int funeth_sriov_configure(struct pci_dev *pdev, int nvfs)
{}

static int funeth_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{}

static void funeth_remove(struct pci_dev *pdev)
{}

static struct pci_driver funeth_driver =;

module_pci_driver();

MODULE_AUTHOR();
MODULE_DESCRIPTION();
MODULE_LICENSE();
MODULE_DEVICE_TABLE(pci, funeth_id_table);