// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2024 Google LLC
 */

#include <linux/bpf.h>
#include <linux/cpumask.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/utsname.h>
#include <linux/version.h>
#include <net/netdev_queues.h>
#include <net/sch_generic.h>
#include <net/xdp_sock_drv.h>
#include "gve.h"
#include "gve_dqo.h"
#include "gve_adminq.h"
#include "gve_register.h"
#include "gve_utils.h"

#define GVE_DEFAULT_RX_COPYBREAK	(256)

#define DEFAULT_MSG_LEVEL	(NETIF_MSG_DRV | NETIF_MSG_LINK)
#define GVE_VERSION		"1.0.0"
#define GVE_VERSION_PREFIX	"GVE-"

/* Minimum amount of time between queue kicks in msec (10 seconds) */
#define MIN_TX_TIMEOUT_GAP (1000 * 10)

char gve_driver_name[] = "gve";
const char gve_version_str[] = GVE_VERSION;
static const char gve_version_prefix[] = GVE_VERSION_PREFIX;

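/* Report driver metadata (OS name, kernel version, driver version) to the
 * device over the admin queue so it can flag incompatible drivers early.
 */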
static int gve_verify_driver_compatibility(struct gve_priv *priv)
{}

static netdev_features_t gve_features_check(struct sk_buff *skb,
					    struct net_device *dev,
					    netdev_features_t features)
{
	struct gve_priv *priv = netdev_priv(dev);

	if (!gve_is_gqi(priv))
		return gve_features_check_dqo(skb, dev, features);

	return features;
}

static netdev_tx_t gve_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gve_priv *priv = netdev_priv(dev);

	if (gve_is_gqi(priv))
		return gve_tx(skb, dev);
	else
		return gve_tx_dqo(skb, dev);
}

static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
{}

static int gve_alloc_flow_rule_caches(struct gve_priv *priv)
{}

static void gve_free_flow_rule_caches(struct gve_priv *priv)
{}

static int gve_alloc_counter_array(struct gve_priv *priv)
{}

static void gve_free_counter_array(struct gve_priv *priv)
{}

/* NIC requests to report stats */
static void gve_stats_report_task(struct work_struct *work)
{}

static void gve_stats_report_schedule(struct gve_priv *priv)
{
	if (!gve_get_probe_in_progress(priv) &&
	    !gve_get_reset_in_progress(priv)) {
		gve_set_do_report_stats(priv);
		queue_work(priv->gve_wq, &priv->stats_report_task);
	}
}

static void gve_stats_report_timer(struct timer_list *t)
{
	struct gve_priv *priv = from_timer(priv, t, stats_report_timer);

	mod_timer(&priv->stats_report_timer,
		  round_jiffies(jiffies +
		  msecs_to_jiffies(priv->stats_report_timer_period)));
	gve_stats_report_schedule(priv);
}

static int gve_alloc_stats_report(struct gve_priv *priv)
{}

static void gve_free_stats_report(struct gve_priv *priv)
{}

static irqreturn_t gve_mgmnt_intr(int irq, void *arg)
{
	struct gve_priv *priv = arg;

	queue_work(priv->gve_wq, &priv->service_task);
	return IRQ_HANDLED;
}

static irqreturn_t gve_intr(int irq, void *arg)
{
	struct gve_notify_block *block = arg;
	struct gve_priv *priv = block->priv;

	iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
	napi_schedule_irqoff(&block->napi);
	return IRQ_HANDLED;
}

static irqreturn_t gve_intr_dqo(int irq, void *arg)
{
	struct gve_notify_block *block = arg;

	/* Interrupts are automatically masked */
	napi_schedule_irqoff(&block->napi);
	return IRQ_HANDLED;
}

static int gve_is_napi_on_home_cpu(struct gve_priv *priv, u32 irq)
{}

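/* GQI NAPI handler: service the TX and RX rings attached to this notify block
 * and, once under budget, re-enable the block's interrupt via its doorbell.
 */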
int gve_napi_poll(struct napi_struct *napi, int budget)
{}

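/* DQO NAPI handler: same role as gve_napi_poll() for the DQO descriptor
 * format, where completions arrive on separate completion queues.
 */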
int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
{}

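/* Allocate one MSI-X vector for management plus one per notify block, request
 * the IRQs and spread per-queue interrupt affinity across online CPUs.
 */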
static int gve_alloc_notify_blocks(struct gve_priv *priv)
{}

static void gve_free_notify_blocks(struct gve_priv *priv)
{}

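/* Allocate the counter array, notify blocks and stats report, then describe
 * them to the device over the admin queue so it can start using them.
 */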
static int gve_setup_device_resources(struct gve_priv *priv)
{}

static void gve_trigger_reset(struct gve_priv *priv);

static void gve_teardown_device_resources(struct gve_priv *priv)
{}

static int gve_unregister_qpl(struct gve_priv *priv,
			      struct gve_queue_page_list *qpl)
{}

static int gve_register_qpl(struct gve_priv *priv,
			    struct gve_queue_page_list *qpl)
{}

static struct gve_queue_page_list *gve_tx_get_qpl(struct gve_priv *priv, int idx)
{}

static struct gve_queue_page_list *gve_rx_get_qpl(struct gve_priv *priv, int idx)
{}

static int gve_register_xdp_qpls(struct gve_priv *priv)
{}

static int gve_register_qpls(struct gve_priv *priv)
{}

static int gve_unregister_xdp_qpls(struct gve_priv *priv)
{}

static int gve_unregister_qpls(struct gve_priv *priv)
{}

static int gve_create_xdp_rings(struct gve_priv *priv)
{}

static int gve_create_rings(struct gve_priv *priv)
{}

static void init_xdp_sync_stats(struct gve_priv *priv)
{}

static void gve_init_sync_stats(struct gve_priv *priv)
{}

static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv,
				      struct gve_tx_alloc_rings_cfg *cfg)
{}

static void gve_tx_stop_rings(struct gve_priv *priv, int start_id, int num_rings)
{}

static void gve_tx_start_rings(struct gve_priv *priv, int start_id,
			       int num_rings)
{}

static int gve_alloc_xdp_rings(struct gve_priv *priv)
{}

static int gve_queues_mem_alloc(struct gve_priv *priv,
				struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
				struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{}

static int gve_destroy_xdp_rings(struct gve_priv *priv)
{}

static int gve_destroy_rings(struct gve_priv *priv)
{}

static void gve_free_xdp_rings(struct gve_priv *priv)
{}

static void gve_queues_mem_free(struct gve_priv *priv,
				struct gve_tx_alloc_rings_cfg *tx_cfg,
				struct gve_rx_alloc_rings_cfg *rx_cfg)
{}

int gve_alloc_page(struct gve_priv *priv, struct device *dev,
		   struct page **page, dma_addr_t *dma,
		   enum dma_data_direction dir, gfp_t gfp_flags)
{}

struct gve_queue_page_list *gve_alloc_queue_page_list(struct gve_priv *priv,
						      u32 id, int pages)
{}

void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
		   enum dma_data_direction dir)
{}

void gve_free_queue_page_list(struct gve_priv *priv,
			      struct gve_queue_page_list *qpl,
			      u32 id)
{}

/* Use this to schedule a reset when the device is capable of continuing
 * to handle other requests in its current state. If it is not, do a reset
 * in thread instead.
 */
void gve_schedule_reset(struct gve_priv *priv)
{
	gve_set_do_reset(priv);
	queue_work(priv->gve_wq, &priv->service_task);
}

static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up);
static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
static void gve_turndown(struct gve_priv *priv);
static void gve_turnup(struct gve_priv *priv);

static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
{}

static void gve_unreg_xdp_info(struct gve_priv *priv)
{}

static void gve_drain_page_cache(struct gve_priv *priv)
{}

static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
				      struct gve_rx_alloc_rings_cfg *cfg)
{}

void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
			     struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
			     struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{}

static void gve_rx_start_ring(struct gve_priv *priv, int i)
{}

static void gve_rx_start_rings(struct gve_priv *priv, int num_rings)
{}

static void gve_rx_stop_ring(struct gve_priv *priv, int i)
{}

static void gve_rx_stop_rings(struct gve_priv *priv, int num_rings)
{}

static void gve_queues_mem_remove(struct gve_priv *priv)
{}

/* The passed-in queue memory is stored into priv and the queues are made live.
 * No memory is allocated. Passed-in memory is freed on errors.
 */
static int gve_queues_start(struct gve_priv *priv,
			    struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
			    struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{}

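/* ndo_open: allocate ring memory for the current configuration and hand it to
 * gve_queues_start() to register QPLs, create rings and enable NAPI.
 */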
static int gve_open(struct net_device *dev)
{}

static int gve_queues_stop(struct gve_priv *priv)
{}

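/* ndo_stop: stop and free the queues; if teardown fails, fall back to a
 * device reset rather than leaving half-freed state behind.
 */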
static int gve_close(struct net_device *dev)
{}

static int gve_remove_xdp_queues(struct gve_priv *priv)
{}

static int gve_add_xdp_queues(struct gve_priv *priv)
{}

static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
{}

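/* Install or remove an XDP program. If the device is down only the program
 * pointer is swapped; otherwise the XDP TX queues are added or removed around
 * the change while traffic is quiesced.
 */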
static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog,
		       struct netlink_ext_ack *extack)
{}

static int gve_xsk_pool_enable(struct net_device *dev,
			       struct xsk_buff_pool *pool,
			       u16 qid)
{}

static int gve_xsk_pool_disable(struct net_device *dev,
				u16 qid)
{}

static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
{}

static int verify_xdp_configuration(struct net_device *dev)
{}

static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{}

int gve_flow_rules_reset(struct gve_priv *priv)
{}

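/* Tear down the live queues and bring the device back up with the
 * caller-provided ring configuration; on failure schedule a reset so the
 * service task can recover.
 */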
int gve_adjust_config(struct gve_priv *priv,
		      struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
		      struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{}

int gve_adjust_queues(struct gve_priv *priv,
		      struct gve_queue_config new_rx_config,
		      struct gve_queue_config new_tx_config)
{}

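/* Quiesce traffic: mark the device down, disable NAPI on every notify block
 * and stop the TX queues without freeing any queue resources.
 */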
static void gve_turndown(struct gve_priv *priv)
{}

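/* Undo gve_turndown(): re-enable NAPI, unmask queue interrupts and restart
 * the TX queues.
 */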
static void gve_turnup(struct gve_priv *priv)
{}

static void gve_turnup_and_check_status(struct gve_priv *priv)
{}

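/* ndo_tx_timeout: if the stalled queue has missed completions and has not
 * been kicked within MIN_TX_TIMEOUT_GAP, kick its NAPI to catch a missed IRQ;
 * otherwise schedule a full device reset.
 */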
static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
{}

u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hsplit)
{
	if (enable_hsplit && priv->max_rx_buffer_size >= GVE_MAX_RX_BUFFER_SIZE)
		return GVE_MAX_RX_BUFFER_SIZE;
	else
		return GVE_DEFAULT_RX_BUFFER_SIZE;
}

/* header-split is not supported on non-DQO_RDA yet even if device advertises it */
bool gve_header_split_supported(const struct gve_priv *priv)
{
	return priv->header_buf_size && priv->queue_format == GVE_DQO_RDA_FORMAT;
}

int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split)
{}

static int gve_set_features(struct net_device *netdev,
			    netdev_features_t features)
{}

static const struct net_device_ops gve_netdev_ops = {
	.ndo_start_xmit		= gve_start_xmit,
	.ndo_features_check	= gve_features_check,
	.ndo_open		= gve_open,
	.ndo_stop		= gve_close,
	.ndo_get_stats64	= gve_get_stats,
	.ndo_tx_timeout		= gve_tx_timeout,
	.ndo_set_features	= gve_set_features,
	.ndo_bpf		= gve_xdp,
	.ndo_xdp_xmit		= gve_xdp_xmit,
	.ndo_xsk_wakeup		= gve_xsk_wakeup,
};

static void gve_handle_status(struct gve_priv *priv, u32 status)
{
	if (GVE_DEVICE_STATUS_RESET_MASK & status) {
		dev_info(&priv->pdev->dev, "Device requested reset.\n");
		gve_set_do_reset(priv);
	}
	if (GVE_DEVICE_STATUS_REPORT_STATS_MASK & status) {
		priv->stats_report_trigger_cnt++;
		gve_set_do_report_stats(priv);
	}
}

static void gve_handle_reset(struct gve_priv *priv)
{
	/* A service task will be scheduled at the end of probe to catch any
	 * resets that need to happen, and we don't want to reset until
	 * probe is done.
	 */
	if (gve_get_probe_in_progress(priv))
		return;

	if (gve_get_do_reset(priv)) {
		rtnl_lock();
		gve_reset(priv, false);
		rtnl_unlock();
	}
}

void gve_handle_report_stats(struct gve_priv *priv)
{}

/* Handle NIC status register changes, reset requests and report stats */
static void gve_service_task(struct work_struct *work)
{}

static void gve_set_netdev_xdp_features(struct gve_priv *priv)
{}

static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
{}

static void gve_teardown_priv_resources(struct gve_priv *priv)
{}

static void gve_trigger_reset(struct gve_priv *priv)
{
	/* Releasing the admin queue tells the device to reset itself */
	gve_adminq_release(priv);
}

static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up)
{}

static int gve_reset_recovery(struct gve_priv *priv, bool was_up)
{}

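/* Full device reset. With attempt_teardown, queues and device resources are
 * released cleanly first; otherwise the reset is triggered immediately and
 * state is torn down afterwards before recovery.
 */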
int gve_reset(struct gve_priv *priv, bool attempt_teardown)
{}

static void gve_write_version(u8 __iomem *driver_version_register)
{
	const char *c = gve_version_prefix;

	while (*c) {
		writeb(*c, driver_version_register);
		c++;
	}

	c = gve_version_str;
	while (*c) {
		writeb(*c, driver_version_register);
		c++;
	}
	writeb('\n', driver_version_register);
}

static int gve_rx_queue_stop(struct net_device *dev, void *per_q_mem, int idx)
{}

static void gve_rx_queue_mem_free(struct net_device *dev, void *per_q_mem)
{}

static int gve_rx_queue_mem_alloc(struct net_device *dev, void *per_q_mem,
				  int idx)
{}

static int gve_rx_queue_start(struct net_device *dev, void *per_q_mem, int idx)
{}

static const struct netdev_queue_mgmt_ops gve_queue_mgmt_ops = {
	.ndo_queue_mem_size	= sizeof(struct gve_rx_ring),
	.ndo_queue_mem_alloc	= gve_rx_queue_mem_alloc,
	.ndo_queue_mem_free	= gve_rx_queue_mem_free,
	.ndo_queue_start	= gve_rx_queue_start,
	.ndo_queue_stop		= gve_rx_queue_stop,
};

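/* PCI probe: enable the device, map the config and doorbell BARs, write the
 * driver version, allocate the netdev, initialize priv state and register
 * with the networking core.
 */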
static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{}

static void gve_remove(struct pci_dev *pdev)
{}

static void gve_shutdown(struct pci_dev *pdev)
{}

#ifdef CONFIG_PM
static int gve_suspend(struct pci_dev *pdev, pm_message_t state)
{}

static int gve_resume(struct pci_dev *pdev)
{}
#endif /* CONFIG_PM */

static const struct pci_device_id gve_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVE) },
	{ }
};

static struct pci_driver gve_driver = {
	.name		= gve_driver_name,
	.id_table	= gve_id_table,
	.probe		= gve_probe,
	.remove		= gve_remove,
	.shutdown	= gve_shutdown,
#ifdef CONFIG_PM
	.suspend	= gve_suspend,
	.resume		= gve_resume,
#endif
};

module_pci_driver(gve_driver);

MODULE_DEVICE_TABLE(pci, gve_id_table);
MODULE_AUTHOR("Google, Inc.");
MODULE_DESCRIPTION("Google Virtual NIC Driver");
MODULE_LICENSE("Dual MIT/GPL");
MODULE_VERSION(GVE_VERSION);