linux/net/core/dev.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *      NET3    Protocol independent device support routines.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *              Authors:	Ross Biro
 *				Fred N. van Kempen, <[email protected]>
 *				Mark Evans, <[email protected]>
 *
 *	Additional Authors:
 *		Florian la Roche <[email protected]>
 *		Alan Cox <[email protected]>
 *		David Hinds <[email protected]>
 *		Alexey Kuznetsov <[email protected]>
 *		Adam Sulmicki <[email protected]>
 *              Pekka Riikonen <[email protected]>
 *
 *	Changes:
 *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
 *                                      to 2 if register_netdev gets called
 *                                      before net_dev_init & also removed a
 *                                      few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *              Alan Cox        :       Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki   :	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *              Pekka Riikonen  :	Netdev boot-time settings code
 *              Andrew Morton   :       Make unregister_netdevice wait
 *                                      indefinitely on dev->refcnt
 *              J Hadi Salim    :       - Backlog queue sampling
 *				        - netif_rx() feedback
 */

#include <linux/uaccess.h>
#include <linux/bitmap.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/isolation.h>
#include <linux/sched/mm.h>
#include <linux/smpboot.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/kthread.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dsa.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/gro.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <net/tcx.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <trace/events/qdisc.h>
#include <trace/events/xdp.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_netdev.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>
#include <linux/indirect_call_wrapper.h>
#include <net/devlink.h>
#include <linux/pm_runtime.h>
#include <linux/prandom.h>
#include <linux/once_lite.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/types.h>
#include <net/page_pool/helpers.h>
#include <net/rps.h>

#include "dev.h"
#include "net-sysfs.h"

static DEFINE_SPINLOCK(ptype_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_extack(unsigned long val,
					   struct net_device *dev,
					   struct netlink_ext_ack *extack);

static DEFINE_MUTEX(ifalias_mutex);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id =;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static DECLARE_RWSEM(devnet_rename_sem);

static inline void dev_base_seq_inc(struct net *net)
{}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{}

#ifndef CONFIG_PREEMPT_RT

static DEFINE_STATIC_KEY_FALSE(use_backlog_threads_key);

static int __init setup_backlog_napi_threads(char *arg)
{}
early_param();

static bool use_backlog_threads(void)
{}

#else

static bool use_backlog_threads(void)
{
	return true;
}

#endif

static inline void backlog_lock_irq_save(struct softnet_data *sd,
					 unsigned long *flags)
{}

static inline void backlog_lock_irq_disable(struct softnet_data *sd)
{}

static inline void backlog_unlock_irq_restore(struct softnet_data *sd,
					      unsigned long *flags)
{}

static inline void backlog_unlock_irq_enable(struct softnet_data *sd)
{}

static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
						       const char *name)
{}

static struct netdev_name_node *
netdev_name_node_head_alloc(struct net_device *dev)
{}

static void netdev_name_node_free(struct netdev_name_node *name_node)
{}

static void netdev_name_node_add(struct net *net,
				 struct netdev_name_node *name_node)
{}

static void netdev_name_node_del(struct netdev_name_node *name_node)
{}

static struct netdev_name_node *netdev_name_node_lookup(struct net *net,
							const char *name)
{}

static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net,
							    const char *name)
{}

bool netdev_name_in_use(struct net *net, const char *name)
{}
EXPORT_SYMBOL();

int netdev_name_node_alt_create(struct net_device *dev, const char *name)
{}

static void netdev_name_node_alt_free(struct rcu_head *head)
{}

static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
{}

int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
{}

static void netdev_name_node_alt_flush(struct net_device *dev)
{}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{}

/* Device list removal
 * caller must respect an RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data) =;
EXPORT_PER_CPU_SYMBOL();

/* Page_pool has a lockless array/stack to alloc/recycle pages.
 * Page_pool consumers must take care to call its APIs from the
 * appropriate context (e.g. NAPI context).
 */
static DEFINE_PER_CPU(struct page_pool *, system_page_pool);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =;

static const char *const netdev_lock_name[] =;

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************
 *
 *		Protocol management and registration routines
 *
 *******************************************************************************/


/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers that mangle input packets
 *	MUST BE last in the hash buckets, and checking protocol handlers
 *	MUST start from the promiscuous ptype_all chain in net_bh.
 *	This holds today; do not change it.
 *	Explanation: if a protocol handler that mangles packets were
 *	first on the list, it could not tell that the packet is cloned
 *	and must be copied-on-write, so it would modify the clone in
 *	place and subsequent readers would see a broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep and therefore cannot guarantee that
 *	CPUs currently in the middle of receiving a packet will see the
 *	new packet type (they will on the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{}
EXPORT_SYMBOL();
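
/*
 * Illustrative sketch (not part of dev.c): how a module might use
 * dev_add_pack() to tap every Ethernet frame. All sample_* names are
 * hypothetical.
 */
static int sample_pt_rcv(struct sk_buff *skb, struct net_device *dev,
			 struct packet_type *pt, struct net_device *orig_dev)
{
	/* The handler owns this reference and must consume or free it. */
	consume_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type sample_pt __read_mostly = {
	.type = htons(ETH_P_ALL),	/* all protocols */
	.func = sample_pt_rcv,
};

/* Pair dev_add_pack(&sample_pt) in module init with
 * dev_remove_pack(&sample_pt) in module exit.
 */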

/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{}
EXPORT_SYMBOL();

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{}
EXPORT_SYMBOL();


/*******************************************************************************
 *
 *			    Device Interface Subroutines
 *
 *******************************************************************************/

/**
 *	dev_get_iflink	- get 'iflink' value of an interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{}
EXPORT_SYMBOL();

/**
 *	dev_fill_metadata_dst - Retrieve tunnel egress information.
 *	@dev: targeted interface
 *	@skb: The packet.
 *
 *	For better visibility of tunnel traffic, OVS needs to retrieve
 *	the egress tunnel information for a packet. This API lets the
 *	caller obtain that information.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{}
EXPORT_SYMBOL_GPL();

static struct net_device_path *dev_fwd_path(struct net_device_path_stack *stack)
{}

int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
			  struct net_device_path_stack *stack)
{}
EXPORT_SYMBOL_GPL();

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under the RTNL semaphore.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{}
EXPORT_SYMBOL();

/**
 * dev_get_by_name_rcu	- find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Find an interface by name.
 * If the name is found a pointer to the device is returned.
 * If the name is not found then %NULL is returned.
 * The reference counters are not incremented so the caller must be
 * careful with locks. The caller must hold the RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{}
EXPORT_SYMBOL();

/* Deprecated for new users, call netdev_get_by_name() instead */
struct net_device *dev_get_by_name(struct net *net, const char *name)
{}
EXPORT_SYMBOL();

/**
 *	netdev_get_by_name() - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *	@tracker: tracking object for the acquired reference
 *	@gfp: allocation flags for the tracker
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use netdev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */
struct net_device *netdev_get_by_name(struct net *net, const char *name,
				      netdevice_tracker *tracker, gfp_t gfp)
{}
EXPORT_SYMBOL();
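
/*
 * Illustrative sketch (not part of dev.c): a tracked lookup by name.
 * The name "eth0" and sample_peek_by_name() are hypothetical.
 */
static int sample_peek_by_name(struct net *net)
{
	netdevice_tracker tracker;
	struct net_device *dev;

	dev = netdev_get_by_name(net, "eth0", &tracker, GFP_KERNEL);
	if (!dev)
		return -ENODEV;
	netdev_info(dev, "mtu %u\n", dev->mtu);
	netdev_put(dev, &tracker);
	return 0;
}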

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns a pointer to the device,
 *	or %NULL if it is not found. The device has not had its reference
 *	counter increased so the caller must be careful
 *	about locking. The caller must hold the RTNL semaphore.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{}
EXPORT_SYMBOL();

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns a pointer to the device,
 *	or %NULL if it is not found. The device has not had its reference
 *	counter increased so the caller must be careful
 *	about locking. The caller must hold the RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{}
EXPORT_SYMBOL();
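
/*
 * Illustrative sketch (not part of dev.c): an RCU-side lookup by ifindex.
 * The device pointer is only valid inside the read-side critical section.
 */
static bool sample_ifindex_is_up(struct net *net, int ifindex)
{
	struct net_device *dev;
	bool up = false;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		up = netif_running(dev);
	rcu_read_unlock();

	return up;
}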

/* Deprecated for new users, call netdev_get_by_index() instead */
struct net_device *dev_get_by_index(struct net *net, int ifindex)
{}
EXPORT_SYMBOL();

/**
 *	netdev_get_by_index() - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *	@tracker: tracking object for the acquired reference
 *	@gfp: allocation flags for the tracker
 *
 *	Search for an interface by index. Returns a pointer to the device,
 *	or %NULL if it is not found. The returned device has had a
 *	reference added and the pointer is safe until the user calls
 *	netdev_put() to indicate they have finished with it.
 */
struct net_device *netdev_get_by_index(struct net *net, int ifindex,
				       netdevice_tracker *tracker, gfp_t gfp)
{}
EXPORT_SYMBOL();

/**
 *	dev_get_by_napi_id - find a device by napi_id
 *	@napi_id: ID of the NAPI struct
 *
 *	Search for an interface by NAPI ID. Returns a pointer to the device,
 *	or %NULL if it is not found. The device has not had
 *	its reference counter increased so the caller must be careful
 *	about locking. The caller must hold the RCU lock.
 */

struct net_device *dev_get_by_napi_id(unsigned int napi_id)
{}
EXPORT_SYMBOL();

static DEFINE_SEQLOCK(netdev_rename_lock);

void netdev_copy_name(struct net_device *dev, char *name)
{}

/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{}

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns a pointer to the
 *	device, or %NULL if it is not found.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{}
EXPORT_SYMBOL();

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{}
EXPORT_SYMBOL();

/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns a pointer to
 *	the first matching device, or %NULL if none is found. Must be called
 *	inside rtnl_lock(), and the result's refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{}
EXPORT_SYMBOL();

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{}
EXPORT_SYMBOL();

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@res: result name string
 *
 *	Passed a format string - eg "lt%d" - it will try to find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (i.e. 32K on most
 *	platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *res)
{}

/* Returns negative errno or allocated unit id (see __dev_alloc_name()) */
static int dev_prep_valid_name(struct net *net, struct net_device *dev,
			       const char *want_name, char *out_name,
			       int dup_errno)
{}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" - it will try to find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (i.e. 32K on most
 *	platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{}
EXPORT_SYMBOL();

static int dev_get_valid_name(struct net *net, struct net_device *dev,
			      const char *name)
{}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change the name of a device. A format string such as "eth%d"
 *	may be passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set the ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{}
EXPORT_SYMBOL();

/**
 *	dev_get_alias - get ifalias of a device
 *	@dev: device
 *	@name: buffer to store name of ifalias
 *	@len: size of buffer
 *
 *	Get the ifalias for a device. The caller must make sure dev cannot
 *	go away, e.g. by holding the RCU read lock or a reference on the
 *	device.
 */
int dev_get_alias(const struct net_device *dev, char *name, size_t len)
{}

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{}
EXPORT_SYMBOL();

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{}
EXPORT_SYMBOL();

/**
 * __netdev_notify_peers - notify network peers about existence of @dev,
 * to be called when rtnl lock is already held.
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void __netdev_notify_peers(struct net_device *dev)
{}
EXPORT_SYMBOL();

/**
 * netdev_notify_peers - notify network peers about existence of @dev
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void netdev_notify_peers(struct net_device *dev)
{}
EXPORT_SYMBOL();

static int napi_threaded_poll(void *data);

static int napi_kthread_create(struct napi_struct *n)
{}

static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev: device to open
 *	@extack: netlink extended ack
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{}
EXPORT_SYMBOL();

static void __dev_close_many(struct list_head *head)
{}

static void __dev_close(struct net_device *dev)
{}

void dev_close_many(struct list_head *head, bool unlink)
{}
EXPORT_SYMBOL();

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
void dev_close(struct net_device *dev)
{}
EXPORT_SYMBOL();
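
/*
 * Illustrative sketch (not part of dev.c): bouncing an interface under
 * the RTNL lock, as dev_open()/dev_close() require.
 */
static int sample_bounce(struct net_device *dev)
{
	int err;

	rtnl_lock();
	dev_close(dev);
	err = dev_open(dev, NULL);	/* NULL: no extended ack */
	rtnl_unlock();

	return err;
}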


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{}
EXPORT_SYMBOL();

/**
 *	dev_disable_gro_hw - disable HW Generic Receive Offload on a device
 *	@dev: device
 *
 *	Disable HW Generic Receive Offload (GRO_HW) on a net device.  Must be
 *	called under RTNL.  This is needed if Generic XDP is installed on
 *	the device.
 */
static void dev_disable_gro_hw(struct net_device *dev)
{}

const char *netdev_cmd_to_name(enum netdev_cmd cmd)
{}
EXPORT_SYMBOL_GPL();

static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
{}

static int call_netdevice_register_notifiers(struct notifier_block *nb,
					     struct net_device *dev)
{}

static void call_netdevice_unregister_notifiers(struct notifier_block *nb,
						struct net_device *dev)
{}

static int call_netdevice_register_net_notifiers(struct notifier_block *nb,
						 struct net *net)
{}

static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb,
						    struct net *net)
{}

static int dev_boot_phase =;

/**
 * register_netdevice_notifier - register a network notifier block
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered, all registration and up events are replayed
 * to the new notifier to allow it to have a race-free
 * view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{}
EXPORT_SYMBOL();
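
/*
 * Illustrative sketch (not part of dev.c): a minimal notifier that logs
 * NETDEV_UP events. The sample_* names are hypothetical.
 */
static int sample_netdev_event(struct notifier_block *nb,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_UP)
		netdev_info(dev, "came up\n");
	return NOTIFY_DONE;
}

static struct notifier_block sample_netdev_nb = {
	.notifier_call = sample_netdev_event,
};

/* register_netdevice_notifier(&sample_netdev_nb) replays NETDEV_REGISTER
 * and NETDEV_UP for devices that already exist, so the callback must
 * tolerate being invoked at registration time.
 */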

/**
 * unregister_netdevice_notifier - unregister a network notifier block
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering unregister and down device events are synthesized
 * for all devices on the device list to the removed notifier to remove
 * the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{}
EXPORT_SYMBOL();

static int __register_netdevice_notifier_net(struct net *net,
					     struct notifier_block *nb,
					     bool ignore_call_fail)
{}

static int __unregister_netdevice_notifier_net(struct net *net,
					       struct notifier_block *nb)
{}

/**
 * register_netdevice_notifier_net - register a per-netns network notifier block
 * @net: network namespace
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered, all registration and up events are replayed
 * to the new notifier to allow it to have a race-free
 * view of the network device list.
 */

int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb)
{}
EXPORT_SYMBOL();

/**
 * unregister_netdevice_notifier_net - unregister a per-netns
 *                                     network notifier block
 * @net: network namespace
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier_net(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering unregister and down device events are synthesized
 * for all devices on the device list to the removed notifier to remove
 * the need for special case cleanup code.
 */

int unregister_netdevice_notifier_net(struct net *net,
				      struct notifier_block *nb)
{}
EXPORT_SYMBOL();

static void __move_netdevice_notifier_net(struct net *src_net,
					  struct net *dst_net,
					  struct notifier_block *nb)
{}

int register_netdevice_notifier_dev_net(struct net_device *dev,
					struct notifier_block *nb,
					struct netdev_net_notifier *nn)
{}
EXPORT_SYMBOL();

int unregister_netdevice_notifier_dev_net(struct net_device *dev,
					  struct notifier_block *nb,
					  struct netdev_net_notifier *nn)
{}
EXPORT_SYMBOL();

static void move_netdevice_notifiers_dev_net(struct net_device *dev,
					     struct net *net)
{}

/**
 *	call_netdevice_notifiers_info - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@info: notifier information data
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers_info(unsigned long val,
				  struct netdev_notifier_info *info)
{}

/**
 *	call_netdevice_notifiers_info_robust - call per-netns notifier blocks
 *	                                       and roll back on error
 *	@val_up: value passed unmodified to notifier function
 *	@val_down: value passed unmodified to the notifier function when
 *	           recovering from an error on @val_up
 *	@info: notifier information data
 *
 *	Call all per-netns network notifier blocks, but not notifier blocks on
 *	the global notifier chain. Parameters and return value are as for
 *	raw_notifier_call_chain_robust().
 */

static int
call_netdevice_notifiers_info_robust(unsigned long val_up,
				     unsigned long val_down,
				     struct netdev_notifier_info *info)
{}

static int call_netdevice_notifiers_extack(unsigned long val,
					   struct net_device *dev,
					   struct netlink_ext_ack *extack)
{}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *      @val: value passed unmodified to notifier function
 *      @dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{}
EXPORT_SYMBOL();

/**
 *	call_netdevice_notifiers_mtu - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *	@arg: additional u32 argument passed to the notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */
static int call_netdevice_notifiers_mtu(unsigned long val,
					struct net_device *dev, u32 arg)
{}

#ifdef CONFIG_NET_INGRESS
static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);

void net_inc_ingress_queue(void)
{}
EXPORT_SYMBOL_GPL();

void net_dec_ingress_queue(void)
{}
EXPORT_SYMBOL_GPL();
#endif

#ifdef CONFIG_NET_EGRESS
static DEFINE_STATIC_KEY_FALSE(egress_needed_key);

void net_inc_egress_queue(void)
{}
EXPORT_SYMBOL_GPL();

void net_dec_egress_queue(void)
{}
EXPORT_SYMBOL_GPL();
#endif

#ifdef CONFIG_NET_CLS_ACT
DEFINE_STATIC_KEY_FALSE(tcf_bypass_check_needed_key);
EXPORT_SYMBOL();
#endif

DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
EXPORT_SYMBOL();
#ifdef CONFIG_JUMP_LABEL
static atomic_t netstamp_needed_deferred;
static atomic_t netstamp_wanted;
static void netstamp_clear(struct work_struct *work)
{}
static DECLARE_WORK(netstamp_work, netstamp_clear);
#endif

void net_enable_timestamp(void)
{}
EXPORT_SYMBOL();

void net_disable_timestamp(void)
{}
EXPORT_SYMBOL();

static inline void net_timestamp_set(struct sk_buff *skb)
{}

#define net_timestamp_check(COND, SKB)

bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
{}
EXPORT_SYMBOL_GPL();

static int __dev_forward_skb2(struct net_device *dev, struct sk_buff *skb,
			      bool check_mtu)
{}

int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{}
EXPORT_SYMBOL_GPL();

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{}
EXPORT_SYMBOL_GPL();

int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb)
{}

static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{}

static inline void deliver_ptype_list_skb(struct sk_buff *skb,
					  struct packet_type **pt,
					  struct net_device *orig_dev,
					  __be16 type,
					  struct list_head *ptype_list)
{}

static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{}

/**
 * dev_nit_active - return true if any network interface taps are in use
 *
 * @dev: network device to check for the presence of taps
 */
bool dev_nit_active(struct net_device *dev)
{}
EXPORT_SYMBOL_GPL();

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{}
EXPORT_SYMBOL_GPL();

/**
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this, verify that each tc mapping remains valid and,
 * if not, zero the mapping. With no priorities mapping to the
 * offset/count pair it will no longer be used. In the worst case, if TC0
 * is invalid, nothing can be done, so priority mappings are disabled. It
 * is expected that drivers will fix this mapping, if they can, before
 * calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{}

int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
{}
EXPORT_SYMBOL();

#ifdef CONFIG_XPS
static struct static_key xps_needed __read_mostly;
static struct static_key xps_rxqs_needed __read_mostly;
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)

static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
			     struct xps_dev_maps *old_maps, int tci, u16 index)
{}

static bool remove_xps_queue_cpu(struct net_device *dev,
				 struct xps_dev_maps *dev_maps,
				 int cpu, u16 offset, u16 count)
{}

static void reset_xps_maps(struct net_device *dev,
			   struct xps_dev_maps *dev_maps,
			   enum xps_map_type type)
{}

static void clean_xps_maps(struct net_device *dev, enum xps_map_type type,
			   u16 offset, u16 count)
{}

static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
				   u16 count)
{}

static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{}

static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
				      u16 index, bool is_rxqs_map)
{}

/* Copy xps maps at a given index */
static void xps_copy_dev_maps(struct xps_dev_maps *dev_maps,
			      struct xps_dev_maps *new_dev_maps, int index,
			      int tc, bool skip_tc)
{}

/* Must be called under cpus_read_lock */
int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
			  u16 index, enum xps_map_type type)
{}
EXPORT_SYMBOL_GPL();

int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index)
{}
EXPORT_SYMBOL();
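
/*
 * Illustrative sketch (not part of dev.c): pinning TX queue 0 of a device
 * to CPUs 0 and 1, assuming both CPUs exist (and CONFIG_XPS is enabled,
 * which this #ifdef block already guarantees).
 */
static int sample_pin_txq0(struct net_device *dev)
{
	cpumask_var_t mask;
	int err;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_set_cpu(0, mask);
	cpumask_set_cpu(1, mask);
	err = netif_set_xps_queue(dev, mask, 0);
	free_cpumask_var(mask);
	return err;
}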

#endif
static void netdev_unbind_all_sb_channels(struct net_device *dev)
{}

void netdev_reset_tc(struct net_device *dev)
{}
EXPORT_SYMBOL();

int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
{}
EXPORT_SYMBOL();

int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{}
EXPORT_SYMBOL();

void netdev_unbind_sb_channel(struct net_device *dev,
			      struct net_device *sb_dev)
{}
EXPORT_SYMBOL();

int netdev_bind_sb_channel_queue(struct net_device *dev,
				 struct net_device *sb_dev,
				 u8 tc, u16 count, u16 offset)
{}
EXPORT_SYMBOL();

int netdev_set_sb_channel(struct net_device *dev, u16 channel)
{}
EXPORT_SYMBOL();

/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{}
EXPORT_SYMBOL();

#ifdef CONFIG_SYSFS
/**
 *	netif_set_real_num_rx_queues - set actual number of RX queues used
 *	@dev: Network device
 *	@rxq: Actual number of RX queues
 *
 *	This must be called either with the rtnl_lock held or before
 *	registration of the net device.  Returns 0 on success, or a
 *	negative error code.  If called before registration, it always
 *	succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{}
EXPORT_SYMBOL();
#endif

/**
 *	netif_set_real_num_queues - set actual number of RX and TX queues used
 *	@dev: Network device
 *	@txq: Actual number of TX queues
 *	@rxq: Actual number of RX queues
 *
 *	Set the real number of both TX and RX queues.
 *	Does nothing if the number of queues is already correct.
 */
int netif_set_real_num_queues(struct net_device *dev,
			      unsigned int txq, unsigned int rxq)
{}
EXPORT_SYMBOL();
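
/*
 * Illustrative sketch (not part of dev.c): a driver trimming its active
 * queue counts to the number of IRQ vectors it actually obtained, e.g.
 * from ndo_open() or before register_netdev().
 */
static int sample_trim_queues(struct net_device *dev, unsigned int nvec)
{
	return netif_set_real_num_queues(dev, nvec, nvec);
}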

/**
 * netif_set_tso_max_size() - set the max size of TSO frames supported
 * @dev:	netdev to update
 * @size:	max skb->len of a TSO frame
 *
 * Set the limit on the size of TSO super-frames the device can handle.
 * Unless explicitly set the stack will assume the value of
 * %GSO_LEGACY_MAX_SIZE.
 */
void netif_set_tso_max_size(struct net_device *dev, unsigned int size)
{}
EXPORT_SYMBOL();

/**
 * netif_set_tso_max_segs() - set the max number of segs supported for TSO
 * @dev:	netdev to update
 * @segs:	max number of TCP segments
 *
 * Set the limit on the number of TCP segments the device can generate from
 * a single TSO super-frame.
 * Unless explicitly set the stack will assume the value of %GSO_MAX_SEGS.
 */
void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs)
{}
EXPORT_SYMBOL();
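
/*
 * Illustrative sketch (not part of dev.c): a driver advertising hardware
 * TSO limits at probe time. The 16 kB / 32 segment values are made up.
 */
static void sample_set_tso_caps(struct net_device *dev)
{
	netif_set_tso_max_size(dev, 16 * 1024);
	netif_set_tso_max_segs(dev, 32);
}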

/**
 * netif_inherit_tso_max() - copy all TSO limits from a lower device to an upper
 * @to:		netdev to update
 * @from:	netdev from which to copy the limits
 */
void netif_inherit_tso_max(struct net_device *to, const struct net_device *from)
{}
EXPORT_SYMBOL();

/**
 * netif_get_num_default_rss_queues - default number of RSS queues
 *
 * The default is the number of physical cores if there are only 1 or 2,
 * or half the number of physical cores if there are more.
 */
int netif_get_num_default_rss_queues(void)
{}
EXPORT_SYMBOL();

static void __netif_reschedule(struct Qdisc *q)
{}

void __netif_schedule(struct Qdisc *q)
{}
EXPORT_SYMBOL();

struct dev_kfree_skb_cb {};

static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
{}

void netif_schedule_queue(struct netdev_queue *txq)
{}
EXPORT_SYMBOL();

void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{}
EXPORT_SYMBOL();

void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason)
{}
EXPORT_SYMBOL();

void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason)
{}
EXPORT_SYMBOL();


/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{}
EXPORT_SYMBOL();

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached from system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{}
EXPORT_SYMBOL();

/*
 * Returns a Tx hash based on the given packet descriptor and a Tx queue
 * count to be used as a distribution range.
 */
static u16 skb_tx_hash(const struct net_device *dev,
		       const struct net_device *sb_dev,
		       struct sk_buff *skb)
{}

void skb_warn_bad_offload(const struct sk_buff *skb)
{}

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{}
EXPORT_SYMBOL();

int skb_crc32c_csum_help(struct sk_buff *skb)
{}

__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
{}


/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
{}

void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
{}
EXPORT_SYMBOL();
#endif

/* XXX: check that highmem exists at all on the given machine. */
static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{}

/* If MPLS offload request, verify we are testing hardware MPLS features
 * instead of standard features for the netdev.
 */
#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{}
#else
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	return features;
}
#endif

static netdev_features_t harmonize_features(struct sk_buff *skb,
	netdev_features_t features)
{}

netdev_features_t passthru_features_check(struct sk_buff *skb,
					  struct net_device *dev,
					  netdev_features_t features)
{}
EXPORT_SYMBOL();

static netdev_features_t dflt_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{}

static netdev_features_t gso_features_check(const struct sk_buff *skb,
					    struct net_device *dev,
					    netdev_features_t features)
{}

netdev_features_t netif_skb_features(struct sk_buff *skb)
{}
EXPORT_SYMBOL();

static int xmit_one(struct sk_buff *skb, struct net_device *dev,
		    struct netdev_queue *txq, bool more)
{}

struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
				    struct netdev_queue *txq, int *ret)
{}

static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
					  netdev_features_t features)
{}

int skb_csum_hwoffload_help(struct sk_buff *skb,
			    const netdev_features_t features)
{}
EXPORT_SYMBOL();

static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
{}

struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
{}
EXPORT_SYMBOL_GPL();

static void qdisc_pkt_len_init(struct sk_buff *skb)
{}

static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q,
			     struct sk_buff **to_free,
			     struct netdev_queue *txq)
{}

static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{}

#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
static void skb_update_prio(struct sk_buff *skb)
{}
#else
#define skb_update_prio(skb)
#endif

/**
 *	dev_loopback_xmit - loop back @skb
 *	@net: network namespace this loopback is happening in
 *	@sk:  sk needed to be a netfilter okfn
 *	@skb: buffer to transmit
 */
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{}
EXPORT_SYMBOL();

#ifdef CONFIG_NET_EGRESS
static struct netdev_queue *
netdev_tx_queue_mapping(struct net_device *dev, struct sk_buff *skb)
{}

#ifndef CONFIG_PREEMPT_RT
static bool netdev_xmit_txqueue_skipped(void)
{}

void netdev_xmit_skip_txqueue(bool skip)
{}
EXPORT_SYMBOL_GPL();

#else
static bool netdev_xmit_txqueue_skipped(void)
{
	return current->net_xmit.skip_txqueue;
}

void netdev_xmit_skip_txqueue(bool skip)
{
	current->net_xmit.skip_txqueue = skip;
}
EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue);
#endif
#endif /* CONFIG_NET_EGRESS */

#ifdef CONFIG_NET_XGRESS
static int tc_run(struct tcx_entry *entry, struct sk_buff *skb,
		  enum skb_drop_reason *drop_reason)
{}

static DEFINE_STATIC_KEY_FALSE(tcx_needed_key);

void tcx_inc(void)
{}

void tcx_dec(void)
{}

static __always_inline enum tcx_action_base
tcx_run(const struct bpf_mprog_entry *entry, struct sk_buff *skb,
	const bool needs_mac)
{}

static __always_inline struct sk_buff *
sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
		   struct net_device *orig_dev, bool *another)
{}

static __always_inline struct sk_buff *
sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
{}
#else
static __always_inline struct sk_buff *
sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
		   struct net_device *orig_dev, bool *another)
{
	return skb;
}

static __always_inline struct sk_buff *
sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
{
	return skb;
}
#endif /* CONFIG_NET_XGRESS */

#ifdef CONFIG_XPS
static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
			       struct xps_dev_maps *dev_maps, unsigned int tci)
{}
#endif

static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
			 struct sk_buff *skb)
{}

u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
		     struct net_device *sb_dev)
{}
EXPORT_SYMBOL();

u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
		       struct net_device *sb_dev)
{}
EXPORT_SYMBOL();

u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
		     struct net_device *sb_dev)
{}
EXPORT_SYMBOL();

struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
					 struct sk_buff *skb,
					 struct net_device *sb_dev)
{}

/**
 * __dev_queue_xmit() - transmit a buffer
 * @skb:	buffer to transmit
 * @sb_dev:	subordinate device used for L2 forwarding offload
 *
 * Queue a buffer for transmission to a network device. The caller must
 * have set the device and priority and built the buffer before calling
 * this function. The function can be called from an interrupt.
 *
 * When calling this method, interrupts MUST be enabled. This is because
 * the BH enable code must have IRQs enabled so that it will not deadlock.
 *
 * Regardless of the return value, the skb is consumed, so it is currently
 * difficult to retry a send to this method. (You can bump the ref count
 * before sending to hold a reference for retry if you are careful.)
 *
 * Return:
 * * 0				- buffer successfully transmitted
 * * positive qdisc return code	- NET_XMIT_DROP etc.
 * * negative errno		- other errors
 */
int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
{}
EXPORT_SYMBOL();
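
/*
 * Illustrative sketch (not part of dev.c): handing a fully built skb to
 * the qdisc layer. dev_queue_xmit() consumes the skb whatever it returns.
 */
static int sample_xmit(struct net_device *dev, struct sk_buff *skb)
{
	skb->dev = dev;
	return dev_queue_xmit(skb);
}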

int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
{}
EXPORT_SYMBOL();

/*************************************************************************
 *			Receiver routines
 *************************************************************************/
static DEFINE_PER_CPU(struct task_struct *, backlog_napi);

int weight_p __read_mostly =;           /* old backlog weight */
int dev_weight_rx_bias __read_mostly =;  /* bias for backlog weight */
int dev_weight_tx_bias __read_mostly =;  /* bias for output_queue quota */

/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
				     struct napi_struct *napi)
{}

#ifdef CONFIG_RPS

struct static_key_false rps_needed __read_mostly;
EXPORT_SYMBOL();
struct static_key_false rfs_needed __read_mostly;
EXPORT_SYMBOL();

static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
	    struct rps_dev_flow *rflow, u16 next_cpu)
{}

/*
 * get_rps_cpu is called from netif_receive_skb and returns the target
 * CPU from the RPS map of the receiving queue for a given skb.
 * rcu_read_lock must be held on entry.
 */
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
		       struct rps_dev_flow **rflowp)
{}

#ifdef CONFIG_RFS_ACCEL

/**
 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
 * @dev: Device on which the filter was set
 * @rxq_index: RX queue index
 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
 *
 * Drivers that implement ndo_rx_flow_steer() should periodically call
 * this function for each installed filter and remove the filters for
 * which it returns %true.
 */
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
			 u32 flow_id, u16 filter_id)
{}
EXPORT_SYMBOL();

#endif /* CONFIG_RFS_ACCEL */

/* Called from hardirq (IPI) context */
static void rps_trigger_softirq(void *data)
{}

#endif /* CONFIG_RPS */

/* Called from hardirq (IPI) context */
static void trigger_rx_softirq(void *data)
{}

/*
 * After we have queued a packet into sd->input_pkt_queue,
 * we need to make sure this queue is serviced soon.
 *
 * - If this is another cpu queue, link it to our rps_ipi_list,
 *   and make sure we will process rps_ipi_list from net_rx_action().
 *
 * - If this is our own queue, NAPI schedule our backlog.
 *   Note that this also raises NET_RX_SOFTIRQ.
 */
static void napi_schedule_rps(struct softnet_data *sd)
{}

void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu)
{}

#ifdef CONFIG_NET_FLOW_LIMIT
int netdev_flow_limit_table_len __read_mostly =;
#endif

static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
{}

/*
 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
 * queue (may be a remote CPU queue).
 */
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
			      unsigned int *qtail)
{}

static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
{}

u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
			     struct bpf_prog *xdp_prog)
{}

static int
netif_skb_check_for_xdp(struct sk_buff **pskb, struct bpf_prog *prog)
{}

static u32 netif_receive_generic_xdp(struct sk_buff **pskb,
				     struct xdp_buff *xdp,
				     struct bpf_prog *xdp_prog)
{}

/* When doing generic XDP we have to bypass the qdisc layer and the
 * network taps in order to match in-driver-XDP behavior. This also means
 * that XDP packets are able to starve other packets going through a qdisc,
 * and DDOS attacks will be more effective. In-driver XDP uses dedicated TX
 * queues, so it does not have this starvation issue.
 */
void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
{}

static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);

int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff **pskb)
{}
EXPORT_SYMBOL_GPL();

static int netif_rx_internal(struct sk_buff *skb)
{}

/**
 *	__netif_rx	-	Slightly optimized version of netif_rx
 *	@skb: buffer to post
 *
 *	This behaves as netif_rx except that it does not disable bottom halves.
 *	As a result this function may only be invoked from the interrupt context
 *	(either hard or soft interrupt).
 */
int __netif_rx(struct sk_buff *skb)
{}
EXPORT_SYMBOL();

/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process via the backlog NAPI device. It
 *	always succeeds. The buffer may be dropped during processing for
 *	congestion control or by the protocol layers.
 *	The network buffer is passed via the backlog NAPI device. Modern NIC
 *	drivers should use NAPI and GRO.
 *	This function can be used from interrupt and from process context. A
 *	caller in process context must not disable interrupts before invoking
 *	this function.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped)
 *
 */
int netif_rx(struct sk_buff *skb)
{}
EXPORT_SYMBOL();
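
/*
 * Illustrative sketch (not part of dev.c): a legacy, non-NAPI receive
 * path handing one frame to the stack from its interrupt handler.
 */
static void sample_legacy_rx(struct net_device *dev, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
}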

static __latent_entropy void net_tx_action(struct softirq_action *h)
{}

#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
			     unsigned char *addr) __read_mostly;
EXPORT_SYMBOL_GPL();
#endif

/**
 *	netdev_is_rx_handler_busy - check if receive handler is registered
 *	@dev: device to check
 *
 *	Check if a receive handler is already registered for a given device.
 *	Return true if there is one.
 *
 *	The caller must hold the rtnl_mutex.
 */
bool netdev_is_rx_handler_busy(struct net_device *dev)
{}
EXPORT_SYMBOL_GPL();

/**
 *	netdev_rx_handler_register - register receive handler
 *	@dev: device to register a handler for
 *	@rx_handler: receive handler to register
 *	@rx_handler_data: data pointer that is used by rx handler
 *
 *	Register a receive handler for a device. This handler will then be
 *	called from __netif_receive_skb. A negative errno code is returned
 *	on a failure.
 *
 *	The caller must hold the rtnl_mutex.
 *
 *	For a general description of rx_handler, see enum rx_handler_result.
 */
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data)
{}
EXPORT_SYMBOL_GPL();
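
/*
 * Illustrative sketch (not part of dev.c): the shape of an rx_handler as
 * used by bridge/macvlan-style upper devices. sample_handle_frame() is
 * hypothetical.
 */
static rx_handler_result_t sample_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	if (!is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		return RX_HANDLER_PASS;	/* let the normal path see it */

	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}

/* Under rtnl_lock():
 *	err = netdev_rx_handler_register(dev, sample_handle_frame, NULL);
 * and later netdev_rx_handler_unregister(dev).
 */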

/**
 *	netdev_rx_handler_unregister - unregister receive handler
 *	@dev: device to unregister a handler from
 *
 *	Unregister a receive handler from a device.
 *
 *	The caller must hold the rtnl_mutex.
 */
void netdev_rx_handler_unregister(struct net_device *dev)
{}
EXPORT_SYMBOL_GPL();

/*
 * Limit the use of PFMEMALLOC reserves to those protocols that implement
 * the special handling of PFMEMALLOC skbs.
 */
static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
{}

static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
			     int *ret, struct net_device *orig_dev)
{}

static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
				    struct packet_type **ppt_prev)
{}

static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
{}

/**
 *	netif_receive_skb_core - special purpose version of netif_receive_skb
 *	@skb: buffer to process
 *
 *	More direct receive version of netif_receive_skb().  It should
 *	only be used by callers that have a need to skip RPS and Generic XDP.
 *	Caller must also take care of handling if ``(page_is_)pfmemalloc``.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb_core(struct sk_buff *skb)
{}
EXPORT_SYMBOL();

static inline void __netif_receive_skb_list_ptype(struct list_head *head,
						  struct packet_type *pt_prev,
						  struct net_device *orig_dev)
{}

static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
{}

static int __netif_receive_skb(struct sk_buff *skb)
{}

static void __netif_receive_skb_list(struct list_head *head)
{}

static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
{}

static int netif_receive_skb_internal(struct sk_buff *skb)
{}

void netif_receive_skb_list_internal(struct list_head *head)
{}

/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{}
EXPORT_SYMBOL();

/**
 *	netif_receive_skb_list - process many receive buffers from network
 *	@head: list of skbs to process.
 *
 *	Since return value of netif_receive_skb() is normally ignored, and
 *	wouldn't be meaningful for a list, this function returns void.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 */
void netif_receive_skb_list(struct list_head *head)
{}
EXPORT_SYMBOL();

static DEFINE_PER_CPU(struct work_struct, flush_works);

/* Network device is going away, flush any packets still pending */
static void flush_backlog(struct work_struct *work)
{}

static bool flush_required(int cpu)
{}

static void flush_all_backlogs(void)
{}

static void net_rps_send_ipi(struct softnet_data *remsd)
{}

/*
 * net_rps_action_and_irq_enable sends any pending IPI's for rps.
 * Note: called with local irq disabled, but exits with local irq enabled.
 */
static void net_rps_action_and_irq_enable(struct softnet_data *sd)
{}

static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
{}

static int process_backlog(struct napi_struct *napi, int quota)
{}

/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run.
 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
 */
void __napi_schedule(struct napi_struct *n)
{}
EXPORT_SYMBOL();

/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if the NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
bool napi_schedule_prep(struct napi_struct *n)
{}
EXPORT_SYMBOL();
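
/* Example usage (illustrative sketch): the canonical interrupt-handler
 * pattern pairing napi_schedule_prep() with __napi_schedule(). With hard
 * irqs already masked, __napi_schedule_irqoff() may be used instead.
 * "mydev_intr" is hypothetical.
 */
static irqreturn_t mydev_intr(int irq, void *data)
{
	struct napi_struct *napi = data;

	if (napi_schedule_prep(napi)) {
		/* Disable device interrupts here, then hand off to NAPI. */
		__napi_schedule(napi);
	}
	return IRQ_HANDLED;
}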

/**
 * __napi_schedule_irqoff - schedule for receive
 * @n: entry to schedule
 *
 * Variant of __napi_schedule() assuming hard irqs are masked.
 *
 * On PREEMPT_RT enabled kernels this maps to __napi_schedule()
 * because the interrupt disabled assumption might not be true
 * due to force-threaded interrupts and spinlock substitution.
 */
void __napi_schedule_irqoff(struct napi_struct *n)
{}
EXPORT_SYMBOL();

bool napi_complete_done(struct napi_struct *n, int work_done)
{}
EXPORT_SYMBOL();

/* must be called under rcu_read_lock(), as we don't take a reference */
struct napi_struct *napi_by_id(unsigned int napi_id)
{}

static void skb_defer_free_flush(struct softnet_data *sd)
{}

#if defined(CONFIG_NET_RX_BUSY_POLL)

static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule)
{}

enum {};

static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock,
			   unsigned flags, u16 budget)
{}

static void __napi_busy_loop(unsigned int napi_id,
		      bool (*loop_end)(void *, unsigned long),
		      void *loop_end_arg, unsigned flags, u16 budget)
{}

void napi_busy_loop_rcu(unsigned int napi_id,
			bool (*loop_end)(void *, unsigned long),
			void *loop_end_arg, bool prefer_busy_poll, u16 budget)
{}

void napi_busy_loop(unsigned int napi_id,
		    bool (*loop_end)(void *, unsigned long),
		    void *loop_end_arg, bool prefer_busy_poll, u16 budget)
{}
EXPORT_SYMBOL();

#endif /* CONFIG_NET_RX_BUSY_POLL */

static void napi_hash_add(struct napi_struct *napi)
{}

/* Warning: the caller is responsible for making sure an RCU grace period
 * has elapsed before freeing the memory containing @napi.
 */
static void napi_hash_del(struct napi_struct *napi)
{}

static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
{}

static void init_gro_hash(struct napi_struct *napi)
{}

int dev_set_threaded(struct net_device *dev, bool threaded)
{}
EXPORT_SYMBOL();

/**
 * netif_queue_set_napi - Associate queue with the napi
 * @dev: device to which NAPI and queue belong
 * @queue_index: Index of queue
 * @type: queue type as RX or TX
 * @napi: NAPI context, pass NULL to clear previously set NAPI
 *
 * Associate the queue with its corresponding NAPI context. This should be
 * done after registering the NAPI handler for the queue-vector and after
 * the queues have been mapped to the corresponding interrupt vector.
 */
void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index,
			  enum netdev_queue_type type, struct napi_struct *napi)
{}
EXPORT_SYMBOL();

void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
			   int (*poll)(struct napi_struct *, int), int weight)
{}
EXPORT_SYMBOL();
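
/* Example usage (illustrative sketch): registering a poll handler and then
 * associating queue 0 in both directions with the NAPI instance.
 * netif_napi_add() is the usual wrapper that supplies the default weight to
 * netif_napi_add_weight(); "mydev_poll" is hypothetical.
 */
static int mydev_poll(struct napi_struct *napi, int budget);

static void mydev_map_queue0(struct net_device *dev, struct napi_struct *napi)
{
	netif_napi_add(dev, napi, mydev_poll);
	netif_queue_set_napi(dev, 0, NETDEV_QUEUE_TYPE_RX, napi);
	netif_queue_set_napi(dev, 0, NETDEV_QUEUE_TYPE_TX, napi);
}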

void napi_disable(struct napi_struct *n)
{}
EXPORT_SYMBOL();

/**
 *	napi_enable - enable NAPI scheduling
 *	@n: NAPI context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with a prior napi_disable().
 */
void napi_enable(struct napi_struct *n)
{}
EXPORT_SYMBOL();
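
/* Example usage (illustrative sketch): quiescing NAPI around a ring
 * reconfiguration, showing the napi_disable()/napi_enable() pairing.
 * "mydev_reconfig_rings" is hypothetical.
 */
static void mydev_reconfig_rings(struct napi_struct *napi)
{
	napi_disable(napi);	/* waits for any in-flight poll to finish */
	/* ... resize and refill the RX/TX rings here ... */
	napi_enable(napi);
}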

static void flush_gro_hash(struct napi_struct *napi)
{}

/* Must be called in process context */
void __netif_napi_del(struct napi_struct *napi)
{}
EXPORT_SYMBOL();

static int __napi_poll(struct napi_struct *n, bool *repoll)
{}

static int napi_poll(struct napi_struct *n, struct list_head *repoll)
{}

static int napi_thread_wait(struct napi_struct *napi)
{}

static void napi_threaded_poll_loop(struct napi_struct *napi)
{}

static int napi_threaded_poll(void *data)
{}

static __latent_entropy void net_rx_action(struct softirq_action *h)
{}

struct netdev_adjacent {};

static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
						 struct list_head *adj_list)
{}

static int ____netdev_has_upper_dev(struct net_device *upper_dev,
				    struct netdev_nested_priv *priv)
{}

/**
 * netdev_has_upper_dev - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to the specified upper device and return
 * true if it is. Note that this checks only the immediate upper device,
 * not the complete stack of devices. The caller must hold the RTNL lock.
 */
bool netdev_has_upper_dev(struct net_device *dev,
			  struct net_device *upper_dev)
{}
EXPORT_SYMBOL();

/**
 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to the specified upper device and return
 * true if it is. Note that this checks the entire upper device chain.
 * The caller must hold the RCU read lock.
 */
bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
				  struct net_device *upper_dev)
{}
EXPORT_SYMBOL();

/**
 * netdev_has_any_upper_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to an upper device and return true if
 * it is. The caller must hold the RTNL lock.
 */
bool netdev_has_any_upper_dev(struct net_device *dev)
{}
EXPORT_SYMBOL();

/**
 * netdev_master_upper_dev_get - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return a pointer to it, or NULL if
 * there is none. The caller must hold the RTNL lock.
 */
struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
{}
EXPORT_SYMBOL();
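
/* Example usage (illustrative sketch): reading the master's name under RTNL;
 * "mydev_master_name" is hypothetical.
 */
static const char *mydev_master_name(struct net_device *dev)
{
	struct net_device *master;

	ASSERT_RTNL();
	master = netdev_master_upper_dev_get(dev);
	return master ? master->name : "none";
}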

static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev)
{}

/**
 * netdev_has_any_lower_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to a lower device and return true if
 * it is. The caller must hold the RTNL lock.
 */
static bool netdev_has_any_lower_dev(struct net_device *dev)
{}

void *netdev_adjacent_get_private(struct list_head *adj_list)
{}
EXPORT_SYMBOL();

/**
 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next device from the dev's upper list, starting from iter
 * position. The caller must hold RCU read lock.
 */
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
						 struct list_head **iter)
{}
EXPORT_SYMBOL();
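
/* Example usage (illustrative sketch): walking the immediate upper devices
 * under RCU via netdev_for_each_upper_dev_rcu(), the convenience wrapper
 * around netdev_upper_get_next_dev_rcu(). "mydev_print_uppers" is
 * hypothetical.
 */
static void mydev_print_uppers(struct net_device *dev)
{
	struct net_device *upper;
	struct list_head *iter;

	rcu_read_lock();
	netdev_for_each_upper_dev_rcu(dev, upper, iter)
		netdev_info(dev, "upper: %s\n", upper->name);
	rcu_read_unlock();
}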

static struct net_device *__netdev_next_upper_dev(struct net_device *dev,
						  struct list_head **iter,
						  bool *ignore)
{}

static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
						    struct list_head **iter)
{}

static int __netdev_walk_all_upper_dev(struct net_device *dev,
				       int (*fn)(struct net_device *dev,
					 struct netdev_nested_priv *priv),
				       struct netdev_nested_priv *priv)
{}

int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
				  int (*fn)(struct net_device *dev,
					    struct netdev_nested_priv *priv),
				  struct netdev_nested_priv *priv)
{}
EXPORT_SYMBOL_GPL();
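
/* Example usage (illustrative sketch): counting every device stacked above
 * a given one with the nested-walk API. The callback/priv plumbing is the
 * real interface; the counter and function names are hypothetical.
 */
static int mydev_count_one(struct net_device *dev,
			   struct netdev_nested_priv *priv)
{
	(*(int *)priv->data)++;
	return 0;	/* returning non-zero would stop the walk */
}

static int mydev_count_uppers(struct net_device *dev)
{
	struct netdev_nested_priv priv = {};
	int count = 0;

	priv.data = &count;
	rcu_read_lock();
	netdev_walk_all_upper_dev_rcu(dev, mydev_count_one, &priv);
	rcu_read_unlock();
	return count;
}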

static bool __netdev_has_upper_dev(struct net_device *dev,
				   struct net_device *upper_dev)
{}

/**
 * netdev_lower_get_next_private - Get the next ->private from the
 *				   lower neighbour list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must either hold the
 * RTNL lock or its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next_private(struct net_device *dev,
				    struct list_head **iter)
{}
EXPORT_SYMBOL();

/**
 * netdev_lower_get_next_private_rcu - Get the next ->private from the
 *				       lower neighbour list, RCU
 *				       variant
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RCU read lock.
 */
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
					struct list_head **iter)
{}
EXPORT_SYMBOL();

/**
 * netdev_lower_get_next - Get the next device from the lower neighbour
 *                         list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent from the dev's lower neighbour
 * list, starting from iter position. The caller must hold the RTNL lock or
 * its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
{}
EXPORT_SYMBOL();

static struct net_device *netdev_next_lower_dev(struct net_device *dev,
						struct list_head **iter)
{}

static struct net_device *__netdev_next_lower_dev(struct net_device *dev,
						  struct list_head **iter,
						  bool *ignore)
{}

int netdev_walk_all_lower_dev(struct net_device *dev,
			      int (*fn)(struct net_device *dev,
					struct netdev_nested_priv *priv),
			      struct netdev_nested_priv *priv)
{}
EXPORT_SYMBOL_GPL();

static int __netdev_walk_all_lower_dev(struct net_device *dev,
				       int (*fn)(struct net_device *dev,
					 struct netdev_nested_priv *priv),
				       struct netdev_nested_priv *priv)
{}

struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
					     struct list_head **iter)
{}
EXPORT_SYMBOL();

static u8 __netdev_upper_depth(struct net_device *dev)
{}

static u8 __netdev_lower_depth(struct net_device *dev)
{}

static int __netdev_update_upper_level(struct net_device *dev,
				       struct netdev_nested_priv *__unused)
{}

#ifdef CONFIG_LOCKDEP
static LIST_HEAD(net_unlink_list);

static void net_unlink_todo(struct net_device *dev)
{}
#endif

static int __netdev_update_lower_level(struct net_device *dev,
				       struct netdev_nested_priv *priv)
{}

int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
				  int (*fn)(struct net_device *dev,
					    struct netdev_nested_priv *priv),
				  struct netdev_nested_priv *priv)
{}
EXPORT_SYMBOL_GPL();

/**
 * netdev_lower_get_first_private_rcu - Get the first ->private from the
 *				       lower neighbour list, RCU
 *				       variant
 * @dev: device
 *
 * Gets the first netdev_adjacent->private from the dev's lower neighbour
 * list. The caller must hold RCU read lock.
 */
void *netdev_lower_get_first_private_rcu(struct net_device *dev)
{}
EXPORT_SYMBOL();

/**
 * netdev_master_upper_dev_get_rcu - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return a pointer to it, or NULL if
 * there is none. The caller must hold the RCU read lock.
 */
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
{}
EXPORT_SYMBOL();

static int netdev_adjacent_sysfs_add(struct net_device *dev,
			      struct net_device *adj_dev,
			      struct list_head *dev_list)
{}
static void netdev_adjacent_sysfs_del(struct net_device *dev,
			       char *name,
			       struct list_head *dev_list)
{}

static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
						 struct net_device *adj_dev,
						 struct list_head *dev_list)
{}

static int __netdev_adjacent_dev_insert(struct net_device *dev,
					struct net_device *adj_dev,
					struct list_head *dev_list,
					void *private, bool master)
{}

static void __netdev_adjacent_dev_remove(struct net_device *dev,
					 struct net_device *adj_dev,
					 u16 ref_nr,
					 struct list_head *dev_list)
{}

static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
					    struct net_device *upper_dev,
					    struct list_head *up_list,
					    struct list_head *down_list,
					    void *private, bool master)
{}

static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
					       struct net_device *upper_dev,
					       u16 ref_nr,
					       struct list_head *up_list,
					       struct list_head *down_list)
{}

static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
						struct net_device *upper_dev,
						void *private, bool master)
{}

static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
						   struct net_device *upper_dev)
{}

static int __netdev_upper_dev_link(struct net_device *dev,
				   struct net_device *upper_dev, bool master,
				   void *upper_priv, void *upper_info,
				   struct netdev_nested_priv *priv,
				   struct netlink_ext_ack *extack)
{}

/**
 * netdev_upper_dev_link - Add a link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 * @extack: netlink extended ack
 *
 * Adds a link to a device which is upper to this one. The caller must hold
 * the RTNL lock. On a failure a negative errno code is returned.
 * On success the reference counts are adjusted and the function
 * returns zero.
 */
int netdev_upper_dev_link(struct net_device *dev,
			  struct net_device *upper_dev,
			  struct netlink_ext_ack *extack)
{}
EXPORT_SYMBOL();
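
/* Example usage (illustrative sketch): linking a lower device under an upper
 * one with RTNL held, propagating any extack error back to the caller.
 * "mydev_enslave" is hypothetical.
 */
static int mydev_enslave(struct net_device *upper, struct net_device *lower,
			 struct netlink_ext_ack *extack)
{
	int err;

	ASSERT_RTNL();
	err = netdev_upper_dev_link(lower, upper, extack);
	if (err)
		return err;
	/* ... device-specific setup; on failure unwind with
	 * netdev_upper_dev_unlink(lower, upper) ...
	 */
	return 0;
}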

/**
 * netdev_master_upper_dev_link - Add a master link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 * @upper_priv: upper device private
 * @upper_info: upper info to be passed down via notifier
 * @extack: netlink extended ack
 *
 * Adds a link to a device which is upper to this one. In this case, only
 * one master upper device can be linked, although other non-master devices
 * might be linked as well. The caller must hold the RTNL lock.
 * On a failure a negative errno code is returned. On success the reference
 * counts are adjusted and the function returns zero.
 */
int netdev_master_upper_dev_link(struct net_device *dev,
				 struct net_device *upper_dev,
				 void *upper_priv, void *upper_info,
				 struct netlink_ext_ack *extack)
{}
EXPORT_SYMBOL();

static void __netdev_upper_dev_unlink(struct net_device *dev,
				      struct net_device *upper_dev,
				      struct netdev_nested_priv *priv)
{}

/**
 * netdev_upper_dev_unlink - Removes a link to upper device
 * @dev: device
 * @upper_dev: upper device to unlink
 *
 * Removes a link to a device which is upper to this one. The caller must
 * hold the RTNL lock.
 */
void netdev_upper_dev_unlink(struct net_device *dev,
			     struct net_device *upper_dev)
{}
EXPORT_SYMBOL();

static void __netdev_adjacent_dev_set(struct net_device *upper_dev,
				      struct net_device *lower_dev,
				      bool val)
{}

static void netdev_adjacent_dev_disable(struct net_device *upper_dev,
					struct net_device *lower_dev)
{}

static void netdev_adjacent_dev_enable(struct net_device *upper_dev,
				       struct net_device *lower_dev)
{}

int netdev_adjacent_change_prepare(struct net_device *old_dev,
				   struct net_device *new_dev,
				   struct net_device *dev,
				   struct netlink_ext_ack *extack)
{}
EXPORT_SYMBOL();

void netdev_adjacent_change_commit(struct net_device *old_dev,
				   struct net_device *new_dev,
				   struct net_device *dev)
{}
EXPORT_SYMBOL();

void netdev_adjacent_change_abort(struct net_device *old_dev,
				  struct net_device *new_dev,
				  struct net_device *dev)
{}
EXPORT_SYMBOL();

/**
 * netdev_bonding_info_change - Dispatch event about slave change
 * @dev: device
 * @bonding_info: info to dispatch
 *
 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
 * The caller must hold the RTNL lock.
 */
void netdev_bonding_info_change(struct net_device *dev,
				struct netdev_bonding_info *bonding_info)
{}
EXPORT_SYMBOL();

static int netdev_offload_xstats_enable_l3(struct net_device *dev,
					   struct netlink_ext_ack *extack)
{}

int netdev_offload_xstats_enable(struct net_device *dev,
				 enum netdev_offload_xstats_type type,
				 struct netlink_ext_ack *extack)
{}
EXPORT_SYMBOL();

static void netdev_offload_xstats_disable_l3(struct net_device *dev)
{}

int netdev_offload_xstats_disable(struct net_device *dev,
				  enum netdev_offload_xstats_type type)
{}
EXPORT_SYMBOL();

static void netdev_offload_xstats_disable_all(struct net_device *dev)
{}

static struct rtnl_hw_stats64 *
netdev_offload_xstats_get_ptr(const struct net_device *dev,
			      enum netdev_offload_xstats_type type)
{}

bool netdev_offload_xstats_enabled(const struct net_device *dev,
				   enum netdev_offload_xstats_type type)
{}
EXPORT_SYMBOL();

struct netdev_notifier_offload_xstats_ru {};

struct netdev_notifier_offload_xstats_rd {};

static void netdev_hw_stats64_add(struct rtnl_hw_stats64 *dest,
				  const struct rtnl_hw_stats64 *src)
{}

static int netdev_offload_xstats_get_used(struct net_device *dev,
					  enum netdev_offload_xstats_type type,
					  bool *p_used,
					  struct netlink_ext_ack *extack)
{}

static int netdev_offload_xstats_get_stats(struct net_device *dev,
					   enum netdev_offload_xstats_type type,
					   struct rtnl_hw_stats64 *p_stats,
					   bool *p_used,
					   struct netlink_ext_ack *extack)
{}

int netdev_offload_xstats_get(struct net_device *dev,
			      enum netdev_offload_xstats_type type,
			      struct rtnl_hw_stats64 *p_stats, bool *p_used,
			      struct netlink_ext_ack *extack)
{}
EXPORT_SYMBOL();

void
netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *report_delta,
				   const struct rtnl_hw_stats64 *stats)
{}
EXPORT_SYMBOL();

void
netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *report_used)
{}
EXPORT_SYMBOL();

void netdev_offload_xstats_push_delta(struct net_device *dev,
				      enum netdev_offload_xstats_type type,
				      const struct rtnl_hw_stats64 *p_stats)
{}
EXPORT_SYMBOL();

/**
 * netdev_get_xmit_slave - Get the xmit slave of master device
 * @dev: device
 * @skb: The packet
 * @all_slaves: assume all the slaves are active
 *
 * The reference counters are not incremented so the caller must be
 * careful with locks. The caller must hold the RCU read lock.
 * %NULL is returned if no slave is found.
 */
struct net_device *netdev_get_xmit_slave(struct net_device *dev,
					 struct sk_buff *skb,
					 bool all_slaves)
{}
EXPORT_SYMBOL();

static struct net_device *netdev_sk_get_lower_dev(struct net_device *dev,
						  struct sock *sk)
{}

/**
 * netdev_sk_get_lowest_dev - Get the lowest device in chain given device and socket
 * @dev: device
 * @sk: the socket
 *
 * %NULL is returned if no lower device is found.
 */
struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev,
					    struct sock *sk)
{}
EXPORT_SYMBOL();

static void netdev_adjacent_add_links(struct net_device *dev)
{}

static void netdev_adjacent_del_links(struct net_device *dev)
{}

void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
{}

void *netdev_lower_dev_get_private(struct net_device *dev,
				   struct net_device *lower_dev)
{}
EXPORT_SYMBOL();


/**
 * netdev_lower_state_changed - Dispatch event about lower device state change
 * @lower_dev: device
 * @lower_state_info: state to dispatch
 *
 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
 * The caller must hold the RTNL lock.
 */
void netdev_lower_state_changed(struct net_device *lower_dev,
				void *lower_state_info)
{}
EXPORT_SYMBOL();

static void dev_change_rx_flags(struct net_device *dev, int flags)
{}

static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
{}

/**
 *	dev_set_promiscuity	- update promiscuity count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove promiscuity from a device. While the count in the device
 *	remains above zero the interface remains promiscuous. Once it hits zero
 *	the device reverts to normal filtering operation. A negative @inc
 *	value is used to drop promiscuity on the device.
 *	Return 0 if successful or a negative errno code on error.
 */
int dev_set_promiscuity(struct net_device *dev, int inc)
{}
EXPORT_SYMBOL();
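
/* Example usage (illustrative sketch): because promiscuity is a counter,
 * each increment must eventually be balanced by a matching decrement.
 * Callers hold RTNL; the function names are hypothetical.
 */
static int mydev_sniff_start(struct net_device *dev)
{
	return dev_set_promiscuity(dev, 1);	/* count up, enable on 0->1 */
}

static void mydev_sniff_stop(struct net_device *dev)
{
	dev_set_promiscuity(dev, -1);		/* count down, disable on 1->0 */
}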

static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
{}

/**
 *	dev_set_allmulti	- update allmulti count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove reception of all multicast frames to a device. While the
 *	count in the device remains above zero the interface stays in
 *	all-multicast mode. Once it hits zero the device reverts to normal
 *	filtering operation. A negative @inc value is used to drop the counter
 *	when releasing a resource needing all multicasts.
 *	Return 0 if successful or a negative errno code on error.
 */
int dev_set_allmulti(struct net_device *dev, int inc)
{}
EXPORT_SYMBOL();

/*
 *	Upload unicast and multicast address lists to device and
 *	configure RX filtering. When the device doesn't support unicast
 *	filtering it is put in promiscuous mode while unicast addresses
 *	are present.
 */
void __dev_set_rx_mode(struct net_device *dev)
{}

void dev_set_rx_mode(struct net_device *dev)
{}

/**
 *	dev_get_flags - get flags reported to userspace
 *	@dev: device
 *
 *	Get the combination of flag bits exported through APIs to userspace.
 */
unsigned int dev_get_flags(const struct net_device *dev)
{}
EXPORT_SYMBOL();

int __dev_change_flags(struct net_device *dev, unsigned int flags,
		       struct netlink_ext_ack *extack)
{}

void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
			unsigned int gchanges, u32 portid,
			const struct nlmsghdr *nlh)
{}

/**
 *	dev_change_flags - change device settings
 *	@dev: device
 *	@flags: device state flags
 *	@extack: netlink extended ack
 *
 *	Change settings on a device based on the state flags. The flags are
 *	in the userspace-exported format.
 */
int dev_change_flags(struct net_device *dev, unsigned int flags,
		     struct netlink_ext_ack *extack)
{}
EXPORT_SYMBOL();
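
/* Example usage (illustrative sketch): bringing an interface
 * administratively up under RTNL using the userspace-format flags.
 * "mydev_bring_up" is hypothetical.
 */
static int mydev_bring_up(struct net_device *dev)
{
	ASSERT_RTNL();
	return dev_change_flags(dev, dev->flags | IFF_UP, NULL);
}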

int __dev_set_mtu(struct net_device *dev, int new_mtu)
{}
EXPORT_SYMBOL();

int dev_validate_mtu(struct net_device *dev, int new_mtu,
		     struct netlink_ext_ack *extack)
{}

/**
 *	dev_set_mtu_ext - Change maximum transfer unit
 *	@dev: device
 *	@new_mtu: new transfer unit
 *	@extack: netlink extended ack
 *
 *	Change the maximum transfer size of the network device.
 */
int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
		    struct netlink_ext_ack *extack)
{}

int dev_set_mtu(struct net_device *dev, int new_mtu)
{}
EXPORT_SYMBOL();
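
/* Example usage (illustrative sketch): growing a lower device's MTU under
 * RTNL, leaving it untouched when it is already large enough.
 * "mydev_grow_mtu" is hypothetical.
 */
static int mydev_grow_mtu(struct net_device *lower, int needed)
{
	ASSERT_RTNL();
	if (lower->mtu >= needed)
		return 0;
	return dev_set_mtu(lower, needed);	/* driver may still reject it */
}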

/**
 *	dev_change_tx_queue_len - Change TX queue length of a netdevice
 *	@dev: device
 *	@new_len: new tx queue length
 */
int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
{}

/**
 *	dev_set_group - Change group this device belongs to
 *	@dev: device
 *	@new_group: group this device should belong to
 */
void dev_set_group(struct net_device *dev, int new_group)
{}

/**
 *	dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR.
 *	@dev: device
 *	@addr: new address
 *	@extack: netlink extended ack
 */
int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
			      struct netlink_ext_ack *extack)
{}
EXPORT_SYMBOL();

/**
 *	dev_set_mac_address - Change Media Access Control Address
 *	@dev: device
 *	@sa: new address
 *	@extack: netlink extended ack
 *
 *	Change the hardware (MAC) address of the device
 */
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
			struct netlink_ext_ack *extack)
{}
EXPORT_SYMBOL();
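
/* Example usage (illustrative sketch): programming a new hardware address
 * with RTNL held. "mydev_set_mac" is hypothetical; it only wraps the
 * sockaddr plumbing around the real API.
 */
static int mydev_set_mac(struct net_device *dev, const u8 *mac)
{
	struct sockaddr sa;

	ASSERT_RTNL();
	sa.sa_family = dev->type;
	memcpy(sa.sa_data, mac, ETH_ALEN);
	return dev_set_mac_address(dev, &sa, NULL);
}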

DECLARE_RWSEM();

int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
			     struct netlink_ext_ack *extack)
{}
EXPORT_SYMBOL();

int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name)
{}
EXPORT_SYMBOL();

/**
 *	dev_change_carrier - Change device carrier
 *	@dev: device
 *	@new_carrier: new value
 *
 *	Change the device's carrier state
 */
int dev_change_carrier(struct net_device *dev, bool new_carrier)
{}

/**
 *	dev_get_phys_port_id - Get device physical port ID
 *	@dev: device
 *	@ppid: port ID
 *
 *	Get device physical port ID
 */
int dev_get_phys_port_id(struct net_device *dev,
			 struct netdev_phys_item_id *ppid)
{}

/**
 *	dev_get_phys_port_name - Get device physical port name
 *	@dev: device
 *	@name: port name
 *	@len: limit of bytes to copy to name
 *
 *	Get device physical port name
 */
int dev_get_phys_port_name(struct net_device *dev,
			   char *name, size_t len)
{}

/**
 *	dev_get_port_parent_id - Get the device's port parent identifier
 *	@dev: network device
 *	@ppid: pointer to a storage for the port's parent identifier
 *	@recurse: allow/disallow recursion to lower devices
 *
 *	Get the device's port parent identifier
 */
int dev_get_port_parent_id(struct net_device *dev,
			   struct netdev_phys_item_id *ppid,
			   bool recurse)
{}
EXPORT_SYMBOL();

/**
 *	netdev_port_same_parent_id - Indicate if two network devices have
 *	the same port parent identifier
 *	@a: first network device
 *	@b: second network device
 */
bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b)
{}
EXPORT_SYMBOL();

/**
 *	dev_change_proto_down - set carrier according to proto_down.
 *
 *	@dev: device
 *	@proto_down: new value
 */
int dev_change_proto_down(struct net_device *dev, bool proto_down)
{}

/**
 *	dev_change_proto_down_reason - update proto_down reason bits
 *
 *	@dev: device
 *	@mask: mask of proto_down reason bits to update
 *	@value: new values for the masked bits
 */
void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
				  u32 value)
{}

struct bpf_xdp_link {};

static enum bpf_xdp_mode dev_xdp_mode(struct net_device *dev, u32 flags)
{}

static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode)
{}

static struct bpf_xdp_link *dev_xdp_link(struct net_device *dev,
					 enum bpf_xdp_mode mode)
{}

static struct bpf_prog *dev_xdp_prog(struct net_device *dev,
				     enum bpf_xdp_mode mode)
{}

u8 dev_xdp_prog_count(struct net_device *dev)
{}
EXPORT_SYMBOL_GPL();

u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode)
{}

static void dev_xdp_set_link(struct net_device *dev, enum bpf_xdp_mode mode,
			     struct bpf_xdp_link *link)
{}

static void dev_xdp_set_prog(struct net_device *dev, enum bpf_xdp_mode mode,
			     struct bpf_prog *prog)
{}

static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode,
			   bpf_op_t bpf_op, struct netlink_ext_ack *extack,
			   u32 flags, struct bpf_prog *prog)
{}

static void dev_xdp_uninstall(struct net_device *dev)
{}

static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack,
			  struct bpf_xdp_link *link, struct bpf_prog *new_prog,
			  struct bpf_prog *old_prog, u32 flags)
{}

static int dev_xdp_attach_link(struct net_device *dev,
			       struct netlink_ext_ack *extack,
			       struct bpf_xdp_link *link)
{}

static int dev_xdp_detach_link(struct net_device *dev,
			       struct netlink_ext_ack *extack,
			       struct bpf_xdp_link *link)
{}

static void bpf_xdp_link_release(struct bpf_link *link)
{}

static int bpf_xdp_link_detach(struct bpf_link *link)
{}

static void bpf_xdp_link_dealloc(struct bpf_link *link)
{}

static void bpf_xdp_link_show_fdinfo(const struct bpf_link *link,
				     struct seq_file *seq)
{}

static int bpf_xdp_link_fill_link_info(const struct bpf_link *link,
				       struct bpf_link_info *info)
{}

static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog,
			       struct bpf_prog *old_prog)
{}

static const struct bpf_link_ops bpf_xdp_link_lops =;

int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{}

/**
 *	dev_change_xdp_fd - set or clear a bpf program for a device rx path
 *	@dev: device
 *	@extack: netlink extended ack
 *	@fd: new program fd or negative value to clear
 *	@expected_fd: old program fd that userspace expects to replace or clear
 *	@flags: xdp-related flags
 *
 *	Set or clear a bpf program for a device
 */
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
		      int fd, int expected_fd, u32 flags)
{}

/**
 * dev_index_reserve() - allocate an ifindex in a namespace
 * @net: the applicable net namespace
 * @ifindex: requested ifindex, pass %0 to get one allocated
 *
 * Allocate an ifindex for a new device. Caller must either use the ifindex
 * to store the device (via list_netdevice()) or call dev_index_release()
 * to give the index up.
 *
 * Return: a suitable unique value for a new device interface number or -errno.
 */
static int dev_index_reserve(struct net *net, u32 ifindex)
{}

static void dev_index_release(struct net *net, int ifindex)
{}

/* Delayed registration/unregistration */
LIST_HEAD();
DECLARE_WAIT_QUEUE_HEAD();
atomic_t dev_unreg_count =;

static void net_set_todo(struct net_device *dev)
{}

static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
	struct net_device *upper, netdev_features_t features)
{}

static void netdev_sync_lower_features(struct net_device *upper,
	struct net_device *lower, netdev_features_t features)
{}

static netdev_features_t netdev_fix_features(struct net_device *dev,
	netdev_features_t features)
{}

int __netdev_update_features(struct net_device *dev)
{}

/**
 *	netdev_update_features - recalculate device features
 *	@dev: the device to check
 *
 *	Recalculate the dev->features set and send notifications if it
 *	has changed. Should be called after driver- or hardware-dependent
 *	conditions that influence the features might have changed.
 */
void netdev_update_features(struct net_device *dev)
{}
EXPORT_SYMBOL();

/**
 *	netdev_change_features - recalculate device features
 *	@dev: the device to check
 *
 *	Recalculate the dev->features set and send notifications even
 *	if the features have not changed. Should be called instead of
 *	netdev_update_features() if dev->vlan_features might also have
 *	changed, to allow the changes to be propagated to stacked
 *	VLAN devices.
 */
void netdev_change_features(struct net_device *dev)
{}
EXPORT_SYMBOL();

/**
 *	netif_stacked_transfer_operstate -	transfer operstate
 *	@rootdev: the root or lower level device to transfer state from
 *	@dev: the device to transfer operstate to
 *
 *	Transfer operational state from root to device. This is normally
 *	called when a stacking relationship exists between the root
 *	device and the device (a leaf device).
 */
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
					struct net_device *dev)
{}
EXPORT_SYMBOL();

static int netif_alloc_rx_queues(struct net_device *dev)
{}

static void netif_free_rx_queues(struct net_device *dev)
{}

static void netdev_init_one_queue(struct net_device *dev,
				  struct netdev_queue *queue, void *_unused)
{}

static void netif_free_tx_queues(struct net_device *dev)
{}

static int netif_alloc_netdev_queues(struct net_device *dev)
{}

void netif_tx_stop_all_queues(struct net_device *dev)
{}
EXPORT_SYMBOL();

static int netdev_do_alloc_pcpu_stats(struct net_device *dev)
{}

static void netdev_do_free_pcpu_stats(struct net_device *dev)
{}

/**
 * register_netdevice() - register a network device
 * @dev: device to register
 *
 * Take a prepared network device structure and make it externally accessible.
 * A %NETDEV_REGISTER message is sent to the netdev notifier chain.
 * Callers must hold the rtnl lock - you may want register_netdev()
 * instead of this.
 */
int register_netdevice(struct net_device *dev)
{}
EXPORT_SYMBOL();

/* Initialize the core of a dummy net device.
 * This is useful if you are calling this function after alloc_netdev(),
 * since it does not memset the net_device fields.
 */
static void init_dummy_netdev_core(struct net_device *dev)
{}

/**
 *	init_dummy_netdev	- init a dummy network device for NAPI
 *	@dev: device to init
 *
 *	This takes a network device structure and initializes the minimum
 *	number of fields so it can be used to schedule NAPI polls without
 *	registering a full-blown interface. This is to be used by drivers
 *	that need to tie several hardware interfaces to a single NAPI
 *	poll scheduler due to HW limitations.
 */
void init_dummy_netdev(struct net_device *dev)
{}
EXPORT_SYMBOL_GPL();

/**
 *	register_netdev	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	This is a wrapper around register_netdevice that takes the rtnl semaphore
 *	and expands the device name if you passed a format string to
 *	alloc_netdev.
 */
int register_netdev(struct net_device *dev)
{}
EXPORT_SYMBOL();
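
/* Example usage (illustrative sketch): the classic probe-time sequence.
 * alloc_etherdev() (from <linux/etherdevice.h>) is the usual front end to
 * alloc_netdev_mqs(); "mydev_probe" is hypothetical.
 */
static struct net_device *mydev_probe(void)
{
	struct net_device *dev;

	dev = alloc_etherdev(0);	/* no private area in this sketch */
	if (!dev)
		return NULL;
	/* ... set dev->netdev_ops, features and MAC address here ... */
	if (register_netdev(dev)) {
		free_netdev(dev);
		return NULL;
	}
	return dev;
}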

int netdev_refcnt_read(const struct net_device *dev)
{}
EXPORT_SYMBOL();

int netdev_unregister_timeout_secs __read_mostly =;

#define WAIT_REFS_MIN_MSECS
#define WAIT_REFS_MAX_MSECS
/**
 * netdev_wait_allrefs_any - wait until all references are gone.
 * @list: list of net_devices to wait on
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and cleanup and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
static struct net_device *netdev_wait_allrefs_any(struct list_head *list)
{}

/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *      ...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
 * We are invoked by rtnl_unlock().
 * This allows us to deal with problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 *
 * We must not return until all unregister events added during
 * the interval the lock was held have been completed.
 */
void netdev_run_todo(void)
{}

/* Collate per-cpu network dstats statistics
 *
 * Read per-cpu network statistics from dev->dstats and populate the related
 * fields in @s.
 */
static void dev_fetch_dstats(struct rtnl_link_stats64 *s,
			     const struct pcpu_dstats __percpu *dstats)
{}

/* ndo_get_stats64 implementation for dstats-based accounting.
 *
 * Populate @s from dev->stats and dev->dstats. This is used internally by the
 * core for NETDEV_PCPU_STAT_DSTAT-type stats collection.
 */
static void dev_get_dstats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *s)
{}

/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
 * all the same fields in the same order as net_device_stats, with only
 * the type differing, but rtnl_link_stats64 may have additional fields
 * at the end for newer counters.
 */
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
			     const struct net_device_stats *netdev_stats)
{}
EXPORT_SYMBOL();

static __cold struct net_device_core_stats __percpu *netdev_core_stats_alloc(
		struct net_device *dev)
{}

noinline void netdev_core_stats_inc(struct net_device *dev, u32 offset)
{}
EXPORT_SYMBOL_GPL();

/**
 *	dev_get_stats	- get network device statistics
 *	@dev: device to get statistics from
 *	@storage: place to store stats
 *
 *	Get network statistics from device. Return @storage.
 *	The device driver may provide its own method by setting
 *	dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
 *	otherwise the internal statistics structure is used.
 */
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{}
EXPORT_SYMBOL();

/**
 *	dev_fetch_sw_netstats - get per-cpu network device statistics
 *	@s: place to store stats
 *	@netstats: per-cpu network stats to read from
 *
 *	Read per-cpu network statistics and populate the related fields in @s.
 */
void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
			   const struct pcpu_sw_netstats __percpu *netstats)
{}
EXPORT_SYMBOL_GPL();

/**
 *	dev_get_tstats64 - ndo_get_stats64 implementation
 *	@dev: device to get statistics from
 *	@s: place to store stats
 *
 *	Populate @s from dev->stats and dev->tstats. Can be used as
 *	ndo_get_stats64() callback.
 */
void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s)
{}
EXPORT_SYMBOL_GPL();
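
/* Example usage (illustrative sketch): a driver whose RX/TX counters live in
 * dev->tstats can plug the helper in directly as its stats callback. The ops
 * table is hypothetical and intentionally incomplete.
 */
static const struct net_device_ops mydev_netdev_ops = {
	.ndo_get_stats64	= dev_get_tstats64,
};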

struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
{}

static const struct ethtool_ops default_ethtool_ops;

void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops)
{}
EXPORT_SYMBOL_GPL();

/**
 * netdev_sw_irq_coalesce_default_on() - enable SW IRQ coalescing by default
 * @dev: netdev to enable the IRQ coalescing on
 *
 * Sets a conservative default for SW IRQ coalescing. Users can use
 * sysfs attributes to override the default values.
 */
void netdev_sw_irq_coalesce_default_on(struct net_device *dev)
{}
EXPORT_SYMBOL_GPL();

/**
 * alloc_netdev_mqs - allocate network device
 * @sizeof_priv: size of private data to allocate space for
 * @name: device name format string
 * @name_assign_type: origin of device name
 * @setup: callback to initialize device
 * @txqs: the number of TX subqueues to allocate
 * @rxqs: the number of RX subqueues to allocate
 *
 * Allocates a struct net_device with private data area for driver use
 * and performs basic initialization.  Also allocates subqueue structs
 * for each queue on the device.
 */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
		unsigned char name_assign_type,
		void (*setup)(struct net_device *),
		unsigned int txqs, unsigned int rxqs)
{}
EXPORT_SYMBOL();
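
/* Example usage (illustrative sketch): allocating a symmetric multiqueue
 * Ethernet device directly; ether_setup() (from <linux/etherdevice.h>) is
 * the standard init callback. "mydev_alloc" is hypothetical.
 */
static struct net_device *mydev_alloc(unsigned int nqueues)
{
	return alloc_netdev_mqs(0, "myeth%d", NET_NAME_ENUM, ether_setup,
				nqueues, nqueues);
}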

/**
 * free_netdev - free network device
 * @dev: device
 *
 * This function does the last stage of destroying an allocated device
 * interface. The reference to the device object is released. If this
 * is the last reference then it will be freed. Must be called in process
 * context.
 */
void free_netdev(struct net_device *dev)
{}
EXPORT_SYMBOL();

/**
 * alloc_netdev_dummy - Allocate and initialize a dummy net device.
 * @sizeof_priv: size of private data to allocate space for
 *
 * Return: the allocated net_device on success, NULL otherwise
 */
struct net_device *alloc_netdev_dummy(int sizeof_priv)
{}
EXPORT_SYMBOL_GPL();

/**
 *	synchronize_net -  Synchronize with packet receive processing
 *
 *	Wait for packets currently being received to be done.
 *	Does not block later packets from starting.
 */
void synchronize_net(void)
{}
EXPORT_SYMBOL();

static void netdev_rss_contexts_free(struct net_device *dev)
{}

/**
 *	unregister_netdevice_queue - remove device from the kernel
 *	@dev: device
 *	@head: list
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *	If @head is not NULL, the device is queued to be unregistered later.
 *
 *	Callers must hold the rtnl semaphore.  You may want
 *	unregister_netdev() instead of this.
 */

void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{}
EXPORT_SYMBOL();

void unregister_netdevice_many_notify(struct list_head *head,
				      u32 portid, const struct nlmsghdr *nlh)
{}

/**
 *	unregister_netdevice_many - unregister many devices
 *	@head: list of devices
 *
 *  Note: As most callers use a stack-allocated list_head,
 *  we force a list_del() to make sure the stack won't be corrupted later.
 */
void unregister_netdevice_many(struct list_head *head)
{}
EXPORT_SYMBOL();

/**
 *	unregister_netdev - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *
 *	This is just a wrapper for unregister_netdevice that takes
 *	the rtnl semaphore.  In general you want to use this and not
 *	unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{}
EXPORT_SYMBOL();
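
/* Example usage (illustrative sketch): tear-down mirroring the probe
 * sequence; unregister_netdev() takes the rtnl semaphore itself.
 * "mydev_remove" is hypothetical.
 */
static void mydev_remove(struct net_device *dev)
{
	unregister_netdev(dev);
	free_netdev(dev);	/* drops the final reference */
}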

/**
 *	__dev_change_net_namespace - move device to a different network namespace
 *	@dev: device
 *	@net: network namespace
 *	@pat: If not NULL name pattern to try if the current device name
 *	      is already taken in the destination network namespace.
 *	@new_ifindex: If not zero, specifies device index in the target
 *	              namespace.
 *
 *	This function shuts down a device interface and moves it
 *	to a new network namespace. On success 0 is returned, on
 *	a failure a negative errno code is returned.
 *
 *	Callers must hold the rtnl semaphore.
 */

int __dev_change_net_namespace(struct net_device *dev, struct net *net,
			       const char *pat, int new_ifindex)
{}
EXPORT_SYMBOL_GPL();

static int dev_cpu_dead(unsigned int oldcpu)
{}

/**
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all.  Will not
 *	enable anything that is off in @mask. Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask)
{}
EXPORT_SYMBOL();
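
/* Example usage (illustrative sketch): how a master device might fold a new
 * slave's features into its own, as bonding-style drivers do.
 * "mydev_fold_slave_features" is hypothetical; NETIF_F_ONE_FOR_ALL is the
 * real mask.
 */
static netdev_features_t
mydev_fold_slave_features(netdev_features_t all,
			  const struct net_device *slave)
{
	return netdev_increment_features(all, slave->features,
					 NETIF_F_ONE_FOR_ALL);
}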

static struct hlist_head * __net_init netdev_create_hash(void)
{}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{}

/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *
 *	Determine network driver for device.
 */
const char *netdev_drivername(const struct net_device *dev)
{}

static void __netdev_printk(const char *level, const struct net_device *dev,
			    struct va_format *vaf)
{}

void netdev_printk(const char *level, const struct net_device *dev,
		   const char *format, ...)
{}
EXPORT_SYMBOL();

#define define_netdev_printk_level(func, level)

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);
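
/* Example usage (illustrative sketch): the per-level wrappers generated
 * above prefix messages with the driver and device names.
 * "mydev_report_link" is hypothetical.
 */
static void mydev_report_link(struct net_device *dev, bool up)
{
	if (up)
		netdev_info(dev, "link is up\n");
	else
		netdev_warn(dev, "link is down\n");
}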

static void __net_exit netdev_exit(struct net *net)
{}

static struct pernet_operations __net_initdata netdev_net_ops =;

static void __net_exit default_device_exit_net(struct net *net)
{}

static void __net_exit default_device_exit_batch(struct list_head *net_list)
{}

static struct pernet_operations __net_initdata default_device_ops =;

static void __init net_dev_struct_check(void)
{}

/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 *
 */

/* We allocate 256 pages for each CPU if PAGE_SHIFT is 12 */
#define SYSTEM_PERCPU_PAGE_POOL_SIZE

static int net_page_pool_create(int cpuid)
{}

static int backlog_napi_should_run(unsigned int cpu)
{}

static void run_backlog_napi(unsigned int cpu)
{}

static void backlog_napi_setup(unsigned int cpu)
{}

static struct smp_hotplug_thread backlog_threads =;

/*
 *       This is called single-threaded during boot, so no need
 *       to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{}

subsys_initcall(net_dev_init);