#include <linux/uaccess.h>
#include <linux/bitmap.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/isolation.h>
#include <linux/sched/mm.h>
#include <linux/smpboot.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/kthread.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dsa.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/gro.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <net/tcx.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <trace/events/qdisc.h>
#include <trace/events/xdp.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_netdev.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>
#include <linux/indirect_call_wrapper.h>
#include <net/devlink.h>
#include <linux/pm_runtime.h>
#include <linux/prandom.h>
#include <linux/once_lite.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/types.h>
#include <net/page_pool/helpers.h>
#include <net/rps.h>
#include "dev.h"
#include "net-sysfs.h"
static DEFINE_SPINLOCK(ptype_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_extack(unsigned long val,
struct net_device *dev,
struct netlink_ext_ack *extack);
static DEFINE_MUTEX(ifalias_mutex);
static DEFINE_SPINLOCK(napi_hash_lock);
static unsigned int napi_gen_id = …;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
static DECLARE_RWSEM(devnet_rename_sem);
static inline void dev_base_seq_inc(struct net *net)
{ … }
static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{ … }
static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{ … }
#ifndef CONFIG_PREEMPT_RT
static DEFINE_STATIC_KEY_FALSE(use_backlog_threads_key);
static int __init setup_backlog_napi_threads(char *arg)
{ … }
early_param(…);
static bool use_backlog_threads(void)
{ … }
#else
/*
 * CONFIG_PREEMPT_RT build: backlog processing always runs in the per-CPU
 * kthreads, so there is no boot-time toggle to consult here.
 */
static bool use_backlog_threads(void)
{
return true;
}
#endif
static inline void backlog_lock_irq_save(struct softnet_data *sd,
unsigned long *flags)
{ … }
static inline void backlog_lock_irq_disable(struct softnet_data *sd)
{ … }
static inline void backlog_unlock_irq_restore(struct softnet_data *sd,
unsigned long *flags)
{ … }
static inline void backlog_unlock_irq_enable(struct softnet_data *sd)
{ … }
static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
const char *name)
{ … }
static struct netdev_name_node *
netdev_name_node_head_alloc(struct net_device *dev)
{ … }
static void netdev_name_node_free(struct netdev_name_node *name_node)
{ … }
static void netdev_name_node_add(struct net *net,
struct netdev_name_node *name_node)
{ … }
static void netdev_name_node_del(struct netdev_name_node *name_node)
{ … }
static struct netdev_name_node *netdev_name_node_lookup(struct net *net,
const char *name)
{ … }
static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net,
const char *name)
{ … }
bool netdev_name_in_use(struct net *net, const char *name)
{ … }
EXPORT_SYMBOL(…);
int netdev_name_node_alt_create(struct net_device *dev, const char *name)
{ … }
static void netdev_name_node_alt_free(struct rcu_head *head)
{ … }
static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
{ … }
int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
{ … }
static void netdev_name_node_alt_flush(struct net_device *dev)
{ … }
static void list_netdevice(struct net_device *dev)
{ … }
static void unlist_netdevice(struct net_device *dev)
{ … }
static RAW_NOTIFIER_HEAD(netdev_chain);
DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data) = …;
EXPORT_PER_CPU_SYMBOL(…);
static DEFINE_PER_CPU(struct page_pool *, system_page_pool);
#ifdef CONFIG_LOCKDEP
static const unsigned short netdev_lock_type[] = …;
static const char *const netdev_lock_name[] = …;
static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{ … }
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
unsigned short dev_type)
{ … }
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{ … }
#else
/* No-op stub: lockdep class annotation is only needed with CONFIG_LOCKDEP. */
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
unsigned short dev_type)
{
}
/* No-op stub: lockdep class annotation is only needed with CONFIG_LOCKDEP. */
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
static inline struct list_head *ptype_head(const struct packet_type *pt)
{ … }
void dev_add_pack(struct packet_type *pt)
{ … }
EXPORT_SYMBOL(…);
void __dev_remove_pack(struct packet_type *pt)
{ … }
EXPORT_SYMBOL(…);
void dev_remove_pack(struct packet_type *pt)
{ … }
EXPORT_SYMBOL(…);
int dev_get_iflink(const struct net_device *dev)
{ … }
EXPORT_SYMBOL(…);
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{ … }
EXPORT_SYMBOL_GPL(…);
static struct net_device_path *dev_fwd_path(struct net_device_path_stack *stack)
{ … }
int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
struct net_device_path_stack *stack)
{ … }
EXPORT_SYMBOL_GPL(…);
struct net_device *__dev_get_by_name(struct net *net, const char *name)
{ … }
EXPORT_SYMBOL(…);
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{ … }
EXPORT_SYMBOL(…);
struct net_device *dev_get_by_name(struct net *net, const char *name)
{ … }
EXPORT_SYMBOL(…);
struct net_device *netdev_get_by_name(struct net *net, const char *name,
netdevice_tracker *tracker, gfp_t gfp)
{ … }
EXPORT_SYMBOL(…);
struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{ … }
EXPORT_SYMBOL(…);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{ … }
EXPORT_SYMBOL(…);
struct net_device *dev_get_by_index(struct net *net, int ifindex)
{ … }
EXPORT_SYMBOL(…);
struct net_device *netdev_get_by_index(struct net *net, int ifindex,
netdevice_tracker *tracker, gfp_t gfp)
{ … }
EXPORT_SYMBOL(…);
struct net_device *dev_get_by_napi_id(unsigned int napi_id)
{ … }
EXPORT_SYMBOL(…);
static DEFINE_SEQLOCK(netdev_rename_lock);
void netdev_copy_name(struct net_device *dev, char *name)
{ … }
int netdev_get_name(struct net *net, char *name, int ifindex)
{ … }
struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
const char *ha)
{ … }
EXPORT_SYMBOL(…);
struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{ … }
EXPORT_SYMBOL(…);
struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
unsigned short mask)
{ … }
EXPORT_SYMBOL(…);
bool dev_valid_name(const char *name)
{ … }
EXPORT_SYMBOL(…);
static int __dev_alloc_name(struct net *net, const char *name, char *res)
{ … }
static int dev_prep_valid_name(struct net *net, struct net_device *dev,
const char *want_name, char *out_name,
int dup_errno)
{ … }
int dev_alloc_name(struct net_device *dev, const char *name)
{ … }
EXPORT_SYMBOL(…);
static int dev_get_valid_name(struct net *net, struct net_device *dev,
const char *name)
{ … }
int dev_change_name(struct net_device *dev, const char *newname)
{ … }
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{ … }
EXPORT_SYMBOL(…);
int dev_get_alias(const struct net_device *dev, char *name, size_t len)
{ … }
void netdev_features_change(struct net_device *dev)
{ … }
EXPORT_SYMBOL(…);
void netdev_state_change(struct net_device *dev)
{ … }
EXPORT_SYMBOL(…);
void __netdev_notify_peers(struct net_device *dev)
{ … }
EXPORT_SYMBOL(…);
void netdev_notify_peers(struct net_device *dev)
{ … }
EXPORT_SYMBOL(…);
static int napi_threaded_poll(void *data);
static int napi_kthread_create(struct napi_struct *n)
{ … }
static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{ … }
int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{ … }
EXPORT_SYMBOL(…);
static void __dev_close_many(struct list_head *head)
{ … }
static void __dev_close(struct net_device *dev)
{ … }
void dev_close_many(struct list_head *head, bool unlink)
{ … }
EXPORT_SYMBOL(…);
void dev_close(struct net_device *dev)
{ … }
EXPORT_SYMBOL(…);
void dev_disable_lro(struct net_device *dev)
{ … }
EXPORT_SYMBOL(…);
static void dev_disable_gro_hw(struct net_device *dev)
{ … }
const char *netdev_cmd_to_name(enum netdev_cmd cmd)
{ … }
EXPORT_SYMBOL_GPL(…);
static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
struct net_device *dev)
{ … }
static int call_netdevice_register_notifiers(struct notifier_block *nb,
struct net_device *dev)
{ … }
static void call_netdevice_unregister_notifiers(struct notifier_block *nb,
struct net_device *dev)
{ … }
static int call_netdevice_register_net_notifiers(struct notifier_block *nb,
struct net *net)
{ … }
static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb,
struct net *net)
{ … }
static int dev_boot_phase = …;
int register_netdevice_notifier(struct notifier_block *nb)
{ … }
EXPORT_SYMBOL(…);
int unregister_netdevice_notifier(struct notifier_block *nb)
{ … }
EXPORT_SYMBOL(…);
static int __register_netdevice_notifier_net(struct net *net,
struct notifier_block *nb,
bool ignore_call_fail)
{ … }
static int __unregister_netdevice_notifier_net(struct net *net,
struct notifier_block *nb)
{ … }
int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb)
{ … }
EXPORT_SYMBOL(…);
int unregister_netdevice_notifier_net(struct net *net,
struct notifier_block *nb)
{ … }
EXPORT_SYMBOL(…);
static void __move_netdevice_notifier_net(struct net *src_net,
struct net *dst_net,
struct notifier_block *nb)
{ … }
int register_netdevice_notifier_dev_net(struct net_device *dev,
struct notifier_block *nb,
struct netdev_net_notifier *nn)
{ … }
EXPORT_SYMBOL(…);
int unregister_netdevice_notifier_dev_net(struct net_device *dev,
struct notifier_block *nb,
struct netdev_net_notifier *nn)
{ … }
EXPORT_SYMBOL(…);
static void move_netdevice_notifiers_dev_net(struct net_device *dev,
struct net *net)
{ … }
int call_netdevice_notifiers_info(unsigned long val,
struct netdev_notifier_info *info)
{ … }
static int
call_netdevice_notifiers_info_robust(unsigned long val_up,
unsigned long val_down,
struct netdev_notifier_info *info)
{ … }
static int call_netdevice_notifiers_extack(unsigned long val,
struct net_device *dev,
struct netlink_ext_ack *extack)
{ … }
int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{ … }
EXPORT_SYMBOL(…);
static int call_netdevice_notifiers_mtu(unsigned long val,
struct net_device *dev, u32 arg)
{ … }
#ifdef CONFIG_NET_INGRESS
static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);
void net_inc_ingress_queue(void)
{ … }
EXPORT_SYMBOL_GPL(…);
void net_dec_ingress_queue(void)
{ … }
EXPORT_SYMBOL_GPL(…);
#endif
#ifdef CONFIG_NET_EGRESS
static DEFINE_STATIC_KEY_FALSE(egress_needed_key);
void net_inc_egress_queue(void)
{ … }
EXPORT_SYMBOL_GPL(…);
void net_dec_egress_queue(void)
{ … }
EXPORT_SYMBOL_GPL(…);
#endif
#ifdef CONFIG_NET_CLS_ACT
DEFINE_STATIC_KEY_FALSE(tcf_bypass_check_needed_key);
EXPORT_SYMBOL(…);
#endif
DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
EXPORT_SYMBOL(…);
#ifdef CONFIG_JUMP_LABEL
static atomic_t netstamp_needed_deferred;
static atomic_t netstamp_wanted;
static void netstamp_clear(struct work_struct *work)
{ … }
static DECLARE_WORK(netstamp_work, netstamp_clear);
#endif
void net_enable_timestamp(void)
{ … }
EXPORT_SYMBOL(…);
void net_disable_timestamp(void)
{ … }
EXPORT_SYMBOL(…);
static inline void net_timestamp_set(struct sk_buff *skb)
{ … }
#define net_timestamp_check(COND, SKB) … \
bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
{ … }
EXPORT_SYMBOL_GPL(…);
static int __dev_forward_skb2(struct net_device *dev, struct sk_buff *skb,
bool check_mtu)
{ … }
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{ … }
EXPORT_SYMBOL_GPL(…);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{ … }
EXPORT_SYMBOL_GPL(…);
int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb)
{ … }
static inline int deliver_skb(struct sk_buff *skb,
struct packet_type *pt_prev,
struct net_device *orig_dev)
{ … }
static inline void deliver_ptype_list_skb(struct sk_buff *skb,
struct packet_type **pt,
struct net_device *orig_dev,
__be16 type,
struct list_head *ptype_list)
{ … }
static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{ … }
bool dev_nit_active(struct net_device *dev)
{ … }
EXPORT_SYMBOL_GPL(…);
void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{ … }
EXPORT_SYMBOL_GPL(…);
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{ … }
int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
{ … }
EXPORT_SYMBOL(…);
#ifdef CONFIG_XPS
static struct static_key xps_needed __read_mostly;
static struct static_key xps_rxqs_needed __read_mostly;
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P) …
static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
struct xps_dev_maps *old_maps, int tci, u16 index)
{ … }
static bool remove_xps_queue_cpu(struct net_device *dev,
struct xps_dev_maps *dev_maps,
int cpu, u16 offset, u16 count)
{ … }
static void reset_xps_maps(struct net_device *dev,
struct xps_dev_maps *dev_maps,
enum xps_map_type type)
{ … }
static void clean_xps_maps(struct net_device *dev, enum xps_map_type type,
u16 offset, u16 count)
{ … }
static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
u16 count)
{ … }
static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{ … }
static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
u16 index, bool is_rxqs_map)
{ … }
static void xps_copy_dev_maps(struct xps_dev_maps *dev_maps,
struct xps_dev_maps *new_dev_maps, int index,
int tc, bool skip_tc)
{ … }
int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
u16 index, enum xps_map_type type)
{ … }
EXPORT_SYMBOL_GPL(…);
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
u16 index)
{ … }
EXPORT_SYMBOL(…);
#endif
static void netdev_unbind_all_sb_channels(struct net_device *dev)
{ … }
void netdev_reset_tc(struct net_device *dev)
{ … }
EXPORT_SYMBOL(…);
int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
{ … }
EXPORT_SYMBOL(…);
int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{ … }
EXPORT_SYMBOL(…);
void netdev_unbind_sb_channel(struct net_device *dev,
struct net_device *sb_dev)
{ … }
EXPORT_SYMBOL(…);
int netdev_bind_sb_channel_queue(struct net_device *dev,
struct net_device *sb_dev,
u8 tc, u16 count, u16 offset)
{ … }
EXPORT_SYMBOL(…);
int netdev_set_sb_channel(struct net_device *dev, u16 channel)
{ … }
EXPORT_SYMBOL(…);
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{ … }
EXPORT_SYMBOL(…);
#ifdef CONFIG_SYSFS
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{ … }
EXPORT_SYMBOL(…);
#endif
int netif_set_real_num_queues(struct net_device *dev,
unsigned int txq, unsigned int rxq)
{ … }
EXPORT_SYMBOL(…);
void netif_set_tso_max_size(struct net_device *dev, unsigned int size)
{ … }
EXPORT_SYMBOL(…);
void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs)
{ … }
EXPORT_SYMBOL(…);
void netif_inherit_tso_max(struct net_device *to, const struct net_device *from)
{ … }
EXPORT_SYMBOL(…);
int netif_get_num_default_rss_queues(void)
{ … }
EXPORT_SYMBOL(…);
static void __netif_reschedule(struct Qdisc *q)
{ … }
void __netif_schedule(struct Qdisc *q)
{ … }
EXPORT_SYMBOL(…);
struct dev_kfree_skb_cb { … };
static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
{ … }
void netif_schedule_queue(struct netdev_queue *txq)
{ … }
EXPORT_SYMBOL(…);
void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{ … }
EXPORT_SYMBOL(…);
void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason)
{ … }
EXPORT_SYMBOL(…);
void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason)
{ … }
EXPORT_SYMBOL(…);
void netif_device_detach(struct net_device *dev)
{ … }
EXPORT_SYMBOL(…);
void netif_device_attach(struct net_device *dev)
{ … }
EXPORT_SYMBOL(…);
static u16 skb_tx_hash(const struct net_device *dev,
const struct net_device *sb_dev,
struct sk_buff *skb)
{ … }
void skb_warn_bad_offload(const struct sk_buff *skb)
{ … }
int skb_checksum_help(struct sk_buff *skb)
{ … }
EXPORT_SYMBOL(…);
int skb_crc32c_csum_help(struct sk_buff *skb)
{ … }
__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
{ … }
#ifdef CONFIG_BUG
static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
{ … }
void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
{ … }
EXPORT_SYMBOL(…);
#endif
static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{ … }
#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
static netdev_features_t net_mpls_features(struct sk_buff *skb,
netdev_features_t features,
__be16 type)
{ … }
#else
/*
 * CONFIG_NET_MPLS_GSO disabled: no MPLS-specific feature masking is
 * possible, so return the feature set unchanged.
 */
static netdev_features_t net_mpls_features(struct sk_buff *skb,
netdev_features_t features,
__be16 type)
{
return features;
}
#endif
static netdev_features_t harmonize_features(struct sk_buff *skb,
netdev_features_t features)
{ … }
netdev_features_t passthru_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{ … }
EXPORT_SYMBOL(…);
static netdev_features_t dflt_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{ … }
static netdev_features_t gso_features_check(const struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{ … }
netdev_features_t netif_skb_features(struct sk_buff *skb)
{ … }
EXPORT_SYMBOL(…);
static int xmit_one(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, bool more)
{ … }
struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
struct netdev_queue *txq, int *ret)
{ … }
static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
netdev_features_t features)
{ … }
int skb_csum_hwoffload_help(struct sk_buff *skb,
const netdev_features_t features)
{ … }
EXPORT_SYMBOL(…);
static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
{ … }
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
{ … }
EXPORT_SYMBOL_GPL(…);
static void qdisc_pkt_len_init(struct sk_buff *skb)
{ … }
static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q,
struct sk_buff **to_free,
struct netdev_queue *txq)
{ … }
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
struct net_device *dev,
struct netdev_queue *txq)
{ … }
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
static void skb_update_prio(struct sk_buff *skb)
{ … }
#else
#define skb_update_prio …
#endif
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{ … }
EXPORT_SYMBOL(…);
#ifdef CONFIG_NET_EGRESS
static struct netdev_queue *
netdev_tx_queue_mapping(struct net_device *dev, struct sk_buff *skb)
{ … }
#ifndef CONFIG_PREEMPT_RT
static bool netdev_xmit_txqueue_skipped(void)
{ … }
void netdev_xmit_skip_txqueue(bool skip)
{ … }
EXPORT_SYMBOL_GPL(…);
#else
/*
 * PREEMPT_RT variant: the skip-txqueue flag is tracked on the current task
 * (current->net_xmit) rather than in per-CPU softnet state.
 */
static bool netdev_xmit_txqueue_skipped(void)
{
return current->net_xmit.skip_txqueue;
}
/*
 * PREEMPT_RT variant: record on the current task whether the egress path
 * should skip the normal txqueue selection.
 */
void netdev_xmit_skip_txqueue(bool skip)
{
current->net_xmit.skip_txqueue = skip;
}
EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue);
#endif
#endif
#ifdef CONFIG_NET_XGRESS
static int tc_run(struct tcx_entry *entry, struct sk_buff *skb,
enum skb_drop_reason *drop_reason)
{ … }
static DEFINE_STATIC_KEY_FALSE(tcx_needed_key);
void tcx_inc(void)
{ … }
void tcx_dec(void)
{ … }
static __always_inline enum tcx_action_base
tcx_run(const struct bpf_mprog_entry *entry, struct sk_buff *skb,
const bool needs_mac)
{ … }
static __always_inline struct sk_buff *
sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
struct net_device *orig_dev, bool *another)
{ … }
static __always_inline struct sk_buff *
sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
{ … }
#else
/*
 * CONFIG_NET_XGRESS disabled: no ingress tc/BPF hook exists, so the skb is
 * passed through untouched. *pt_prev, *ret and *another are left as-is.
 */
static __always_inline struct sk_buff *
sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
struct net_device *orig_dev, bool *another)
{
return skb;
}
/*
 * CONFIG_NET_XGRESS disabled: no egress tc/BPF hook exists, so the skb is
 * passed through untouched. *ret is left as-is.
 */
static __always_inline struct sk_buff *
sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
{
return skb;
}
#endif
#ifdef CONFIG_XPS
static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
struct xps_dev_maps *dev_maps, unsigned int tci)
{ … }
#endif
static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
struct sk_buff *skb)
{ … }
u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{ … }
EXPORT_SYMBOL(…);
u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{ … }
EXPORT_SYMBOL(…);
u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{ … }
EXPORT_SYMBOL(…);
struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
struct sk_buff *skb,
struct net_device *sb_dev)
{ … }
int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
{ … }
EXPORT_SYMBOL(…);
int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
{ … }
EXPORT_SYMBOL(…);
static DEFINE_PER_CPU(struct task_struct *, backlog_napi);
int weight_p __read_mostly = …;
int dev_weight_rx_bias __read_mostly = …;
int dev_weight_tx_bias __read_mostly = …;
static inline void ____napi_schedule(struct softnet_data *sd,
struct napi_struct *napi)
{ … }
#ifdef CONFIG_RPS
struct static_key_false rps_needed __read_mostly;
EXPORT_SYMBOL(…);
struct static_key_false rfs_needed __read_mostly;
EXPORT_SYMBOL(…);
static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
struct rps_dev_flow *rflow, u16 next_cpu)
{ … }
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
struct rps_dev_flow **rflowp)
{ … }
#ifdef CONFIG_RFS_ACCEL
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
u32 flow_id, u16 filter_id)
{ … }
EXPORT_SYMBOL(…);
#endif
static void rps_trigger_softirq(void *data)
{ … }
#endif
static void trigger_rx_softirq(void *data)
{ … }
static void napi_schedule_rps(struct softnet_data *sd)
{ … }
void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu)
{ … }
#ifdef CONFIG_NET_FLOW_LIMIT
int netdev_flow_limit_table_len __read_mostly = …;
#endif
static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
{ … }
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
unsigned int *qtail)
{ … }
static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
{ … }
u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog)
{ … }
static int
netif_skb_check_for_xdp(struct sk_buff **pskb, struct bpf_prog *prog)
{ … }
static u32 netif_receive_generic_xdp(struct sk_buff **pskb,
struct xdp_buff *xdp,
struct bpf_prog *xdp_prog)
{ … }
void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
{ … }
static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff **pskb)
{ … }
EXPORT_SYMBOL_GPL(…);
static int netif_rx_internal(struct sk_buff *skb)
{ … }
int __netif_rx(struct sk_buff *skb)
{ … }
EXPORT_SYMBOL(…);
int netif_rx(struct sk_buff *skb)
{ … }
EXPORT_SYMBOL(…);
static __latent_entropy void net_tx_action(struct softirq_action *h)
{ … }
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
int (*br_fdb_test_addr_hook)(struct net_device *dev,
unsigned char *addr) __read_mostly;
EXPORT_SYMBOL_GPL(…);
#endif
bool netdev_is_rx_handler_busy(struct net_device *dev)
{ … }
EXPORT_SYMBOL_GPL(…);
int netdev_rx_handler_register(struct net_device *dev,
rx_handler_func_t *rx_handler,
void *rx_handler_data)
{ … }
EXPORT_SYMBOL_GPL(…);
void netdev_rx_handler_unregister(struct net_device *dev)
{ … }
EXPORT_SYMBOL_GPL(…);
static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
{ … }
static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
int *ret, struct net_device *orig_dev)
{ … }
static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
struct packet_type **ppt_prev)
{ … }
static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
{ … }
int netif_receive_skb_core(struct sk_buff *skb)
{ … }
EXPORT_SYMBOL(…);
static inline void __netif_receive_skb_list_ptype(struct list_head *head,
struct packet_type *pt_prev,
struct net_device *orig_dev)
{ … }
static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
{ … }
static int __netif_receive_skb(struct sk_buff *skb)
{ … }
static void __netif_receive_skb_list(struct list_head *head)
{ … }
static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
{ … }
static int netif_receive_skb_internal(struct sk_buff *skb)
{ … }
void netif_receive_skb_list_internal(struct list_head *head)
{ … }
int netif_receive_skb(struct sk_buff *skb)
{ … }
EXPORT_SYMBOL(…);
void netif_receive_skb_list(struct list_head *head)
{ … }
EXPORT_SYMBOL(…);
static DEFINE_PER_CPU(struct work_struct, flush_works);
static void flush_backlog(struct work_struct *work)
{ … }
static bool flush_required(int cpu)
{ … }
static void flush_all_backlogs(void)
{ … }
static void net_rps_send_ipi(struct softnet_data *remsd)
{ … }
static void net_rps_action_and_irq_enable(struct softnet_data *sd)
{ … }
static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
{ … }
static int process_backlog(struct napi_struct *napi, int quota)
{ … }
void __napi_schedule(struct napi_struct *n)
{ … }
EXPORT_SYMBOL(…);
bool napi_schedule_prep(struct napi_struct *n)
{ … }
EXPORT_SYMBOL(…);
void __napi_schedule_irqoff(struct napi_struct *n)
{ … }
EXPORT_SYMBOL(…);
bool napi_complete_done(struct napi_struct *n, int work_done)
{ … }
EXPORT_SYMBOL(…);
struct napi_struct *napi_by_id(unsigned int napi_id)
{ … }
static void skb_defer_free_flush(struct softnet_data *sd)
{ … }
#if defined(CONFIG_NET_RX_BUSY_POLL)
static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule)
{ … }
enum { … };
static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock,
unsigned flags, u16 budget)
{ … }
static void __napi_busy_loop(unsigned int napi_id,
bool (*loop_end)(void *, unsigned long),
void *loop_end_arg, unsigned flags, u16 budget)
{ … }
void napi_busy_loop_rcu(unsigned int napi_id,
bool (*loop_end)(void *, unsigned long),
void *loop_end_arg, bool prefer_busy_poll, u16 budget)
{ … }
void napi_busy_loop(unsigned int napi_id,
bool (*loop_end)(void *, unsigned long),
void *loop_end_arg, bool prefer_busy_poll, u16 budget)
{ … }
EXPORT_SYMBOL(…);
#endif
static void napi_hash_add(struct napi_struct *napi)
{ … }
static void napi_hash_del(struct napi_struct *napi)
{ … }
static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
{ … }
static void init_gro_hash(struct napi_struct *napi)
{ … }
int dev_set_threaded(struct net_device *dev, bool threaded)
{ … }
EXPORT_SYMBOL(…);
void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index,
enum netdev_queue_type type, struct napi_struct *napi)
{ … }
EXPORT_SYMBOL(…);
void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
int (*poll)(struct napi_struct *, int), int weight)
{ … }
EXPORT_SYMBOL(…);
void napi_disable(struct napi_struct *n)
{ … }
EXPORT_SYMBOL(…);
void napi_enable(struct napi_struct *n)
{ … }
EXPORT_SYMBOL(…);
static void flush_gro_hash(struct napi_struct *napi)
{ … }
void __netif_napi_del(struct napi_struct *napi)
{ … }
EXPORT_SYMBOL(…);
static int __napi_poll(struct napi_struct *n, bool *repoll)
{ … }
static int napi_poll(struct napi_struct *n, struct list_head *repoll)
{ … }
static int napi_thread_wait(struct napi_struct *napi)
{ … }
static void napi_threaded_poll_loop(struct napi_struct *napi)
{ … }
static int napi_threaded_poll(void *data)
{ … }
static __latent_entropy void net_rx_action(struct softirq_action *h)
{ … }
struct netdev_adjacent { … };
static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
struct list_head *adj_list)
{ … }
static int ____netdev_has_upper_dev(struct net_device *upper_dev,
struct netdev_nested_priv *priv)
{ … }
bool netdev_has_upper_dev(struct net_device *dev,
struct net_device *upper_dev)
{ … }
EXPORT_SYMBOL(…);
bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
struct net_device *upper_dev)
{ … }
EXPORT_SYMBOL(…);
bool netdev_has_any_upper_dev(struct net_device *dev)
{ … }
EXPORT_SYMBOL(…);
struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
{ … }
EXPORT_SYMBOL(…);
/*
 * Netdev adjacency graph: traversal and link management for upper/lower
 * device relationships (e.g. a bond or bridge master stacked over slave
 * ports).
 *
 * NOTE(review): every function body in this view is elided ("{ … }"); the
 * comments in this section describe intent inferred from the signatures and
 * the well-known net/core/dev.c API — confirm against the full source.
 */
static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev)
{ … }
static bool netdev_has_any_lower_dev(struct net_device *dev)
{ … }
void *netdev_adjacent_get_private(struct list_head *adj_list)
{ … }
EXPORT_SYMBOL(…);
/*
 * Iterators over the upper/lower adjacency lists. The *iter cursor lets
 * callers resume traversal across calls; _rcu variants presumably require
 * rcu_read_lock() to be held — TODO confirm locking rules in full source.
 */
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
struct list_head **iter)
{ … }
EXPORT_SYMBOL(…);
static struct net_device *__netdev_next_upper_dev(struct net_device *dev,
struct list_head **iter,
bool *ignore)
{ … }
static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
struct list_head **iter)
{ … }
/*
 * Nested walkers: visit every device above/below @dev, invoking fn() with
 * @priv; a non-zero return from fn() presumably aborts the walk and is
 * propagated — verify against full source.
 */
static int __netdev_walk_all_upper_dev(struct net_device *dev,
int (*fn)(struct net_device *dev,
struct netdev_nested_priv *priv),
struct netdev_nested_priv *priv)
{ … }
int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
int (*fn)(struct net_device *dev,
struct netdev_nested_priv *priv),
struct netdev_nested_priv *priv)
{ … }
EXPORT_SYMBOL_GPL(…);
static bool __netdev_has_upper_dev(struct net_device *dev,
struct net_device *upper_dev)
{ … }
void *netdev_lower_get_next_private(struct net_device *dev,
struct list_head **iter)
{ … }
EXPORT_SYMBOL(…);
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
struct list_head **iter)
{ … }
EXPORT_SYMBOL(…);
void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
{ … }
EXPORT_SYMBOL(…);
static struct net_device *netdev_next_lower_dev(struct net_device *dev,
struct list_head **iter)
{ … }
static struct net_device *__netdev_next_lower_dev(struct net_device *dev,
struct list_head **iter,
bool *ignore)
{ … }
int netdev_walk_all_lower_dev(struct net_device *dev,
int (*fn)(struct net_device *dev,
struct netdev_nested_priv *priv),
struct netdev_nested_priv *priv)
{ … }
EXPORT_SYMBOL_GPL(…);
static int __netdev_walk_all_lower_dev(struct net_device *dev,
int (*fn)(struct net_device *dev,
struct netdev_nested_priv *priv),
struct netdev_nested_priv *priv)
{ … }
struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
struct list_head **iter)
{ … }
EXPORT_SYMBOL(…);
/*
 * Nesting-depth bookkeeping (u8 levels) used, presumably, for lockdep
 * subclass selection on stacked devices — TODO confirm.
 */
static u8 __netdev_upper_depth(struct net_device *dev)
{ … }
static u8 __netdev_lower_depth(struct net_device *dev)
{ … }
static int __netdev_update_upper_level(struct net_device *dev,
struct netdev_nested_priv *__unused)
{ … }
#ifdef CONFIG_LOCKDEP
/* Deferred-unlink list, only needed for lockdep-enabled builds. */
static LIST_HEAD(net_unlink_list);
static void net_unlink_todo(struct net_device *dev)
{ … }
#endif
static int __netdev_update_lower_level(struct net_device *dev,
struct netdev_nested_priv *priv)
{ … }
int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
int (*fn)(struct net_device *dev,
struct netdev_nested_priv *priv),
struct netdev_nested_priv *priv)
{ … }
EXPORT_SYMBOL_GPL(…);
void *netdev_lower_get_first_private_rcu(struct net_device *dev)
{ … }
EXPORT_SYMBOL(…);
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
{ … }
EXPORT_SYMBOL(…);
/*
 * Sysfs "adjacent device" symlink helpers plus the insert/remove primitives
 * that maintain the refcounted adjacency entries on both sides of a link.
 */
static int netdev_adjacent_sysfs_add(struct net_device *dev,
struct net_device *adj_dev,
struct list_head *dev_list)
{ … }
static void netdev_adjacent_sysfs_del(struct net_device *dev,
char *name,
struct list_head *dev_list)
{ … }
static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
struct net_device *adj_dev,
struct list_head *dev_list)
{ … }
static int __netdev_adjacent_dev_insert(struct net_device *dev,
struct net_device *adj_dev,
struct list_head *dev_list,
void *private, bool master)
{ … }
static void __netdev_adjacent_dev_remove(struct net_device *dev,
struct net_device *adj_dev,
u16 ref_nr,
struct list_head *dev_list)
{ … }
static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
struct net_device *upper_dev,
struct list_head *up_list,
struct list_head *down_list,
void *private, bool master)
{ … }
static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
struct net_device *upper_dev,
u16 ref_nr,
struct list_head *up_list,
struct list_head *down_list)
{ … }
static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
struct net_device *upper_dev,
void *private, bool master)
{ … }
static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
struct net_device *upper_dev)
{ … }
/*
 * Public link/unlink entry points: attach @dev below @upper_dev, optionally
 * as the single master (netdev_master_upper_dev_link), and tear the
 * relationship down again. extack carries failure reasons to userspace.
 */
static int __netdev_upper_dev_link(struct net_device *dev,
struct net_device *upper_dev, bool master,
void *upper_priv, void *upper_info,
struct netdev_nested_priv *priv,
struct netlink_ext_ack *extack)
{ … }
int netdev_upper_dev_link(struct net_device *dev,
struct net_device *upper_dev,
struct netlink_ext_ack *extack)
{ … }
EXPORT_SYMBOL(…);
int netdev_master_upper_dev_link(struct net_device *dev,
struct net_device *upper_dev,
void *upper_priv, void *upper_info,
struct netlink_ext_ack *extack)
{ … }
EXPORT_SYMBOL(…);
static void __netdev_upper_dev_unlink(struct net_device *dev,
struct net_device *upper_dev,
struct netdev_nested_priv *priv)
{ … }
void netdev_upper_dev_unlink(struct net_device *dev,
struct net_device *upper_dev)
{ … }
EXPORT_SYMBOL(…);
/*
 * Prepare/commit/abort protocol for atomically moving a lower device from
 * one upper to another (used e.g. by bonding failover paths — verify).
 */
static void __netdev_adjacent_dev_set(struct net_device *upper_dev,
struct net_device *lower_dev,
bool val)
{ … }
static void netdev_adjacent_dev_disable(struct net_device *upper_dev,
struct net_device *lower_dev)
{ … }
static void netdev_adjacent_dev_enable(struct net_device *upper_dev,
struct net_device *lower_dev)
{ … }
int netdev_adjacent_change_prepare(struct net_device *old_dev,
struct net_device *new_dev,
struct net_device *dev,
struct netlink_ext_ack *extack)
{ … }
EXPORT_SYMBOL(…);
void netdev_adjacent_change_commit(struct net_device *old_dev,
struct net_device *new_dev,
struct net_device *dev)
{ … }
EXPORT_SYMBOL(…);
void netdev_adjacent_change_abort(struct net_device *old_dev,
struct net_device *new_dev,
struct net_device *dev)
{ … }
EXPORT_SYMBOL(…);
/* Broadcast bonding state changes to interested listeners (notifier chain). */
void netdev_bonding_info_change(struct net_device *dev,
struct netdev_bonding_info *bonding_info)
{ … }
EXPORT_SYMBOL(…);
/*
 * Hardware-offloaded extended statistics (enable/disable/collect per
 * netdev_offload_xstats_type), followed by misc lower-device helpers.
 *
 * NOTE(review): bodies are elided in this view; descriptions are inferred
 * from signatures — confirm against the full source.
 */
static int netdev_offload_xstats_enable_l3(struct net_device *dev,
struct netlink_ext_ack *extack)
{ … }
int netdev_offload_xstats_enable(struct net_device *dev,
enum netdev_offload_xstats_type type,
struct netlink_ext_ack *extack)
{ … }
EXPORT_SYMBOL(…);
static void netdev_offload_xstats_disable_l3(struct net_device *dev)
{ … }
int netdev_offload_xstats_disable(struct net_device *dev,
enum netdev_offload_xstats_type type)
{ … }
EXPORT_SYMBOL(…);
static void netdev_offload_xstats_disable_all(struct net_device *dev)
{ … }
static struct rtnl_hw_stats64 *
netdev_offload_xstats_get_ptr(const struct net_device *dev,
enum netdev_offload_xstats_type type)
{ … }
bool netdev_offload_xstats_enabled(const struct net_device *dev,
enum netdev_offload_xstats_type type)
{ … }
EXPORT_SYMBOL(…);
/* Notifier payloads: _ru reports "used" flag, _rd reports stat deltas. */
struct netdev_notifier_offload_xstats_ru { … };
struct netdev_notifier_offload_xstats_rd { … };
static void netdev_hw_stats64_add(struct rtnl_hw_stats64 *dest,
const struct rtnl_hw_stats64 *src)
{ … }
static int netdev_offload_xstats_get_used(struct net_device *dev,
enum netdev_offload_xstats_type type,
bool *p_used,
struct netlink_ext_ack *extack)
{ … }
static int netdev_offload_xstats_get_stats(struct net_device *dev,
enum netdev_offload_xstats_type type,
struct rtnl_hw_stats64 *p_stats,
bool *p_used,
struct netlink_ext_ack *extack)
{ … }
int netdev_offload_xstats_get(struct net_device *dev,
enum netdev_offload_xstats_type type,
struct rtnl_hw_stats64 *p_stats, bool *p_used,
struct netlink_ext_ack *extack)
{ … }
EXPORT_SYMBOL(…);
void
netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *report_delta,
const struct rtnl_hw_stats64 *stats)
{ … }
EXPORT_SYMBOL(…);
void
netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *report_used)
{ … }
EXPORT_SYMBOL(…);
void netdev_offload_xstats_push_delta(struct net_device *dev,
enum netdev_offload_xstats_type type,
const struct rtnl_hw_stats64 *p_stats)
{ … }
EXPORT_SYMBOL(…);
/*
 * Transmit-slave / lowest-device selection for stacked devices, plus
 * sysfs-link renaming and lower-device private-state accessors.
 */
struct net_device *netdev_get_xmit_slave(struct net_device *dev,
struct sk_buff *skb,
bool all_slaves)
{ … }
EXPORT_SYMBOL(…);
static struct net_device *netdev_sk_get_lower_dev(struct net_device *dev,
struct sock *sk)
{ … }
struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev,
struct sock *sk)
{ … }
EXPORT_SYMBOL(…);
static void netdev_adjacent_add_links(struct net_device *dev)
{ … }
static void netdev_adjacent_del_links(struct net_device *dev)
{ … }
void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
{ … }
void *netdev_lower_dev_get_private(struct net_device *dev,
struct net_device *lower_dev)
{ … }
EXPORT_SYMBOL(…);
void netdev_lower_state_changed(struct net_device *lower_dev,
void *lower_state_info)
{ … }
EXPORT_SYMBOL(…);
/*
 * Device RX mode and flags (promiscuity / allmulti are reference counts:
 * @inc may be negative to release a reference — verify in full source),
 * MTU validation and updates, tx queue length, group, and MAC address
 * management.
 *
 * NOTE(review): bodies are elided in this view; descriptions are inferred
 * from signatures and the documented net/core/dev.c API.
 */
static void dev_change_rx_flags(struct net_device *dev, int flags)
{ … }
static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
{ … }
int dev_set_promiscuity(struct net_device *dev, int inc)
{ … }
EXPORT_SYMBOL(…);
static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
{ … }
int dev_set_allmulti(struct net_device *dev, int inc)
{ … }
EXPORT_SYMBOL(…);
void __dev_set_rx_mode(struct net_device *dev)
{ … }
void dev_set_rx_mode(struct net_device *dev)
{ … }
unsigned int dev_get_flags(const struct net_device *dev)
{ … }
EXPORT_SYMBOL(…);
/* Flag changes are split into change + notify so batched callers can defer
 * the notification; extack reports rejection reasons. */
int __dev_change_flags(struct net_device *dev, unsigned int flags,
struct netlink_ext_ack *extack)
{ … }
void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
unsigned int gchanges, u32 portid,
const struct nlmsghdr *nlh)
{ … }
int dev_change_flags(struct net_device *dev, unsigned int flags,
struct netlink_ext_ack *extack)
{ … }
EXPORT_SYMBOL(…);
int __dev_set_mtu(struct net_device *dev, int new_mtu)
{ … }
EXPORT_SYMBOL(…);
int dev_validate_mtu(struct net_device *dev, int new_mtu,
struct netlink_ext_ack *extack)
{ … }
int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
struct netlink_ext_ack *extack)
{ … }
int dev_set_mtu(struct net_device *dev, int new_mtu)
{ … }
EXPORT_SYMBOL(…);
int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
{ … }
void dev_set_group(struct net_device *dev, int new_group)
{ … }
int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
struct netlink_ext_ack *extack)
{ … }
EXPORT_SYMBOL(…);
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
struct netlink_ext_ack *extack)
{ … }
EXPORT_SYMBOL(…);
/* rwsem presumably serializing userspace MAC address updates — confirm. */
DECLARE_RWSEM(…) …;
int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
struct netlink_ext_ack *extack)
{ … }
EXPORT_SYMBOL(…);
int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name)
{ … }
EXPORT_SYMBOL(…);
/*
 * Carrier state, physical-port identity queries, and the proto_down
 * administrative state.
 *
 * NOTE(review): bodies are elided in this view — descriptions inferred from
 * signatures; confirm against the full source.
 */
int dev_change_carrier(struct net_device *dev, bool new_carrier)
{ … }
int dev_get_phys_port_id(struct net_device *dev,
struct netdev_phys_item_id *ppid)
{ … }
int dev_get_phys_port_name(struct net_device *dev,
char *name, size_t len)
{ … }
/* @recurse presumably allows falling back to lower devices' parent id. */
int dev_get_port_parent_id(struct net_device *dev,
struct netdev_phys_item_id *ppid,
bool recurse)
{ … }
EXPORT_SYMBOL(…);
bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b)
{ … }
EXPORT_SYMBOL(…);
int dev_change_proto_down(struct net_device *dev, bool proto_down)
{ … }
void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
u32 value)
{ … }
/*
 * XDP program management: per-mode (skb/drv/hw) program slots on the
 * netdev, the bpf_link-based attachment API, and the legacy fd-based
 * dev_change_xdp_fd() path.
 *
 * NOTE(review): bodies are elided in this view — descriptions inferred from
 * signatures and the documented BPF XDP attach API; confirm against the
 * full source.
 */
struct bpf_xdp_link { … };
static enum bpf_xdp_mode dev_xdp_mode(struct net_device *dev, u32 flags)
{ … }
static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode)
{ … }
static struct bpf_xdp_link *dev_xdp_link(struct net_device *dev,
enum bpf_xdp_mode mode)
{ … }
static struct bpf_prog *dev_xdp_prog(struct net_device *dev,
enum bpf_xdp_mode mode)
{ … }
u8 dev_xdp_prog_count(struct net_device *dev)
{ … }
EXPORT_SYMBOL_GPL(…);
u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode)
{ … }
static void dev_xdp_set_link(struct net_device *dev, enum bpf_xdp_mode mode,
struct bpf_xdp_link *link)
{ … }
static void dev_xdp_set_prog(struct net_device *dev, enum bpf_xdp_mode mode,
struct bpf_prog *prog)
{ … }
/* Pushes @prog to the driver via its ndo_bpf op; extack carries errors. */
static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode,
bpf_op_t bpf_op, struct netlink_ext_ack *extack,
u32 flags, struct bpf_prog *prog)
{ … }
static void dev_xdp_uninstall(struct net_device *dev)
{ … }
static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack,
struct bpf_xdp_link *link, struct bpf_prog *new_prog,
struct bpf_prog *old_prog, u32 flags)
{ … }
static int dev_xdp_attach_link(struct net_device *dev,
struct netlink_ext_ack *extack,
struct bpf_xdp_link *link)
{ … }
static int dev_xdp_detach_link(struct net_device *dev,
struct netlink_ext_ack *extack,
struct bpf_xdp_link *link)
{ … }
/* bpf_link_ops callbacks backing BPF_LINK_TYPE_XDP links. */
static void bpf_xdp_link_release(struct bpf_link *link)
{ … }
static int bpf_xdp_link_detach(struct bpf_link *link)
{ … }
static void bpf_xdp_link_dealloc(struct bpf_link *link)
{ … }
static void bpf_xdp_link_show_fdinfo(const struct bpf_link *link,
struct seq_file *seq)
{ … }
static int bpf_xdp_link_fill_link_info(const struct bpf_link *link,
struct bpf_link_info *info)
{ … }
static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog,
struct bpf_prog *old_prog)
{ … }
static const struct bpf_link_ops bpf_xdp_link_lops = …;
int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{ … }
/* Legacy netlink path: attach/replace/detach by program fd. */
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
int fd, int expected_fd, u32 flags)
{ … }
/*
 * Device registration machinery: ifindex reservation, the unregistration
 * "todo" list, feature negotiation/sync across stacked devices, RX/TX queue
 * allocation, per-cpu stats allocation, register_netdevice()/register_netdev()
 * proper, and the refcount drain performed by netdev_run_todo().
 *
 * NOTE(review): bodies are elided in this view — descriptions inferred from
 * signatures and the documented kernel API; confirm against the full source.
 */
static int dev_index_reserve(struct net *net, u32 ifindex)
{ … }
static void dev_index_release(struct net *net, int ifindex)
{ … }
/* Global unregistration bookkeeping (todo list, waitqueue, pending count). */
LIST_HEAD(…);
DECLARE_WAIT_QUEUE_HEAD(…);
atomic_t dev_unreg_count = …;
static void net_set_todo(struct net_device *dev)
{ … }
/* Keep feature flags coherent between a device and its upper/lower peers. */
static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
struct net_device *upper, netdev_features_t features)
{ … }
static void netdev_sync_lower_features(struct net_device *upper,
struct net_device *lower, netdev_features_t features)
{ … }
static netdev_features_t netdev_fix_features(struct net_device *dev,
netdev_features_t features)
{ … }
int __netdev_update_features(struct net_device *dev)
{ … }
void netdev_update_features(struct net_device *dev)
{ … }
EXPORT_SYMBOL(…);
void netdev_change_features(struct net_device *dev)
{ … }
EXPORT_SYMBOL(…);
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
struct net_device *dev)
{ … }
EXPORT_SYMBOL(…);
static int netif_alloc_rx_queues(struct net_device *dev)
{ … }
static void netif_free_rx_queues(struct net_device *dev)
{ … }
static void netdev_init_one_queue(struct net_device *dev,
struct netdev_queue *queue, void *_unused)
{ … }
static void netif_free_tx_queues(struct net_device *dev)
{ … }
static int netif_alloc_netdev_queues(struct net_device *dev)
{ … }
void netif_tx_stop_all_queues(struct net_device *dev)
{ … }
EXPORT_SYMBOL(…);
static int netdev_do_alloc_pcpu_stats(struct net_device *dev)
{ … }
static void netdev_do_free_pcpu_stats(struct net_device *dev)
{ … }
/* Core registration entry point (caller presumably holds RTNL — confirm);
 * register_netdev() below is the lock-taking wrapper. */
int register_netdevice(struct net_device *dev)
{ … }
EXPORT_SYMBOL(…);
static void init_dummy_netdev_core(struct net_device *dev)
{ … }
void init_dummy_netdev(struct net_device *dev)
{ … }
EXPORT_SYMBOL_GPL(…);
int register_netdev(struct net_device *dev)
{ … }
EXPORT_SYMBOL(…);
int netdev_refcnt_read(const struct net_device *dev)
{ … }
EXPORT_SYMBOL(…);
/* Tunable timeout and backoff bounds for the refcount-drain wait below. */
int netdev_unregister_timeout_secs __read_mostly = …;
#define WAIT_REFS_MIN_MSECS …
#define WAIT_REFS_MAX_MSECS …
static struct net_device *netdev_wait_allrefs_any(struct list_head *list)
{ … }
void netdev_run_todo(void)
{ … }
/*
 * Statistics aggregation (per-cpu dstats/tstats folding, core drop
 * counters) and netdev allocation/teardown (alloc_netdev_mqs/free_netdev).
 *
 * NOTE(review): bodies are elided in this view — descriptions inferred from
 * signatures; confirm against the full source.
 */
static void dev_fetch_dstats(struct rtnl_link_stats64 *s,
const struct pcpu_dstats __percpu *dstats)
{ … }
static void dev_get_dstats64(const struct net_device *dev,
struct rtnl_link_stats64 *s)
{ … }
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
const struct net_device_stats *netdev_stats)
{ … }
EXPORT_SYMBOL(…);
/* Lazily allocates the per-cpu core-stats area on first drop increment. */
static __cold struct net_device_core_stats __percpu *netdev_core_stats_alloc(
struct net_device *dev)
{ … }
noinline void netdev_core_stats_inc(struct net_device *dev, u32 offset)
{ … }
EXPORT_SYMBOL_GPL(…);
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
struct rtnl_link_stats64 *storage)
{ … }
EXPORT_SYMBOL(…);
void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
const struct pcpu_sw_netstats __percpu *netstats)
{ … }
EXPORT_SYMBOL_GPL(…);
void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s)
{ … }
EXPORT_SYMBOL_GPL(…);
struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
{ … }
static const struct ethtool_ops default_ethtool_ops;
void netdev_set_default_ethtool_ops(struct net_device *dev,
const struct ethtool_ops *ops)
{ … }
EXPORT_SYMBOL_GPL(…);
void netdev_sw_irq_coalesce_default_on(struct net_device *dev)
{ … }
EXPORT_SYMBOL_GPL(…);
/* Allocates a net_device plus @sizeof_priv private area with @txqs/@rxqs
 * queues; @setup initializes type-specific defaults (e.g. ether_setup). */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
unsigned char name_assign_type,
void (*setup)(struct net_device *),
unsigned int txqs, unsigned int rxqs)
{ … }
EXPORT_SYMBOL(…);
void free_netdev(struct net_device *dev)
{ … }
EXPORT_SYMBOL(…);
struct net_device *alloc_netdev_dummy(int sizeof_priv)
{ … }
EXPORT_SYMBOL_GPL(…);
void synchronize_net(void)
{ … }
EXPORT_SYMBOL(…);
/*
 * Device unregistration, cross-netns moves, CPU-hotplug backlog handling,
 * per-netns init/exit, netdev printk helpers, and the net_dev_init()
 * subsystem initializer.
 *
 * NOTE(review): bodies are elided in this view — descriptions inferred from
 * signatures; confirm against the full source.
 */
static void netdev_rss_contexts_free(struct net_device *dev)
{ … }
/* Queue @dev for unregistration on @head (batching), or unregister now. */
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{ … }
EXPORT_SYMBOL(…);
void unregister_netdevice_many_notify(struct list_head *head,
u32 portid, const struct nlmsghdr *nlh)
{ … }
void unregister_netdevice_many(struct list_head *head)
{ … }
EXPORT_SYMBOL(…);
void unregister_netdev(struct net_device *dev)
{ … }
EXPORT_SYMBOL(…);
/* Moves @dev into @net, renaming per @pat and optionally assigning
 * @new_ifindex (0 presumably means auto-allocate — confirm). */
int __dev_change_net_namespace(struct net_device *dev, struct net *net,
const char *pat, int new_ifindex)
{ … }
EXPORT_SYMBOL_GPL(…);
/* CPU-hotplug callback: drains the dead CPU's softnet backlog. */
static int dev_cpu_dead(unsigned int oldcpu)
{ … }
netdev_features_t netdev_increment_features(netdev_features_t all,
netdev_features_t one, netdev_features_t mask)
{ … }
EXPORT_SYMBOL(…);
static struct hlist_head * __net_init netdev_create_hash(void)
{ … }
static int __net_init netdev_init(struct net *net)
{ … }
const char *netdev_drivername(const struct net_device *dev)
{ … }
static void __netdev_printk(const char *level, const struct net_device *dev,
struct va_format *vaf)
{ … }
void netdev_printk(const char *level, const struct net_device *dev,
const char *format, ...)
{ … }
EXPORT_SYMBOL(…);
/* Expands to the netdev_<level>() printk wrapper definitions below. */
#define define_netdev_printk_level(func, level) …
define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);
static void __net_exit netdev_exit(struct net *net)
{ … }
static struct pernet_operations __net_initdata netdev_net_ops = …;
/* Per-netns teardown: evict/kill devices left in a dying namespace. */
static void __net_exit default_device_exit_net(struct net *net)
{ … }
static void __net_exit default_device_exit_batch(struct list_head *net_list)
{ … }
static struct pernet_operations __net_initdata default_device_ops = …;
static void __init net_dev_init_struct_check_wrapper_comment_placeholder(void);
static void __init net_dev_struct_check(void)
{ … }
#define SYSTEM_PERCPU_PAGE_POOL_SIZE …
static int net_page_pool_create(int cpuid)
{ … }
/* smpboot thread hooks driving per-cpu backlog NAPI processing. */
static int backlog_napi_should_run(unsigned int cpu)
{ … }
static void run_backlog_napi(unsigned int cpu)
{ … }
static void backlog_napi_setup(unsigned int cpu)
{ … }
static struct smp_hotplug_thread backlog_threads = …;
/* One-time subsystem bring-up for the core networking device layer. */
static int __init net_dev_init(void)
{ … }
subsys_initcall(net_dev_init);