linux/net/ipv4/udp.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The User Datagram Protocol (UDP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <[email protected]>
 *		Arnt Gulbrandsen, <[email protected]>
 *		Alan Cox, <[email protected]>
 *		Hirokazu Takahashi, <[email protected]>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() calls
 *		Alan Cox	: 	stopped close while in use off icmp
 *					messages. Not a fix but a botch that
 *					for udp at least is 'valid'.
 *		Alan Cox	:	Fixed icmp handling properly
 *		Alan Cox	: 	Correct error for oversized datagrams
 *		Alan Cox	:	Tidied select() semantics.
 *		Alan Cox	:	udp_err() fixed properly, also now
 *					select and read wake correctly on errors
 *		Alan Cox	:	udp_send verify_area moved to avoid mem leak
 *		Alan Cox	:	UDP can count its memory
 *		Alan Cox	:	send to an unknown connection causes
 *					an ECONNREFUSED off the icmp, but
 *					does NOT close.
 *		Alan Cox	:	Switched to new sk_buff handlers. No more backlog!
 *		Alan Cox	:	Using generic datagram code. Even smaller and the PEEK
 *					bug no longer crashes it.
 *		Fred Van Kempen	: 	Net2e support for sk->broadcast.
 *		Alan Cox	:	Uses skb_free_datagram
 *		Alan Cox	:	Added get/set sockopt support.
 *		Alan Cox	:	Broadcasting without option set returns EACCES.
 *		Alan Cox	:	No wakeup calls. Instead we now use the callbacks.
 *		Alan Cox	:	Use ip_tos and ip_ttl
 *		Alan Cox	:	SNMP Mibs
 *		Alan Cox	:	MSG_DONTROUTE, and 0.0.0.0 support.
 *		Matt Dillon	:	UDP length checks.
 *		Alan Cox	:	Smarter af_inet used properly.
 *		Alan Cox	:	Use new kernel side addressing.
 *		Alan Cox	:	Incorrect return on truncated datagram receive.
 *	Arnt Gulbrandsen 	:	New udp_send and stuff
 *		Alan Cox	:	Cache last socket
 *		Alan Cox	:	Route cache
 *		Jon Peatfield	:	Minor efficiency fix to sendto().
 *		Mike Shaver	:	RFC1122 checks.
 *		Alan Cox	:	Nonblocking error fix.
 *	Willy Konynenberg	:	Transparent proxying support.
 *		Mike McLagan	:	Routing by source
 *		David S. Miller	:	New socket lookup architecture.
 *					Last socket cache retained as it
 *					does have a high hit rate.
 *		Olaf Kirch	:	Don't linearise iovec on sendmsg.
 *		Andi Kleen	:	Some cleanups, cache destination entry
 *					for connect.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Melvin Smith	:	Check msg_name not msg_namelen in sendto(),
 *					return ENOTCONN for unconnected sockets (POSIX)
 *		Janos Farkas	:	don't deliver multi/broadcasts to a different
 *					bound-to-device socket
 *	Hirokazu Takahashi	:	HW checksumming for outgoing UDP
 *					datagrams.
 *	Hirokazu Takahashi	:	sendfile() on UDP works now.
 *		Arnaldo C. Melo :	convert /proc/net/udp to seq_file
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov:		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	Derek Atkins <[email protected]>: Add Encapsulation Support
 *	James Chapman		:	Add L2TP encapsulation type.
 */

#define pr_fmt(fmt) "UDP: " fmt

#include <linux/bpf-cgroup.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/ip_tunnels.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/gso.h>
#include <net/xfrm.h>
#include <trace/events/udp.h>
#include <linux/static_key.h>
#include <linux/btf_ids.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>
#include "udp_impl.h"
#include <net/sock_reuseport.h>
#include <net/addrconf.h>
#include <net/udp_tunnel.h>
#include <net/gro.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6_stubs.h>
#endif

struct udp_table udp_table __read_mostly;
EXPORT_SYMBOL(udp_table);

long sysctl_udp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_udp_mem);

atomic_long_t udp_memory_allocated ____cacheline_aligned_in_smp;
EXPORT_SYMBOL(udp_memory_allocated);
DEFINE_PER_CPU(int, udp_memory_per_cpu_fw_alloc);
EXPORT_PER_CPU_SYMBOL_GPL(udp_memory_per_cpu_fw_alloc);

#define MAX_UDP_PORTS 65536
#define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN_PERNET)

static struct udp_table *udp_get_table_prot(struct sock *sk)
{}

static int udp_lib_lport_inuse(struct net *net, __u16 num,
			       const struct udp_hslot *hslot,
			       unsigned long *bitmap,
			       struct sock *sk, unsigned int log)
{}

/*
 * Note: we still hold the spinlock of the primary hash chain, so no other writer
 * can insert/delete a socket with local_port == num
 */
static int udp_lib_lport_inuse2(struct net *net, __u16 num,
				struct udp_hslot *hslot2,
				struct sock *sk)
{}

static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
{}

/**
 *  udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
 *
 *  @sk:          socket struct in question
 *  @snum:        port number to look up
 *  @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
 *                   with NULL address
 */
int udp_lib_get_port(struct sock *sk, unsigned short snum,
		     unsigned int hash2_nulladdr)
{}
EXPORT_SYMBOL(udp_lib_get_port);

int udp_v4_get_port(struct sock *sk, unsigned short snum)
{}
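
/*
 * To illustrate the hash2_nulladdr argument documented above, here is a
 * minimal sketch of how the IPv4 wrapper could derive it.  Purely
 * illustrative: it assumes ipv4_portaddr_hash() from <net/ip.h> and is not
 * necessarily the exact implementation.
 */
static inline int udp_v4_get_port_sketch(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		ipv4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
	unsigned int hash2_partial =
		ipv4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);

	/* precompute the partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}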

static int compute_score(struct sock *sk, struct net *net,
			 __be32 saddr, __be16 sport,
			 __be32 daddr, unsigned short hnum,
			 int dif, int sdif)
{}

INDIRECT_CALLABLE_SCOPE
u32 udp_ehashfn(const struct net *net, const __be32 laddr, const __u16 lport,
		const __be32 faddr, const __be16 fport)
{}

/* called with rcu_read_lock() */
static struct sock *udp4_lib_lookup2(struct net *net,
				     __be32 saddr, __be16 sport,
				     __be32 daddr, unsigned int hnum,
				     int dif, int sdif,
				     struct udp_hslot *hslot2,
				     struct sk_buff *skb)
{}

/* UDP is nearly always wildcards out the wazoo; it makes no sense to try
 * harder than this. -DaveM
 */
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
		__be16 sport, __be32 daddr, __be16 dport, int dif,
		int sdif, struct udp_table *udptable, struct sk_buff *skb)
{}
EXPORT_SYMBOL_GPL(__udp4_lib_lookup);

static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
						 __be16 sport, __be16 dport,
						 struct udp_table *udptable)
{}

struct sock *udp4_lib_lookup_skb(const struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{}

/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV4) || IS_ENABLED(CONFIG_NF_SOCKET_IPV4)
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif)
{}
EXPORT_SYMBOL_GPL(udp4_lib_lookup);
#endif
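
/*
 * Sketch of the calling convention described above: the lookup runs under
 * rcu_read_lock() and, on success, returns a socket holding an extra
 * reference that the caller must drop with sock_put().  Illustrative only;
 * udp4_lib_lookup() itself is only built when the options above are enabled.
 */
static inline void udp4_lib_lookup_usage_sketch(struct net *net,
						__be32 saddr, __be16 sport,
						__be32 daddr, __be16 dport,
						int dif)
{
	struct sock *sk;

	rcu_read_lock();
	sk = udp4_lib_lookup(net, saddr, sport, daddr, dport, dif);
	rcu_read_unlock();

	if (!sk)
		return;

	/* ... use sk; the extra reference keeps it valid outside RCU ... */

	sock_put(sk);
}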

static inline bool __udp_is_mcast_sock(struct net *net, const struct sock *sk,
				       __be16 loc_port, __be32 loc_addr,
				       __be16 rmt_port, __be32 rmt_addr,
				       int dif, int sdif, unsigned short hnum)
{}

DEFINE_STATIC_KEY_FALSE(udp_encap_needed_key);
EXPORT_SYMBOL(udp_encap_needed_key);

#if IS_ENABLED(CONFIG_IPV6)
DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
EXPORT_SYMBOL(udpv6_encap_needed_key);
#endif

void udp_encap_enable(void)
{}
EXPORT_SYMBOL(udp_encap_enable);

void udp_encap_disable(void)
{}
EXPORT_SYMBOL(udp_encap_disable);

/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 * through error handlers in encapsulations looking for a match.
 */
static int __udp4_lib_err_encap_no_sk(struct sk_buff *skb, u32 info)
{}

/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 * reversing source and destination port: this will match tunnels that force the
 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 * lwtunnels might actually break this assumption by being configured with
 * different destination ports on endpoints; in this case we won't be able to
 * trace ICMP messages back to them.
 *
 * If this doesn't match any socket, probe tunnels with arbitrary destination
 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 * we've sent packets to won't necessarily match the local destination port.
 *
 * Then ask the tunnel implementation to match the error against a valid
 * association.
 *
 * Return an error if we can't find a match, the socket if we need further
 * processing, zero otherwise.
 */
static struct sock *__udp4_lib_err_encap(struct net *net,
					 const struct iphdr *iph,
					 struct udphdr *uh,
					 struct udp_table *udptable,
					 struct sock *sk,
					 struct sk_buff *skb, u32 info)
{}

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.
 * Header points to the ip header of the error packet. We move
 * on past this. Then (as it used to claim before adjustment)
 * header points to the first 8 bytes of the udp header.  We need
 * to find the appropriate port.
 */

int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
{}
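
/*
 * For the "err > 0" case described in the comment above, the value packs the
 * ICMP type and code as (type << 8) | code; a caller could unpack it like
 * this (illustrative helper only):
 */
static inline void udp_err_unpack_sketch(int err, u8 *icmp_type, u8 *icmp_code)
{
	*icmp_type = err >> 8;
	*icmp_code = err & 0xff;
}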

int udp_err(struct sk_buff *skb, u32 info)
{}

/*
 * Throw away all pending data and cancel the corking. Socket is locked.
 */
void udp_flush_pending_frames(struct sock *sk)
{}
EXPORT_SYMBOL(udp_flush_pending_frames);

/**
 * 	udp4_hwcsum  -  handle outgoing HW checksumming
 * 	@skb: 	sk_buff containing the filled-in UDP header
 * 	        (checksum field must be zeroed out)
 *	@src:	source IP address
 *	@dst:	destination IP address
 */
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
{}
EXPORT_SYMBOL_GPL(udp4_hwcsum);

/* Function to set UDP checksum for an IPv4 UDP packet. This is intended
 * for the simple case like when setting the checksum for a UDP tunnel.
 */
void udp_set_csum(bool nocheck, struct sk_buff *skb,
		  __be32 saddr, __be32 daddr, int len)
{}
EXPORT_SYMBOL(udp_set_csum);
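
/*
 * Sketch of the "simple case" mentioned above: a tunnel that has just built
 * its outer UDP header lets udp_set_csum() fill in uh->check (or set the skb
 * up for hardware checksum offload).  Illustrative only; the header setup is
 * modelled loosely on the UDP tunnel transmit helpers.
 */
static inline void udp_tunnel_csum_sketch(struct sk_buff *skb,
					  __be32 saddr, __be32 daddr,
					  __be16 sport, __be16 dport,
					  bool nocheck)
{
	struct udphdr *uh = udp_hdr(skb);	/* transport header already set */

	uh->source = sport;
	uh->dest = dport;
	uh->len = htons(skb->len);
	uh->check = 0;

	udp_set_csum(nocheck, skb, saddr, daddr, skb->len);
}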

static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
			struct inet_cork *cork)
{}

/*
 * Push out all pending data as one UDP datagram. Socket is locked.
 */
int udp_push_pending_frames(struct sock *sk)
{}
EXPORT_SYMBOL(udp_push_pending_frames);

static int __udp_cmsg_send(struct cmsghdr *cmsg, u16 *gso_size)
{}

int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size)
{}
EXPORT_SYMBOL_GPL(udp_cmsg_send);

int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{}
EXPORT_SYMBOL(udp_sendmsg);

void udp_splice_eof(struct socket *sock)
{}
EXPORT_SYMBOL_GPL(udp_splice_eof);

#define UDP_SKB_IS_STATELESS 0x80000000

/* all head states (dst, sk, nf conntrack) except skb extensions are
 * cleared by udp_rcv().
 *
 * We need to preserve secpath, if present, to eventually process
 * IP_CMSG_PASSSEC at recvmsg() time.
 *
 * Other extensions can be cleared.
 */
static bool udp_try_make_stateless(struct sk_buff *skb)
{}

static void udp_set_dev_scratch(struct sk_buff *skb)
{}

static void udp_skb_csum_unnecessary_set(struct sk_buff *skb)
{}

static int udp_skb_truesize(struct sk_buff *skb)
{}

static bool udp_skb_has_head_state(struct sk_buff *skb)
{}

/* fully reclaim rmem/fwd memory allocated for skb */
static void udp_rmem_release(struct sock *sk, int size, int partial,
			     bool rx_queue_lock_held)
{}

/* Note: called with reader_queue.lock held.
 * Instead of using skb->truesize here, find a copy of it in skb->dev_scratch.
 * This avoids a cache line miss while the receive_queue lock is held.
 * Look at __udp_enqueue_schedule_skb() to find where this copy is done.
 */
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb)
{}
EXPORT_SYMBOL(udp_skb_destructor);

/* as above, but the caller held the rx queue lock, too */
static void udp_skb_dtor_locked(struct sock *sk, struct sk_buff *skb)
{}
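
/*
 * The dev_scratch trick noted above, in miniature: at enqueue time a copy of
 * skb->truesize is stashed in skb->dev_scratch (which aliases skb->dev while
 * the skb sits in a receive queue), and the destructor reads that copy back.
 * The layout below is purely illustrative, not the scratch layout this file
 * actually uses.
 */
struct udp_scratch_sketch {
	u32 truesize;
};

static inline void udp_scratch_sketch_store(struct sk_buff *skb)
{
	((struct udp_scratch_sketch *)&skb->dev_scratch)->truesize = skb->truesize;
}

static inline u32 udp_scratch_sketch_load(const struct sk_buff *skb)
{
	return ((const struct udp_scratch_sketch *)&skb->dev_scratch)->truesize;
}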

/* The idea of busylocks is to let producers grab an extra spinlock
 * to relieve pressure on the receive_queue spinlock shared by the consumer.
 * Under flood, this means that only one producer can be in line
 * trying to acquire the receive_queue spinlock.
 * These busylocks can be allocated in a per-cpu manner, instead of a
 * per-socket one (which would consume a cache line per socket)
 */
static int udp_busylocks_log __read_mostly;
static spinlock_t *udp_busylocks __read_mostly;

static spinlock_t *busylock_acquire(void *ptr)
{}

static void busylock_release(spinlock_t *busy)
{}
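
/*
 * One plausible shape for the helpers above (a sketch, assuming hash_ptr()
 * from <linux/hash.h>): pick one lock out of a small hashed array so that
 * concurrent producers targeting the same socket serialise on it before
 * contending on the receive_queue spinlock.
 */
static inline spinlock_t *busylock_acquire_sketch(void *ptr)
{
	spinlock_t *busy = udp_busylocks + hash_ptr(ptr, udp_busylocks_log);

	spin_lock(busy);
	return busy;
}

static inline void busylock_release_sketch(spinlock_t *busy)
{
	if (busy)
		spin_unlock(busy);
}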

static int udp_rmem_schedule(struct sock *sk, int size)
{}

int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
{}
EXPORT_SYMBOL_GPL(__udp_enqueue_schedule_skb);

void udp_destruct_common(struct sock *sk)
{}
EXPORT_SYMBOL_GPL(udp_destruct_common);

static void udp_destruct_sock(struct sock *sk)
{}

int udp_init_sock(struct sock *sk)
{}

void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
{}
EXPORT_SYMBOL_GPL(skb_consume_udp);

static struct sk_buff *__first_packet_length(struct sock *sk,
					     struct sk_buff_head *rcvq,
					     int *total)
{}

/**
 *	first_packet_length	- return length of first packet in receive queue
 *	@sk: socket
 *
 *	Drops all bad checksum frames, until a valid one is found.
 *	Returns the length of found skb, or -1 if none is found.
 */
static int first_packet_length(struct sock *sk)
{}

/*
 *	IOCTL requests applicable to the UDP protocol
 */

int udp_ioctl(struct sock *sk, int cmd, int *karg)
{}
EXPORT_SYMBOL(udp_ioctl);

struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
			       int *off, int *err)
{}
EXPORT_SYMBOL(__skb_recv_udp);

int udp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
{}
EXPORT_SYMBOL(udp_read_skb);

/*
 * 	This should be easy: if there is something there we
 * 	return it, otherwise we block.
 */

int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags,
		int *addr_len)
{}

int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{}
EXPORT_SYMBOL(udp_pre_connect);

int __udp_disconnect(struct sock *sk, int flags)
{}
EXPORT_SYMBOL(__udp_disconnect);

int udp_disconnect(struct sock *sk, int flags)
{}
EXPORT_SYMBOL(udp_disconnect);

void udp_lib_unhash(struct sock *sk)
{}
EXPORT_SYMBOL(udp_lib_unhash);

/*
 * inet_rcv_saddr was changed, we must rehash secondary hash
 */
void udp_lib_rehash(struct sock *sk, u16 newhash)
{}
EXPORT_SYMBOL(udp_lib_rehash);

void udp_v4_rehash(struct sock *sk)
{}

static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{}

/* returns:
 *  -1: error
 *   0: success
 *  >0: "udp encap" protocol resubmission
 *
 * Note that in the success and error cases, the skb is assumed to
 * have either been requeued or freed.
 */
static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
{}

static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{}

/* For TCP sockets, sk_rx_dst is protected by socket lock
 * For UDP, we use xchg() to guard against concurrent changes.
 */
bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{}
EXPORT_SYMBOL(udp_sk_rx_dst_set);
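
/*
 * The xchg() pattern described above, in isolation (a sketch; the real code
 * operates on sk->sk_rx_dst with the appropriate RCU annotations): take a
 * reference on the new dst before publishing it, atomically swap it into the
 * slot without holding the socket lock, then release whatever was there.
 */
static inline bool udp_rx_dst_xchg_sketch(struct dst_entry **slot,
					  struct dst_entry *dst)
{
	struct dst_entry *old;

	if (!dst_hold_safe(dst))
		return false;

	old = xchg(slot, dst);
	dst_release(old);
	return old != dst;
}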

/*
 *	Multicasts and broadcasts go to each listener.
 *
 *	Note: called only from the BH handler context.
 */
static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
				    struct udphdr  *uh,
				    __be32 saddr, __be32 daddr,
				    struct udp_table *udptable,
				    int proto)
{}

/* Initialize the UDP checksum. If this exits with a zero value (success),
 * CHECKSUM_UNNECESSARY means that no more checks are required.
 * Otherwise, csum completion requires checksumming the packet body,
 * including the udp header, and folding it into skb->csum.
 */
static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
				 int proto)
{}

/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
 * return code conversion for ip layer consumption
 */
static int udp_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
			       struct udphdr *uh)
{}
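
/*
 * Sketch of the return code conversion mentioned above: the per-socket
 * receive path reports a positive encapsulation protocol number when the
 * packet must be resubmitted, while the IP layer expects -protocol for
 * resubmission and 0 otherwise (illustrative only).
 */
static inline int udp_rcv_ret_to_ip_sketch(int ret)
{
	return ret > 0 ? -ret : 0;
}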

/*
 *	All we need to do is get the socket, and then do a checksum.
 */

int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{}

/* We can only early demux multicast if there is a single matching socket.
 * If more than one socket is found, return NULL.
 */
static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
						  __be16 loc_port, __be32 loc_addr,
						  __be16 rmt_port, __be32 rmt_addr,
						  int dif, int sdif)
{}

/* For unicast we should only early demux connected sockets or we can
 * break forwarding setups.  The chains here can be long so only check
 * if the first socket is an exact match and if not move on.
 */
static struct sock *__udp4_lib_demux_lookup(struct net *net,
					    __be16 loc_port, __be32 loc_addr,
					    __be16 rmt_port, __be32 rmt_addr,
					    int dif, int sdif)
{}

int udp_v4_early_demux(struct sk_buff *skb)
{}

int udp_rcv(struct sk_buff *skb)
{}

void udp_destroy_sock(struct sock *sk)
{}

static void set_xfrm_gro_udp_encap_rcv(__u16 encap_type, unsigned short family,
				       struct sock *sk)
{}

/*
 *	Socket option code for UDP
 */
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
		       sockptr_t optval, unsigned int optlen,
		       int (*push_pending_frames)(struct sock *))
{}
EXPORT_SYMBOL(udp_lib_setsockopt);

int udp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		   unsigned int optlen)
{}

int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen)
{}
EXPORT_SYMBOL(udp_lib_getsockopt);

int udp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen)
{}

/**
 * 	udp_poll - wait for a UDP event.
 *	@file: file struct
 *	@sock: socket
 *	@wait: poll table
 *
 *	This is the same as datagram poll, except for the special case of
 *	blocking sockets. If an application is using a blocking fd
 *	and a packet with a checksum error is in the queue,
 *	then it could get a return from select() indicating data available,
 *	but then block when reading it. Add special case code
 *	to work around these arguably broken applications.
 */
__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait)
{}
EXPORT_SYMBOL(udp_poll);

int udp_abort(struct sock *sk, int err)
{}
EXPORT_SYMBOL_GPL(udp_abort);

struct proto udp_prot =;
EXPORT_SYMBOL(udp_prot);

/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS

static unsigned short seq_file_family(const struct seq_file *seq);
static bool seq_sk_match(struct seq_file *seq, const struct sock *sk)
{}

#ifdef CONFIG_BPF_SYSCALL
static const struct seq_operations bpf_iter_udp_seq_ops;
#endif
static struct udp_table *udp_get_table_seq(struct seq_file *seq,
					   struct net *net)
{}

static struct sock *udp_get_first(struct seq_file *seq, int start)
{}

static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
{}

static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
{}

void *udp_seq_start(struct seq_file *seq, loff_t *pos)
{}
EXPORT_SYMBOL(udp_seq_start);

void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{}
EXPORT_SYMBOL(udp_seq_next);

void udp_seq_stop(struct seq_file *seq, void *v)
{}
EXPORT_SYMBOL(udp_seq_stop);

/* ------------------------------------------------------------------------ */
static void udp4_format_sock(struct sock *sp, struct seq_file *f,
		int bucket)
{}

int udp4_seq_show(struct seq_file *seq, void *v)
{}

#ifdef CONFIG_BPF_SYSCALL
struct bpf_iter__udp {};

struct bpf_udp_iter_state {};

static int bpf_iter_udp_realloc_batch(struct bpf_udp_iter_state *iter,
				      unsigned int new_batch_sz);
static struct sock *bpf_iter_udp_batch(struct seq_file *seq)
{}

static void *bpf_iter_udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{}

static void *bpf_iter_udp_seq_start(struct seq_file *seq, loff_t *pos)
{}

static int udp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
			     struct udp_sock *udp_sk, uid_t uid, int bucket)
{}

static int bpf_iter_udp_seq_show(struct seq_file *seq, void *v)
{}

static void bpf_iter_udp_put_batch(struct bpf_udp_iter_state *iter)
{}

static void bpf_iter_udp_seq_stop(struct seq_file *seq, void *v)
{}

static const struct seq_operations bpf_iter_udp_seq_ops =;
#endif

static unsigned short seq_file_family(const struct seq_file *seq)
{}

const struct seq_operations udp_seq_ops =;
EXPORT_SYMBOL(udp_seq_ops);

static struct udp_seq_afinfo udp4_seq_afinfo =;

static int __net_init udp4_proc_init_net(struct net *net)
{}

static void __net_exit udp4_proc_exit_net(struct net *net)
{}

static struct pernet_operations udp4_net_ops =;

int __init udp4_proc_init(void)
{}

void udp4_proc_exit(void)
{}
#endif /* CONFIG_PROC_FS */

static __initdata unsigned long uhash_entries;
static int __init set_uhash_entries(char *str)
{}
__setup("uhash_entries=", set_uhash_entries);

void __init udp_table_init(struct udp_table *table, const char *name)
{}

u32 udp_flow_hashrnd(void)
{}
EXPORT_SYMBOL(udp_flow_hashrnd);

static void __net_init udp_sysctl_init(struct net *net)
{}

static struct udp_table __net_init *udp_pernet_table_alloc(unsigned int hash_entries)
{}

static void __net_exit udp_pernet_table_free(struct net *net)
{}

static void __net_init udp_set_table(struct net *net)
{}

static int __net_init udp_pernet_init(struct net *net)
{}

static void __net_exit udp_pernet_exit(struct net *net)
{}

static struct pernet_operations __net_initdata udp_sysctl_ops =;

#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
DEFINE_BPF_ITER_FUNC(udp, struct bpf_iter_meta *meta,
		     struct udp_sock *udp_sk, uid_t uid, int bucket)

static int bpf_iter_udp_realloc_batch(struct bpf_udp_iter_state *iter,
				      unsigned int new_batch_sz)
{}

#define INIT_BATCH_SZ 16

static int bpf_iter_init_udp(void *priv_data, struct bpf_iter_aux_info *aux)
{}

static void bpf_iter_fini_udp(void *priv_data)
{}

static const struct bpf_iter_seq_info udp_seq_info =;

static struct bpf_iter_reg udp_reg_info =;

static void __init bpf_iter_register(void)
{}
#endif

void __init udp_init(void)
{}