// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <[email protected]>
 *		Florian La Roche, <[email protected]>
 *		Alan Cox, <[email protected]>
 *
 * Fixes:
 *		Alan Cox	: 	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *	Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	: 	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *              Steve Whitehouse:       Added default destructor to free
 *                                      protocol private data.
 *              Steve Whitehouse:       Added various other default routines
 *                                      common to several socket families.
 *              Chris Evans     :       Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo :       cleanups, use skb_queue_purge
 *
 * To Fix:
 */

#define pr_fmt(fmt)

#include <asm/unaligned.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/static_key.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>
#include <linux/compat.h>
#include <linux/mroute.h>
#include <linux/mroute6.h>
#include <linux/icmpv6.h>

#include <linux/uaccess.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/proto_memory.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>
#include <linux/sock_diag.h>

#include <linux/filter.h>
#include <net/sock_reuseport.h>
#include <net/bpf_sk_storage.h>

#include <trace/events/sock.h>

#include <net/tcp.h>
#include <net/busy_poll.h>
#include <net/phonet/phonet.h>

#include <linux/ethtool.h>

#include "dev.h"

static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

static void sock_def_write_space_wfree(struct sock *sk);
static void sock_def_write_space(struct sock *sk);

/**
 * sk_ns_capable - General socket capability test
 * @sk: Socket to use a capability on or through
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test whether the opener of the socket had the capability @cap in the
 * user namespace @user_ns when the socket was created, and whether the
 * current process has it as well.
 */
bool sk_ns_capable(const struct sock *sk,
		   struct user_namespace *user_ns, int cap)
{}
EXPORT_SYMBOL();

/**
 * sk_capable - Socket global capability test
 * @sk: Socket to use a capability on or through
 * @cap: The global capability to use
 *
 * Test whether the opener of the socket had the capability @cap when the
 * socket was created, and whether the current process has it in all user
 * namespaces.
 */
bool sk_capable(const struct sock *sk, int cap)
{}
EXPORT_SYMBOL();

/**
 * sk_net_capable - Network namespace socket capability test
 * @sk: Socket to use a capability on or through
 * @cap: The capability to use
 *
 * Test whether the opener of the socket had the capability @cap when the
 * socket was created, and whether the current process has it over the
 * network namespace the socket is a member of.
 */
bool sk_net_capable(const struct sock *sk, int cap)
{}
EXPORT_SYMBOL();
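
/*
 * Illustrative sketch only; the real bodies above are elided. The three
 * helpers are conventionally layered as below, with sk_capable() checking
 * the initial user namespace and sk_net_capable() checking the owner of
 * the socket's network namespace. The _example names are hypothetical.
 */
static inline bool sk_ns_capable_example(const struct sock *sk,
					 struct user_namespace *user_ns, int cap)
{
	/* Both the socket opener (checked via the socket's file, which
	 * captured the opener's capabilities) and the current task must
	 * hold @cap in @user_ns.
	 */
	return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
	       ns_capable(user_ns, cap);
}

static inline bool sk_capable_example(const struct sock *sk, int cap)
{
	return sk_ns_capable_example(sk, &init_user_ns, cap);
}

static inline bool sk_net_capable_example(const struct sock *sk, int cap)
{
	return sk_ns_capable_example(sk, sock_net(sk)->user_ns, cap);
}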

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family and separate keys for internal and
 * userspace sockets.
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_kern_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];
static struct lock_class_key af_family_kern_slock_keys[AF_MAX];

/*
 * Make lock validator output more readable. (We pre-construct these
 * strings at build time, so that runtime initialization of socket
 * locks is fast.)
 */

#define _sock_locks(x)

static const char *const af_family_key_strings[AF_MAX+1] =;
static const char *const af_family_slock_key_strings[AF_MAX+1] =;
static const char *const af_family_clock_key_strings[AF_MAX+1] =;

static const char *const af_family_kern_key_strings[AF_MAX+1] =;
static const char *const af_family_kern_slock_key_strings[AF_MAX+1] =;
static const char *const af_family_kern_clock_key_strings[AF_MAX+1] =;
static const char *const af_family_rlock_key_strings[AF_MAX+1] =;
static const char *const af_family_wlock_key_strings[AF_MAX+1] =;
static const char *const af_family_elock_key_strings[AF_MAX+1] =;
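
/*
 * Illustrative sketch of the elided _sock_locks() macro above: it pastes a
 * caller-supplied prefix onto one string literal per address family, so the
 * arrays above can be filled in at build time with names like
 * "sk_lock-AF_INET". The abbreviated expansion below is an assumption; the
 * real table has one entry for every family up to AF_MAX.
 */
#define _sock_locks_example(x)					\
	x "AF_UNSPEC",	x "AF_UNIX",	x "AF_INET",		\
	/* ... one entry per address family ... */		\
	x "AF_MAX"

static const char *const af_family_key_strings_example[] __maybe_unused = {
	_sock_locks_example("sk_lock-")
};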

/*
 * sk_callback_lock and sk queues locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];
static struct lock_class_key af_rlock_keys[AF_MAX];
static struct lock_class_key af_wlock_keys[AF_MAX];
static struct lock_class_key af_elock_keys[AF_MAX];
static struct lock_class_key af_kern_callback_keys[AF_MAX];

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly =;
EXPORT_SYMBOL();
__u32 sysctl_rmem_max __read_mostly =;
EXPORT_SYMBOL();
__u32 sysctl_wmem_default __read_mostly =;
__u32 sysctl_rmem_default __read_mostly =;

int sysctl_tstamp_allow_data __read_mostly =;

DEFINE_STATIC_KEY_FALSE(memalloc_socks_key);
EXPORT_SYMBOL_GPL();

/**
 * sk_set_memalloc - sets %SOCK_MEMALLOC
 * @sk: socket to set it on
 *
 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 * It's the responsibility of the admin to adjust min_free_kbytes
 * to meet the requirements.
 */
void sk_set_memalloc(struct sock *sk)
{}
EXPORT_SYMBOL_GPL();
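
/*
 * Illustrative sketch (hypothetical _example name; the real body above is
 * elided): marking a socket %SOCK_MEMALLOC conventionally also tags the
 * socket's own allocations with __GFP_MEMALLOC and enables the
 * memalloc_socks_key static branch that fast paths test.
 */
static inline void sk_set_memalloc_example(struct sock *sk)
{
	sock_set_flag(sk, SOCK_MEMALLOC);	 /* receive path may use reserves */
	sk->sk_allocation |= __GFP_MEMALLOC;	 /* and so may its allocations */
	static_branch_inc(&memalloc_socks_key); /* cheap "any memalloc socks?" test */
}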

void sk_clear_memalloc(struct sock *sk)
{}
EXPORT_SYMBOL_GPL();

int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{}
EXPORT_SYMBOL();

void sk_error_report(struct sock *sk)
{}
EXPORT_SYMBOL();

int sock_get_timeout(long timeo, void *optval, bool old_timeval)
{}
EXPORT_SYMBOL();

int sock_copy_user_timeval(struct __kernel_sock_timeval *tv,
			   sockptr_t optval, int optlen, bool old_timeval)
{}
EXPORT_SYMBOL();

static int sock_set_timeout(long *timeo_p, sockptr_t optval, int optlen,
			    bool old_timeval)
{}

static bool sock_needs_netstamp(const struct sock *sk)
{}

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{}


int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{}
EXPORT_SYMBOL();

int sock_queue_rcv_skb_reason(struct sock *sk, struct sk_buff *skb,
			      enum skb_drop_reason *reason)
{}
EXPORT_SYMBOL();

int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
		     const int nested, unsigned int trim_cap, bool refcounted)
{}
EXPORT_SYMBOL();

INDIRECT_CALLABLE_DECLARE();
INDIRECT_CALLABLE_DECLARE();
struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{}
EXPORT_SYMBOL();

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{}
EXPORT_SYMBOL();

static int sock_bindtoindex_locked(struct sock *sk, int ifindex)
{}

int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk)
{}
EXPORT_SYMBOL();

static int sock_setbindtodevice(struct sock *sk, sockptr_t optval, int optlen)
{}

static int sock_getbindtodevice(struct sock *sk, sockptr_t optval,
				sockptr_t optlen, int len)
{}

bool sk_mc_loop(const struct sock *sk)
{}
EXPORT_SYMBOL();

void sock_set_reuseaddr(struct sock *sk)
{}
EXPORT_SYMBOL();

void sock_set_reuseport(struct sock *sk)
{}
EXPORT_SYMBOL();

void sock_no_linger(struct sock *sk)
{}
EXPORT_SYMBOL();

void sock_set_priority(struct sock *sk, u32 priority)
{}
EXPORT_SYMBOL();

void sock_set_sndtimeo(struct sock *sk, s64 secs)
{}
EXPORT_SYMBOL();
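
/*
 * Illustrative usage sketch: the lock-taking setters above exist so that
 * in-kernel socket users can apply common options without open-coding
 * sk_setsockopt() calls. The function below and the tuning values it
 * picks are hypothetical.
 */
static inline void kernel_client_tune_sock_example(struct socket *sock)
{
	struct sock *sk = sock->sk;

	sock_set_reuseaddr(sk);		/* allow fast rebinding of the port */
	sock_no_linger(sk);		/* close() must not block on unsent data */
	sock_set_priority(sk, 6);	/* elevated queueing priority */
	sock_set_sndtimeo(sk, 30);	/* give up on sends after 30 seconds */
}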

static void __sock_set_timestamps(struct sock *sk, bool val, bool new, bool ns)
{}

void sock_enable_timestamps(struct sock *sk)
{}
EXPORT_SYMBOL();

void sock_set_timestamp(struct sock *sk, int optname, bool valbool)
{}

static int sock_timestamping_bind_phc(struct sock *sk, int phc_index)
{}

int sock_set_timestamping(struct sock *sk, int optname,
			  struct so_timestamping timestamping)
{}

void sock_set_keepalive(struct sock *sk)
{}
EXPORT_SYMBOL();

static void __sock_set_rcvbuf(struct sock *sk, int val)
{}

void sock_set_rcvbuf(struct sock *sk, int val)
{}
EXPORT_SYMBOL();

static void __sock_set_mark(struct sock *sk, u32 val)
{}

void sock_set_mark(struct sock *sk, u32 val)
{}
EXPORT_SYMBOL();

static void sock_release_reserved_memory(struct sock *sk, int bytes)
{}

static int sock_reserve_memory(struct sock *sk, int bytes)
{}

void sockopt_lock_sock(struct sock *sk)
{}
EXPORT_SYMBOL();

void sockopt_release_sock(struct sock *sk)
{}
EXPORT_SYMBOL();

bool sockopt_ns_capable(struct user_namespace *ns, int cap)
{}
EXPORT_SYMBOL();

bool sockopt_capable(int cap)
{}
EXPORT_SYMBOL();

static int sockopt_validate_clockid(__kernel_clockid_t value)
{}

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sk_setsockopt(struct sock *sk, int level, int optname,
		  sockptr_t optval, unsigned int optlen)
{}

int sock_setsockopt(struct socket *sock, int level, int optname,
		    sockptr_t optval, unsigned int optlen)
{}
EXPORT_SYMBOL();

static const struct cred *sk_get_peer_cred(struct sock *sk)
{}

static void cred_to_ucred(struct pid *pid, const struct cred *cred,
			  struct ucred *ucred)
{}

static int groups_to_user(sockptr_t dst, const struct group_info *src)
{}

int sk_getsockopt(struct sock *sk, int level, int optname,
		  sockptr_t optval, sockptr_t optlen)
{}

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{}

/*
 * Copy all fields from osk to nsk, but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left
 * as is. We must not copy fields between sk_dontcopy_begin and
 * sk_dontcopy_end.
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{}
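
/*
 * Illustrative sketch of the two-part copy described above (hypothetical
 * _example name): everything before sk_dontcopy_begin and everything from
 * sk_dontcopy_end onwards is copied, leaving the fields in between
 * (sk_refcnt, sk_node, ...) untouched in @nsk.
 */
static inline void sock_copy_example(struct sock *nsk, const struct sock *osk)
{
	const struct proto *prot = READ_ONCE(osk->sk_prot);

	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
	       prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
}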

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
		int family)
{}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{}

/**
 *	sk_alloc - All socket objects are allocated here
 *	@net: the applicable net namespace
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 *	@kern: is this to be a kernel socket?
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot, int kern)
{}
EXPORT_SYMBOL();

/* Sockets having SOCK_RCU_FREE will call this function after one RCU
 * grace period. This is the case for UDP sockets and TCP listeners.
 */
static void __sk_destruct(struct rcu_head *head)
{}

void sk_destruct(struct sock *sk)
{}

static void __sk_free(struct sock *sk)
{}

void sk_free(struct sock *sk)
{}
EXPORT_SYMBOL();

static void sk_init_common(struct sock *sk)
{}

/**
 *	sk_clone_lock - clone a socket, and lock its clone
 *	@sk: the socket to clone
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
{}
EXPORT_SYMBOL_GPL();

void sk_free_unlock_clone(struct sock *sk)
{}
EXPORT_SYMBOL_GPL();

static u32 sk_dst_gso_max_size(struct sock *sk, struct dst_entry *dst)
{}

void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{}
EXPORT_SYMBOL_GPL();

/*
 *	Simple resource managers for sockets.
 */


/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{}
EXPORT_SYMBOL();

/* This variant of sock_wfree() is used by TCP,
 * since it sets SOCK_USE_WRITE_QUEUE.
 */
void __sock_wfree(struct sk_buff *skb)
{}

void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{}
EXPORT_SYMBOL();
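
/*
 * Illustrative sketch of the owner/destructor pairing (hypothetical
 * _example name, simplified): skb_set_owner_w() charges skb->truesize to
 * the socket's write allocation and arranges for sock_wfree() to uncharge
 * it when the skb is freed.
 */
static inline void skb_set_owner_w_example(struct sk_buff *skb, struct sock *sk)
{
	skb_orphan(skb);		/* detach any previous owner first */
	skb->sk = sk;
	skb->destructor = sock_wfree;	/* runs from kfree_skb() */
	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
}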

static bool can_skb_orphan_partial(const struct sk_buff *skb)
{}

/* This helper is used by netem, as it can hold packets in its
 * delay queue. We want to allow the owner socket to send more
 * packets, as if they were already TX completed by a typical driver.
 * But we also want to keep skb->sk set because some packet schedulers
 * rely on it (sch_fq for example).
 */
void skb_orphan_partial(struct sk_buff *skb)
{}
EXPORT_SYMBOL();

/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{}
EXPORT_SYMBOL();

/*
 * Buffer destructor for skbs that are not used directly in read or write
 * path, e.g. for error handler skbs. Automatically called from kfree_skb.
 */
void sock_efree(struct sk_buff *skb)
{}
EXPORT_SYMBOL();

/* Buffer destructor for prefetch/receive path where reference count may
 * not be held, e.g. for listen sockets.
 */
#ifdef CONFIG_INET
void sock_pfree(struct sk_buff *skb)
{}
EXPORT_SYMBOL();
#endif /* CONFIG_INET */

kuid_t sock_i_uid(struct sock *sk)
{}
EXPORT_SYMBOL();

unsigned long __sock_i_ino(struct sock *sk)
{}
EXPORT_SYMBOL();

unsigned long sock_i_ino(struct sock *sk)
{}
EXPORT_SYMBOL();

/*
 * Allocate an skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{}
EXPORT_SYMBOL();

static void sock_ofree(struct sk_buff *skb)
{}

struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
			     gfp_t priority)
{}

/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{}
EXPORT_SYMBOL();

/* Free an option memory block. Note, we actually want the inline
 * here as this allows gcc to detect the nullify and fold away the
 * condition entirely.
 */
static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
				  const bool nullify)
{}

void sock_kfree_s(struct sock *sk, void *mem, int size)
{}
EXPORT_SYMBOL();

void sock_kzfree_s(struct sock *sk, void *mem, int size)
{}
EXPORT_SYMBOL();
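
/*
 * Illustrative sketch of the nullify pattern referred to above
 * (hypothetical _example name): with a compile-time constant @nullify the
 * branch folds away, so sock_kfree_s() and sock_kzfree_s() cost no more
 * than the underlying kfree()/kfree_sensitive() plus the uncharge.
 */
static inline void sock_kfree_example(struct sock *sk, void *mem, int size,
				      const bool nullify)
{
	if (nullify)
		kfree_sensitive(mem);	/* zeroes the buffer before freeing */
	else
		kfree(mem);
	atomic_sub(size, &sk->sk_omem_alloc);	/* uncharge option memory */
}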

/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
 * I think these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
{}


/*
 *	Generic send/receive buffer handlers
 */

struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
				     unsigned long data_len, int noblock,
				     int *errcode, int max_page_order)
{}
EXPORT_SYMBOL();

int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg,
		     struct sockcm_cookie *sockc)
{}
EXPORT_SYMBOL();

int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
		   struct sockcm_cookie *sockc)
{}
EXPORT_SYMBOL();

static void sk_enter_memory_pressure(struct sock *sk)
{}

static void sk_leave_memory_pressure(struct sock *sk)
{}

DEFINE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key);

/**
 * skb_page_frag_refill - check that a page_frag contains enough room
 * @sz: minimum size of the fragment we want to get
 * @pfrag: pointer to page_frag
 * @gfp: priority for memory allocation
 *
 * Note: While this allocator tries to use high order pages, there is
 * no guarantee that allocations succeed. Therefore, @sz MUST be
 * less than or equal to PAGE_SIZE.
 */
bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
{}
EXPORT_SYMBOL();

bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
{}
EXPORT_SYMBOL();
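
/*
 * Illustrative usage sketch (hypothetical _example name): a send path
 * typically refills the socket's page_frag before appending data, copying
 * into pfrag->page at pfrag->offset and advancing the offset afterwards.
 * Error handling is reduced to the bare minimum here.
 */
static inline int sk_copy_chunk_example(struct sock *sk, const char *data,
					int len)
{
	struct page_frag *pfrag = sk_page_frag(sk);
	int copy;

	if (!sk_page_frag_refill(sk, pfrag))
		return -ENOMEM;		/* caller should wait for memory */

	copy = min_t(int, len, pfrag->size - pfrag->offset);
	memcpy(page_address(pfrag->page) + pfrag->offset, data, copy);
	pfrag->offset += copy;
	return copy;			/* bytes actually buffered */
}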

void __lock_sock(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{}

void __release_sock(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{}

void __sk_flush_backlog(struct sock *sk)
{}
EXPORT_SYMBOL_GPL();

/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk:    sock to wait on
 * @timeo: for how long
 * @skb:   last skb seen on sk_receive_queue
 *
 * Now socket state including sk->sk_err is changed only under lock,
 * hence we may omit checks after joining wait queue.
 * We check receive queue before schedule() only as an optimization;
 * it is very likely that release_sock() added new data.
 */
int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
{}
EXPORT_SYMBOL();
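
/*
 * Illustrative usage sketch (hypothetical _example name): a blocking
 * receive path typically loops on sk_wait_data() with the socket locked,
 * re-checking the receive queue each time it is woken. A real caller
 * would also break out on signal_pending() and report -ERESTARTSYS.
 */
static inline struct sk_buff *wait_for_data_example(struct sock *sk, int flags)
{
	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	struct sk_buff *skb;

	while (!(skb = skb_peek(&sk->sk_receive_queue))) {
		if (!timeo || READ_ONCE(sk->sk_err))
			return NULL;	/* would block, or pending error */
		if (sk_wait_data(sk, &timeo, NULL) < 0)
			return NULL;	/* e.g. disconnected while sleeping */
	}
	return skb;
}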

/**
 *	__sk_mem_raise_allocated - increase memory_allocated
 *	@sk: socket
 *	@size: memory size to allocate
 *	@amt: pages to allocate
 *	@kind: allocation type
 *
 *	Similar to __sk_mem_schedule(), but does not update sk_forward_alloc.
 *
 *	Unlike the globally shared limits among the sockets under the same
 *	protocol, consuming the budget of a memcg won't have a direct effect
 *	on other ones. So be optimistic about the memcg's tolerance, and
 *	leave it to the callers to decide whether or not to raise allocated,
 *	via sk_under_memory_pressure() or its variants.
 */
int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
{}

/**
 *	__sk_mem_schedule - increase sk_forward_alloc and memory_allocated
 *	@sk: socket
 *	@size: memory size to allocate
 *	@kind: allocation type
 *
 *	If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
 *	rmem allocation. This function assumes that protocols which have
 *	memory_pressure use sk_wmem_queued as write buffer accounting.
 */
int __sk_mem_schedule(struct sock *sk, int size, int kind)
{}
EXPORT_SYMBOL();
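
/*
 * Illustrative usage sketch (hypothetical _example name): receive paths
 * typically charge an skb against the protocol's memory limits through
 * the sk_rmem_schedule() wrapper (which ends up in __sk_mem_schedule()
 * above with SK_MEM_RECV) before queueing it.
 */
static inline int queue_rcv_example(struct sock *sk, struct sk_buff *skb)
{
	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return -ENOBUFS;	/* over the memory pressure limits */

	skb_set_owner_r(skb, sk);	/* charge sk_rmem_alloc, set sock_rfree */
	skb_queue_tail(&sk->sk_receive_queue, skb);	/* takes the queue lock */
	return 0;
}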

/**
 *	__sk_mem_reduce_allocated - reclaim memory_allocated
 *	@sk: socket
 *	@amount: number of quanta
 *
 *	Similar to __sk_mem_reclaim(), but does not update sk_forward_alloc
 */
void __sk_mem_reduce_allocated(struct sock *sk, int amount)
{}

/**
 *	__sk_mem_reclaim - reclaim sk_forward_alloc and memory_allocated
 *	@sk: socket
 *	@amount: number of bytes (rounded down to a PAGE_SIZE multiple)
 */
void __sk_mem_reclaim(struct sock *sk, int amount)
{}
EXPORT_SYMBOL();

int sk_set_peek_off(struct sock *sk, int val)
{}
EXPORT_SYMBOL_GPL();

/*
 * Set of default routines for initialising struct proto_ops when
 * the protocol does not support a particular function. In certain
 * cases where it makes no sense for a protocol to have a "do nothing"
 * function, some default processing is provided.
 */

int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{}
EXPORT_SYMBOL();

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
		    int len, int flags)
{}
EXPORT_SYMBOL();

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{}
EXPORT_SYMBOL();

int sock_no_accept(struct socket *sock, struct socket *newsock,
		   struct proto_accept_arg *arg)
{}
EXPORT_SYMBOL();

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
		    int peer)
{}
EXPORT_SYMBOL();

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{}
EXPORT_SYMBOL();

int sock_no_listen(struct socket *sock, int backlog)
{}
EXPORT_SYMBOL();

int sock_no_shutdown(struct socket *sock, int how)
{}
EXPORT_SYMBOL();

int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
{}
EXPORT_SYMBOL();

int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *m, size_t len)
{}
EXPORT_SYMBOL();

int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
		    int flags)
{}
EXPORT_SYMBOL();

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{}
EXPORT_SYMBOL();
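
/*
 * Illustrative sketch: a hypothetical family that supports neither
 * accept() nor listen() would wire the sock_no_*() stubs above into its
 * proto_ops so unsupported calls fail cleanly (conventionally with
 * -EOPNOTSUPP, or -ENODEV for mmap). The table below is an assumption
 * for illustration, not a real family; required callbacks are omitted.
 */
static const struct proto_ops example_dgram_ops __maybe_unused = {
	.family		= AF_UNSPEC,		/* placeholder family */
	.owner		= THIS_MODULE,
	.accept		= sock_no_accept,
	.listen		= sock_no_listen,
	.socketpair	= sock_no_socketpair,
	.mmap		= sock_no_mmap,
};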

/*
 * When a file is received (via SCM_RIGHTS, etc), we must bump the
 * various sock-based usage counts.
 */
void __receive_sock(struct file *file)
{}

/*
 *	Default Socket Callbacks
 */

static void sock_def_wakeup(struct sock *sk)
{}

static void sock_def_error_report(struct sock *sk)
{}

void sock_def_readable(struct sock *sk)
{}

static void sock_def_write_space(struct sock *sk)
{}

/* An optimised version of sock_def_write_space(), should only be called
 * for SOCK_RCU_FREE sockets under an RCU read section and after putting
 * ->sk_wmem_alloc.
 */
static void sock_def_write_space_wfree(struct sock *sk)
{}

static void sock_def_destruct(struct sock *sk)
{}

void sk_send_sigurg(struct sock *sk)
{}
EXPORT_SYMBOL();

void sk_reset_timer(struct sock *sk, struct timer_list *timer,
		    unsigned long expires)
{}
EXPORT_SYMBOL();

void sk_stop_timer(struct sock *sk, struct timer_list *timer)
{}
EXPORT_SYMBOL();

void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer)
{}
EXPORT_SYMBOL();

void sock_init_data_uid(struct socket *sock, struct sock *sk, kuid_t uid)
{}
EXPORT_SYMBOL();

void sock_init_data(struct socket *sock, struct sock *sk)
{}
EXPORT_SYMBOL();

void lock_sock_nested(struct sock *sk, int subclass)
{}
EXPORT_SYMBOL();

void release_sock(struct sock *sk)
{}
EXPORT_SYMBOL();

bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock)
{}
EXPORT_SYMBOL();

int sock_gettstamp(struct socket *sock, void __user *userstamp,
		   bool timeval, bool time32)
{}
EXPORT_SYMBOL();

void sock_enable_timestamp(struct sock *sk, enum sock_flags flag)
{}

int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
		       int level, int type)
{}
EXPORT_SYMBOL();

/*
 *	Get a socket option on a socket.
 *
 *	FIX: POSIX 1003.1g is very ambiguous here. It states that
 *	asynchronous errors should be reported by getsockopt. We assume
 *	this means if you specify SO_ERROR (otherwise what's the point of it?).
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen)
{}
EXPORT_SYMBOL();
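
/*
 * Illustrative userspace counterpart of the SO_ERROR convention described
 * above: after a non-blocking connect() reports writability via poll(),
 * the deferred asynchronous error, if any, is fetched with getsockopt():
 *
 *	int err = 0;
 *	socklen_t len = sizeof(err);
 *
 *	if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) == 0 && err)
 *		errno = err;	(the connect() failed asynchronously)
 */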

int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
			int flags)
{}
EXPORT_SYMBOL();

/*
 *	Set socket options on an inet socket.
 */
int sock_common_setsockopt(struct socket *sock, int level, int optname,
			   sockptr_t optval, unsigned int optlen)
{}
EXPORT_SYMBOL();

void sk_common_release(struct sock *sk)
{}
EXPORT_SYMBOL();

void sk_get_meminfo(const struct sock *sk, u32 *mem)
{}

#ifdef CONFIG_PROC_FS
static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{}
EXPORT_SYMBOL_GPL();

int sock_inuse_get(struct net *net)
{}
EXPORT_SYMBOL_GPL();

static int __net_init sock_inuse_init_net(struct net *net)
{}

static void __net_exit sock_inuse_exit_net(struct net *net)
{}

static struct pernet_operations net_inuse_ops =;

static __init int net_inuse_init(void)
{}

core_initcall(net_inuse_init);

static int assign_proto_idx(struct proto *prot)
{}

static void release_proto_idx(struct proto *prot)
{}
#else
static inline int assign_proto_idx(struct proto *prot)
{
	return 0;
}

static inline void release_proto_idx(struct proto *prot)
{
}

#endif

static void tw_prot_cleanup(struct timewait_sock_ops *twsk_prot)
{}

static int tw_prot_init(const struct proto *prot)
{}

static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
{}

static int req_prot_init(const struct proto *prot)
{}

int proto_register(struct proto *prot, int alloc_slab)
{}
EXPORT_SYMBOL();

void proto_unregister(struct proto *prot)
{}
EXPORT_SYMBOL();

int sock_load_diag_module(int family, int protocol)
{}
EXPORT_SYMBOL();

#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(proto_list_mutex)
{}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{}

static void proto_seq_stop(struct seq_file *seq, void *v)
	__releases(proto_list_mutex)
{}

static char proto_method_implemented(const void *method)
{}
static long sock_prot_memory_allocated(struct proto *proto)
{}

static const char *sock_prot_memory_pressure(struct proto *proto)
{}

static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{}

static int proto_seq_show(struct seq_file *seq, void *v)
{}

static const struct seq_operations proto_seq_ops =;

static __net_init int proto_init_net(struct net *net)
{}

static __net_exit void proto_exit_net(struct net *net)
{}


static __net_initdata struct pernet_operations proto_net_ops =;

static int __init proto_init(void)
{}

subsys_initcall(proto_init);

#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_NET_RX_BUSY_POLL
bool sk_busy_loop_end(void *p, unsigned long start_time)
{}
EXPORT_SYMBOL();
#endif /* CONFIG_NET_RX_BUSY_POLL */

int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len)
{}
EXPORT_SYMBOL();

/* Copy 'size' bytes from userspace and return 'size' back to userspace */
int sock_ioctl_inout(struct sock *sk, unsigned int cmd,
		     void __user *arg, void *karg, size_t size)
{}
EXPORT_SYMBOL();

/* This is the most common ioctl prep function, where the result (4 bytes) is
 * copied back to userspace if the ioctl() returns successfully. No input
 * argument is copied from userspace.
 */
static int sock_ioctl_out(struct sock *sk, unsigned int cmd, void __user *arg)
{}

/* A wrapper around sock ioctls, which copies the data from userspace
 * (depending on the protocol/ioctl), and copies back the result to userspace.
 * The main motivation for this function is to pass kernel memory to the
 * protocol ioctl callbacks, instead of userspace memory.
 */
int sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{}
EXPORT_SYMBOL();
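
/*
 * Illustrative sketch of the "out" prep pattern described above
 * (hypothetical _example name, assuming the usual shape): the protocol's
 * ioctl handler fills a kernel-resident int, which is copied back to
 * userspace only on success.
 */
static inline int sock_ioctl_out_example(struct sock *sk, unsigned int cmd,
					 void __user *arg)
{
	int ret, karg = 0;

	ret = READ_ONCE(sk->sk_prot)->ioctl(sk, cmd, &karg);
	if (ret)
		return ret;

	return put_user(karg, (int __user *)arg);	/* 4 bytes back out */
}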

static int __init sock_struct_check(void)
{}

core_initcall(sock_struct_check);