linux/net/ipv4/tcp.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <[email protected]>
 *		Mark Evans, <[email protected]>
 *		Corey Minyard <[email protected]>
 *		Florian La Roche, <[email protected]>
 *		Charles Hedrick, <[email protected]>
 *		Linus Torvalds, <[email protected]>
 *		Alan Cox, <[email protected]>
 *		Matthew Dillon, <[email protected]>
 *		Arnt Gulbrandsen, <[email protected]>
 *		Jorge Cwik, <[email protected]>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() calls
 *		Alan Cox	:	Set the ACK bit on a reset
 *		Alan Cox	:	Stopped it crashing if it closed while
 *					sk->inuse=1 and was trying to connect
 *					(tcp_err()).
 *		Alan Cox	:	All icmp error handling was broken;
 *					pointers passed were wrong and the
 *					socket was looked up backwards. Nobody
 *					tested any icmp error code, obviously.
 *		Alan Cox	:	tcp_err() now handled properly. It
 *					wakes people on errors. poll
 *					behaves and the icmp error race
 *					has gone by moving it into sock.c
 *		Alan Cox	:	tcp_send_reset() fixed to work for
 *					everything not just packets for
 *					unknown sockets.
 *		Alan Cox	:	tcp option processing.
 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
 *					syn rule wrong]
 *		Herp Rosmanith  :	More reset fixes
 *		Alan Cox	:	No longer acks invalid rst frames.
 *					Acking any kind of RST is right out.
 *		Alan Cox	:	Sets an ignore me flag on an rst
 *					receive otherwise odd bits of prattle
 *					escape still
 *		Alan Cox	:	Fixed another acking RST frame bug.
 *					Should stop LAN workplace lockups.
 *		Alan Cox	: 	Some tidyups using the new skb list
 *					facilities
 *		Alan Cox	:	sk->keepopen now seems to work
 *		Alan Cox	:	Pulls options out correctly on accepts
 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *					bit to skb ops.
 *		Alan Cox	:	Tidied tcp_data to avoid a potential
 *					nasty.
 *		Alan Cox	:	Added some better commenting, as the
 *					tcp is hard to follow
 *		Alan Cox	:	Removed incorrect check for 20 * psh
 *	Michael O'Reilly	:	ack < copied bug fix.
 *	Johannes Stille		:	Misc tcp fixes (not all in yet).
 *		Alan Cox	:	FIN with no memory -> CRASH
 *		Alan Cox	:	Added socket option proto entries.
 *					Also added awareness of them to accept.
 *		Alan Cox	:	Added TCP options (SOL_TCP)
 *		Alan Cox	:	Switched wakeup calls to callbacks,
 *					so the kernel can layer network
 *					sockets.
 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
 *		Alan Cox	:	Handle FIN (more) properly (we hope).
 *		Alan Cox	:	RST frames sent on unsynchronised
 *					state ack error.
 *		Alan Cox	:	Put in missing check for SYN bit.
 *		Alan Cox	:	Added tcp_select_window() aka NET2E
 *					window non shrink trick.
 *		Alan Cox	:	Added a couple of small NET2E timer
 *					fixes
 *		Charles Hedrick :	TCP fixes
 *		Toomas Tamm	:	TCP window fixes
 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *		Charles Hedrick	:	Rewrote most of it to actually work
 *		Linus		:	Rewrote tcp_read() and URG handling
 *					completely
 *		Gerhard Koerting:	Fixed some missing timer handling
 *		Matthew Dillon  :	Reworked TCP machine states as per RFC
 *		Gerhard Koerting:	PC/TCP workarounds
 *		Adam Caldwell	:	Assorted timer/timing errors
 *		Matthew Dillon	:	Fixed another RST bug
 *		Alan Cox	:	Move to kernel side addressing changes.
 *		Alan Cox	:	Beginning work on TCP fastpathing
 *					(not yet usable)
 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
 *		Alan Cox	:	TCP fast path debugging
 *		Alan Cox	:	Window clamping
 *		Michael Riepe	:	Bug in tcp_check()
 *		Matt Dillon	:	More TCP improvements and RST bug fixes
 *		Matt Dillon	:	Yet more small nasties removed from the
 *					TCP code (Be very nice to this man if
 *					tcp finally works 100%) 8)
 *		Alan Cox	:	BSD accept semantics.
 *		Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 *		Michael Pall	:	Handle poll() after URG properly in
 *					all cases.
 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
 *					(multi URG PUSH broke rlogin).
 *		Michael Pall	:	Fix the multi URG PUSH problem in
 *					tcp_readable(), poll() after URG
 *					works now.
 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 *					BSD api.
 *		Alan Cox	:	Changed the semantics of sk->socket to
 *					fix a race and a signal problem with
 *					accept() and async I/O.
 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
 *		Craig I. Hagan  :	Allow for BSD compatible TIME_WAIT for
 *					clients/servers which listen in on
 *					fixed ports.
 *		Alan Cox	:	Cleaned the above up and shrank it to
 *					a sensible code size.
 *		Alan Cox	:	Self connect lockup fix.
 *		Alan Cox	:	No connect to multicast.
 *		Ross Biro	:	Close unaccepted children on master
 *					socket close.
 *		Alan Cox	:	Reset tracing code.
 *		Alan Cox	:	Spurious resets on shutdown.
 *		Alan Cox	:	Giant 15 minute/60 second timer error
 *		Alan Cox	:	Small whoops in polling before an
 *					accept.
 *		Alan Cox	:	Kept the state trace facility since
 *					it's handy for debugging.
 *		Alan Cox	:	More reset handler fixes.
 *		Alan Cox	:	Started rewriting the code based on
 *					the RFC's for other useful protocol
 *					references see: Comer, KA9Q NOS, and
 *					for a reference on the difference
 *					between specifications and how BSD
 *					works see the 4.4lite source.
 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
 *					close.
 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
 *		Alan Cox	:	Reimplemented timers as per the RFC
 *					and using multiple timers for sanity.
 *		Alan Cox	:	Small bug fixes, and a lot of new
 *					comments.
 *		Alan Cox	:	Fixed dual reader crash by locking
 *					the buffers (much like datagram.c)
 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 *					now gets fed up with retrying when it
 *					receives no answer (not even 'no space').
 *		Alan Cox	:	Extracted closing code better
 *		Alan Cox	:	Fixed the closing state machine to
 *					resemble the RFC.
 *		Alan Cox	:	More 'per spec' fixes.
 *		Jorge Cwik	:	Even faster checksumming.
 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 *					only frames. At least one pc tcp stack
 *					generates them.
 *		Alan Cox	:	Cache last socket.
 *		Alan Cox	:	Per route irtt.
 *		Matt Day	:	poll()->select() match BSD precisely on error
 *		Alan Cox	:	New buffers
 *		Marc Tamsky	:	Various sk->prot->retransmits and
 *					sk->retransmits misupdating fixed.
 *					Fixed tcp_write_timeout: stuck close,
 *					and TCP syn retries gets used now.
 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 *					ack if state is TCP_CLOSED.
 *		Alan Cox	:	Look up device on a retransmit - routes may
 *					change. Doesn't yet cope with MSS shrink right
 *					but it's a start!
 *		Marc Tamsky	:	Closing in closing fixes.
 *		Mike Shaver	:	RFC1122 verifications.
 *		Alan Cox	:	rcv_saddr errors.
 *		Alan Cox	:	Block double connect().
 *		Alan Cox	:	Small hooks for enSKIP.
 *		Alexey Kuznetsov:	Path MTU discovery.
 *		Alan Cox	:	Support soft errors.
 *		Alan Cox	:	Fix MTU discovery pathological case
 *					when the remote claims no mtu!
 *		Marc Tamsky	:	TCP_CLOSE fix.
 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 *					window but wrong (fixes NT lpd problems)
 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 *		Joerg Reuter	:	No modification of locked buffers in
 *					tcp_do_retransmit()
 *		Eric Schenk	:	Changed receiver side silly window
 *					avoidance algorithm to BSD style
 *					algorithm. This doubles throughput
 *					against machines running Solaris,
 *					and seems to result in general
 *					improvement.
 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Mike McLagan		:	Routing by source
 *		Keith Owens	:	Do proper merging with partial SKB's in
 *					tcp_do_sendmsg to avoid burstiness.
 *		Eric Schenk	:	Fix fast close down bug with
 *					shutdown() followed by close().
 *		Andi Kleen 	:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 *					lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 *					csum_and_copy_from_user() if possible.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING.  Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown.  There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */
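
/* Editor's note: the state constants described above are declared in
 * include/net/tcp_states.h (pulled in via <net/tcp.h>); an abridged
 * sketch of that enum, for reference:
 *
 *	enum {
 *		TCP_ESTABLISHED = 1,
 *		TCP_SYN_SENT,
 *		TCP_SYN_RECV,
 *		TCP_FIN_WAIT1,
 *		TCP_FIN_WAIT2,
 *		TCP_TIME_WAIT,
 *		TCP_CLOSE,
 *		TCP_CLOSE_WAIT,
 *		TCP_LAST_ACK,
 *		TCP_LISTEN,
 *		TCP_CLOSING,
 *		TCP_NEW_SYN_RECV,
 *		...
 *		TCP_MAX_STATES
 *	};
 */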

#define pr_fmt(fmt) "TCP: " fmt

#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/inet_diag.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>
#include <linux/splice.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/random.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/errqueue.h>
#include <linux/static_key.h>
#include <linux/btf.h>

#include <net/icmp.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include <net/proto_memory.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/sock.h>
#include <net/rstreason.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <net/busy_poll.h>
#include <net/hotdata.h>
#include <trace/events/tcp.h>
#include <net/rps.h>

/* Track pending CMSGs. */
enum {
	TCP_CMSG_INQ = 1,
	TCP_CMSG_TS = 2
};

DEFINE_PER_CPU(unsigned int, tcp_orphan_count);
EXPORT_PER_CPU_SYMBOL_GPL(tcp_orphan_count);

DEFINE_PER_CPU(u32, tcp_tw_isn);
EXPORT_PER_CPU_SYMBOL_GPL(tcp_tw_isn);

long sysctl_tcp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_mem);

atomic_long_t tcp_memory_allocated ____cacheline_aligned_in_smp;	/* Current allocated memory. */
EXPORT_SYMBOL(tcp_memory_allocated);
DEFINE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);
EXPORT_PER_CPU_SYMBOL_GPL(tcp_memory_per_cpu_fw_alloc);

#if IS_ENABLED(CONFIG_SMC)
DEFINE_STATIC_KEY_FALSE(tcp_have_smc);
EXPORT_SYMBOL(tcp_have_smc);
#endif

/*
 * Current number of TCP sockets.
 */
struct percpu_counter tcp_sockets_allocated ____cacheline_aligned_in_smp;
EXPORT_SYMBOL(tcp_sockets_allocated);

/*
 * TCP splice context
 */
struct tcp_splice_state {
	struct pipe_inode_info *pipe;
	size_t len;
	unsigned int flags;
};

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non-atomically.
 * All the __sk_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
unsigned long tcp_memory_pressure __read_mostly;
EXPORT_SYMBOL_GPL(tcp_memory_pressure);

void tcp_enter_memory_pressure(struct sock *sk)
{}
EXPORT_SYMBOL_GPL(tcp_enter_memory_pressure);

void tcp_leave_memory_pressure(struct sock *sk)
{}
EXPORT_SYMBOL_GPL(tcp_leave_memory_pressure);

/* Convert seconds to retransmits based on initial and max timeout */
static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
{}
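
/* The conversion above walks the retransmission schedule: each retry
 * doubles the timeout until rto_max caps it, and counts how many retries
 * fit in the requested number of seconds.  A sketch of the idea (editor's
 * simplification, not necessarily the exact body):
 *
 *	u8 res = 0;
 *
 *	if (seconds > 0) {
 *		int period = timeout;
 *
 *		res = 1;
 *		while (seconds > period && res < 255) {
 *			res++;
 *			timeout <<= 1;
 *			if (timeout > rto_max)
 *				timeout = rto_max;
 *			period += timeout;
 *		}
 *	}
 *	return res;
 */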

/* Convert retransmits to seconds based on initial and max timeout */
static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
{}

static u64 tcp_compute_delivery_rate(const struct tcp_sock *tp)
{}

/* Address-family independent initialization for a tcp_sock.
 *
 * NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc(), so they need not be done here.
 */
void tcp_init_sock(struct sock *sk)
{}
EXPORT_SYMBOL(tcp_init_sock);

static void tcp_tx_timestamp(struct sock *sk, u16 tsflags)
{}

static bool tcp_stream_is_readable(struct sock *sk, int target)
{}

/*
 *	Wait for a TCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
__poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{}
EXPORT_SYMBOL(tcp_poll);

int tcp_ioctl(struct sock *sk, int cmd, int *karg)
{}
EXPORT_SYMBOL(tcp_ioctl);

void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{}

static inline bool forced_push(const struct tcp_sock *tp)
{}

void tcp_skb_entail(struct sock *sk, struct sk_buff *skb)
{}

static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
{}

/* If a not yet filled skb is pushed, do not send it if
 * we have data packets in Qdisc or NIC queues :
 * Because TX completion will happen shortly, it gives a chance
 * to coalesce future sendmsg() payload into this skb, without
 * need for a timer, and with no latency trade off.
 * As packets containing data payload have a bigger truesize
 * than pure ack (dataless) packets, the last checks prevent
 * autocorking if we only have an ACK in Qdisc/NIC queues,
 * or if TX completion was delayed after we processed ACK packet.
 */
static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
				int size_goal)
{}
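
/* The decision reduces to a few cheap tests; roughly (editor's sketch,
 * not the verbatim body):
 *
 *	return skb->len < size_goal &&
 *	       sock_net(sk)->ipv4.sysctl_tcp_autocorking &&
 *	       !tcp_rtx_queue_empty(sk) &&
 *	       refcount_read(&sk->sk_wmem_alloc) > skb->truesize;
 *
 * i.e. only hold back a not-yet-full skb while earlier data is still in
 * flight or queued below us, so a pending TX completion (or ACK) will
 * push it for free.
 */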

void tcp_push(struct sock *sk, int flags, int mss_now,
	      int nonagle, int size_goal)
{}

static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
				unsigned int offset, size_t len)
{}

static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
{}

/**
 *  tcp_splice_read - splice data from TCP socket to a pipe
 * @sock:	socket to splice from
 * @ppos:	position (not valid)
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will read pages from given socket and fill them into a pipe.
 *
 **/
ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags)
{}
EXPORT_SYMBOL(tcp_splice_read);
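
/* Editor's example of the matching user-space pattern: move payload from
 * a connected TCP socket to a file through a pipe, with no copy to user
 * buffers (sock_fd and file_fd are assumed to exist):
 *
 *	int pfd[2];
 *	ssize_t n;
 *
 *	if (pipe(pfd) < 0)
 *		err(1, "pipe");
 *	while ((n = splice(sock_fd, NULL, pfd[1], NULL, 65536,
 *			   SPLICE_F_MOVE | SPLICE_F_MORE)) > 0)
 *		splice(pfd[0], NULL, file_fd, NULL, n, SPLICE_F_MOVE);
 */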

struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp,
				     bool force_schedule)
{}

static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
				       int large_allowed)
{}

int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
{}

/* In some cases, sendmsg() could have added an skb to the write queue,
 * but failed adding payload on it. We need to remove it to consume less
 * memory, but more importantly be able to generate EPOLLOUT for Edge Trigger
 * epoll() users. Another reason is that tcp_write_xmit() does not like
 * finding an empty skb in the write queue.
 */
void tcp_remove_empty_skb(struct sock *sk)
{}

/* skb changing from pure zc to mixed, must charge zc */
static int tcp_downgrade_zcopy_pure(struct sock *sk, struct sk_buff *skb)
{}
int tcp_wmem_schedule(struct sock *sk, int copy)
{}

void tcp_free_fastopen_req(struct tcp_sock *tp)
{}

int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
			 size_t size, struct ubuf_info *uarg)
{}
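
/* User space reaches the fastopen path via MSG_FASTOPEN (editor's
 * example; fd is a fresh, unconnected TCP socket and srv a prepared
 * sockaddr, with the client bit enabled in net.ipv4.tcp_fastopen):
 *
 *	sendto(fd, data, len, MSG_FASTOPEN,
 *	       (struct sockaddr *)&srv, sizeof(srv));
 *
 * This connects and, when a valid TFO cookie is cached, carries data in
 * the SYN; otherwise it degrades to a normal three-way handshake.
 */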

int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
{}
EXPORT_SYMBOL_GPL(tcp_sendmsg_locked);

int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{}
EXPORT_SYMBOL(tcp_sendmsg);

void tcp_splice_eof(struct socket *sock)
{}
EXPORT_SYMBOL_GPL(tcp_splice_eof);

/*
 *	Handle reading urgent data. BSD has very simple semantics for
 *	this, no blocking and very strange errors 8)
 */

static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
{}
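
/* From user space the urgent byte is fetched with MSG_OOB, unless
 * SO_OOBINLINE folds it into the normal stream (editor's example):
 *
 *	char oob;
 *
 *	if (recv(fd, &oob, 1, MSG_OOB) == 1)
 *		handle_urgent(oob);	// hypothetical handler
 *	// with no urgent data pending the call fails (typically EINVAL)
 *
 * Matching the note above, the call never blocks.
 */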

static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
{}

/* Clean up the receive buffer for full frames taken by the user,
 * then send an ACK if necessary.  COPIED is the number of bytes
 * tcp_recvmsg has given to the user so far; it speeds up the
 * calculation of whether or not we must ACK for the sake of
 * a window update.
 */
void __tcp_cleanup_rbuf(struct sock *sk, int copied)
{}

void tcp_cleanup_rbuf(struct sock *sk, int copied)
{}

static void tcp_eat_recv_skb(struct sock *sk, struct sk_buff *skb)
{}

struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
{}
EXPORT_SYMBOL(tcp_recv_skb);

/*
 * This routine provides an alternative to tcp_recvmsg() for routines
 * that would like to handle copying from skbuffs directly in 'sendfile'
 * fashion.
 * Note:
 *	- It is assumed that the socket was locked by the caller.
 *	- The routine does not block.
 *	- At present, there is no support for reading OOB data
 *	  or for 'peeking' the socket using this routine
 *	  (although both would be easy to implement).
 */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor)
{}
EXPORT_SYMBOL(tcp_read_sock);
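
/* A recv_actor passed to tcp_read_sock() has this shape (editor's
 * sketch; my_actor and consume_bytes() are hypothetical):
 *
 *	static int my_actor(read_descriptor_t *desc, struct sk_buff *skb,
 *			    unsigned int offset, size_t len)
 *	{
 *		size_t used = consume_bytes(desc->arg.data,
 *					    skb, offset, len);
 *
 *		return used;	// bytes consumed; <= 0 stops the loop
 *	}
 *
 * tcp_read_sock() keeps invoking the actor across the receive queue and
 * advances copied_seq by whatever the actor reports as consumed.
 */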

int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
{}
EXPORT_SYMBOL(tcp_read_skb);

void tcp_read_done(struct sock *sk, size_t len)
{}
EXPORT_SYMBOL(tcp_read_done);

int tcp_peek_len(struct socket *sock)
{}
EXPORT_SYMBOL(tcp_peek_len);

/* Make sure sk_rcvbuf is big enough to satisfy SO_RCVLOWAT hint */
int tcp_set_rcvlowat(struct sock *sk, int val)
{}
EXPORT_SYMBOL(tcp_set_rcvlowat);
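
/* Editor's example of the user-space side:
 *
 *	int lowat = 64 * 1024;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, &lowat, sizeof(lowat));
 *
 * poll()/epoll then report the socket readable only once ~64KB (or EOF)
 * is queued, and this hook grows sk_rcvbuf as needed so the advertised
 * window lets the peer actually send that much.
 */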

void tcp_update_recv_tstamps(struct sk_buff *skb,
			     struct scm_timestamping_internal *tss)
{}

#ifdef CONFIG_MMU
static const struct vm_operations_struct tcp_vm_ops = {
};

int tcp_mmap(struct file *file, struct socket *sock,
	     struct vm_area_struct *vma)
{}
EXPORT_SYMBOL(tcp_mmap);

static skb_frag_t *skb_advance_to_frag(struct sk_buff *skb, u32 offset_skb,
				       u32 *offset_frag)
{}

static bool can_map_frag(const skb_frag_t *frag)
{}

static int find_next_mappable_frag(const skb_frag_t *frag,
				   int remaining_in_skb)
{}

static void tcp_zerocopy_set_hint_for_skb(struct sock *sk,
					  struct tcp_zerocopy_receive *zc,
					  struct sk_buff *skb, u32 offset)
{}

static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
			      int flags, struct scm_timestamping_internal *tss,
			      int *cmsg_flags);
static int receive_fallback_to_copy(struct sock *sk,
				    struct tcp_zerocopy_receive *zc, int inq,
				    struct scm_timestamping_internal *tss)
{}

static int tcp_copy_straggler_data(struct tcp_zerocopy_receive *zc,
				   struct sk_buff *skb, u32 copylen,
				   u32 *offset, u32 *seq)
{}

static int tcp_zc_handle_leftover(struct tcp_zerocopy_receive *zc,
				  struct sock *sk,
				  struct sk_buff *skb,
				  u32 *seq,
				  s32 copybuf_len,
				  struct scm_timestamping_internal *tss)
{}

static int tcp_zerocopy_vm_insert_batch_error(struct vm_area_struct *vma,
					      struct page **pending_pages,
					      unsigned long pages_remaining,
					      unsigned long *address,
					      u32 *length,
					      u32 *seq,
					      struct tcp_zerocopy_receive *zc,
					      u32 total_bytes_to_map,
					      int err)
{}

static int tcp_zerocopy_vm_insert_batch(struct vm_area_struct *vma,
					struct page **pages,
					unsigned int pages_to_map,
					unsigned long *address,
					u32 *length,
					u32 *seq,
					struct tcp_zerocopy_receive *zc,
					u32 total_bytes_to_map)
{}

#define TCP_VALID_ZC_MSG_FLAGS	(TCP_INQ)
static void tcp_zc_finalize_rx_tstamp(struct sock *sk,
				      struct tcp_zerocopy_receive *zc,
				      struct scm_timestamping_internal *tss)
{}

static struct vm_area_struct *find_tcp_vma(struct mm_struct *mm,
					   unsigned long address,
					   bool *mmap_locked)
{}

#define TCP_ZEROCOPY_PAGE_BATCH_SIZE	32
static int tcp_zerocopy_receive(struct sock *sk,
				struct tcp_zerocopy_receive *zc,
				struct scm_timestamping_internal *tss)
{}
#endif
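
/* The zerocopy-receive path above is driven from user space by mmap()ing
 * the socket and asking TCP to map payload pages into that region
 * (editor's sketch, modelled on tools/testing/selftests/net/tcp_mmap.c):
 *
 *	size_t chunk = 1 << 20;
 *	void *addr = mmap(NULL, chunk, PROT_READ, MAP_SHARED, fd, 0);
 *	struct tcp_zerocopy_receive zc = {
 *		.address = (__u64)(unsigned long)addr,
 *		.length  = chunk,
 *	};
 *	socklen_t zc_len = sizeof(zc);
 *
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE,
 *		       &zc, &zc_len) == 0) {
 *		// zc.length bytes are now mapped at addr;
 *		// zc.recv_skip_hint bytes must still be recv()ed normally
 *	}
 */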

/* Similar to __sock_recv_timestamp, but does not require an skb */
void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
			struct scm_timestamping_internal *tss)
{}

static int tcp_inq_hint(struct sock *sk)
{}

/*
 *	This routine copies from a sock struct into the user buffer.
 *
 *	Technical note: in 2.3 we work on _locked_ socket, so that
 *	tricks with *seq access order and skb->users are not required.
 *	Probably, code can be easily improved even more.
 */

static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
			      int flags, struct scm_timestamping_internal *tss,
			      int *cmsg_flags)
{}

int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags,
		int *addr_len)
{}
EXPORT_SYMBOL(tcp_recvmsg);

void tcp_set_state(struct sock *sk, int state)
{}
EXPORT_SYMBOL_GPL(tcp_set_state);

/*
 *	State processing on a close. This implements the state shift for
 *	sending our FIN frame. Note that we only send a FIN for some
 *	states. A shutdown() may have already sent the FIN, or we may be
 *	closed.
 */

static const unsigned char new_state[16] = {
  /* current state:        new state:      action:	*/
  [0 /* (Invalid) */]	= TCP_CLOSE,
  [TCP_ESTABLISHED]	= TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  [TCP_SYN_SENT]	= TCP_CLOSE,
  [TCP_SYN_RECV]	= TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  [TCP_FIN_WAIT1]	= TCP_FIN_WAIT1,
  [TCP_FIN_WAIT2]	= TCP_FIN_WAIT2,
  [TCP_TIME_WAIT]	= TCP_CLOSE,
  [TCP_CLOSE]		= TCP_CLOSE,
  [TCP_CLOSE_WAIT]	= TCP_LAST_ACK  | TCP_ACTION_FIN,
  [TCP_LAST_ACK]	= TCP_LAST_ACK,
  [TCP_LISTEN]		= TCP_CLOSE,
  [TCP_CLOSING]		= TCP_CLOSING,
  [TCP_NEW_SYN_RECV]	= TCP_CLOSE,	/* should not happen ! */
};

static int tcp_close_state(struct sock *sk)
{}

/*
 *	Shutdown the sending side of a connection. Much like close except
 *	that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD).
 */

void tcp_shutdown(struct sock *sk, int how)
{}
EXPORT_SYMBOL(tcp_shutdown);
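
/* The classic user-space use is a write-side half close (editor's
 * example; consume() is hypothetical):
 *
 *	char buf[4096];
 *	ssize_t n;
 *
 *	shutdown(fd, SHUT_WR);		// send our FIN, keep receiving
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		consume(buf, n);
 *	close(fd);			// peer has sent its FIN too
 */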

int tcp_orphan_count_sum(void)
{}

static int tcp_orphan_cache;
static struct timer_list tcp_orphan_timer;
#define TCP_ORPHAN_TIMER_PERIOD	msecs_to_jiffies(100)

static void tcp_orphan_update(struct timer_list *unused)
{}

static bool tcp_too_many_orphans(int shift)
{}

static bool tcp_out_of_memory(const struct sock *sk)
{}

bool tcp_check_oom(const struct sock *sk, int shift)
{}

void __tcp_close(struct sock *sk, long timeout)
{}

void tcp_close(struct sock *sk, long timeout)
{}
EXPORT_SYMBOL(tcp_close);

/* These states need RST on ABORT according to RFC793 */

static inline bool tcp_need_reset(int state)
{}
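
/* These are RFC 793's synchronized states, minus those that already
 * imply an orderly teardown; the test is a simple state-mask check,
 * roughly (editor's sketch):
 *
 *	return (1 << state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT |
 *			       TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 |
 *			       TCPF_SYN_RECV);
 */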

static void tcp_rtx_queue_purge(struct sock *sk)
{}

void tcp_write_queue_purge(struct sock *sk)
{}

int tcp_disconnect(struct sock *sk, int flags)
{}
EXPORT_SYMBOL(tcp_disconnect);

static inline bool tcp_can_repair_sock(const struct sock *sk)
{}

static int tcp_repair_set_window(struct tcp_sock *tp, sockptr_t optbuf, int len)
{}

static int tcp_repair_options_est(struct sock *sk, sockptr_t optbuf,
		unsigned int len)
{}

DEFINE_STATIC_KEY_FALSE(tcp_tx_delay_enabled);
EXPORT_SYMBOL(tcp_tx_delay_enabled);

static void tcp_enable_tx_delay(void)
{}

/* When set indicates to always queue non-full frames.  Later the user clears
 * this option and we transmit any pending partial frames in the queue.  This is
 * meant to be used alongside sendfile() to get properly filled frames when the
 * user (for example) must write out headers with a write() call first and then
 * use sendfile to send out the data parts.
 *
 * TCP_CORK can be set together with TCP_NODELAY and it is stronger than
 * TCP_NODELAY.
 */
void __tcp_sock_set_cork(struct sock *sk, bool on)
{}

void tcp_sock_set_cork(struct sock *sk, bool on)
{}
EXPORT_SYMBOL(tcp_sock_set_cork);
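
/* Editor's example of the usage pattern described above: cork, write the
 * header, stream the body with sendfile(2), then uncork to flush:
 *
 *	int on = 1, off = 0;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
 *	write(fd, hdr, hdr_len);
 *	sendfile(fd, file_fd, NULL, body_len);
 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
 */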

/* TCP_NODELAY is weaker than TCP_CORK, so this option set on a corked
 * socket is remembered, but it is not activated until the cork is cleared.
 *
 * However, when TCP_NODELAY is set we make an explicit push, which overrides
 * even TCP_CORK for currently queued segments.
 */
void __tcp_sock_set_nodelay(struct sock *sk, bool on)
{}

void tcp_sock_set_nodelay(struct sock *sk)
{}
EXPORT_SYMBOL(tcp_sock_set_nodelay);
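
/* Editor's example: latency-sensitive writers disable Nagle so small
 * writes leave immediately instead of waiting for outstanding ACKs:
 *
 *	int on = 1;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
 */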

static void __tcp_sock_set_quickack(struct sock *sk, int val)
{}

void tcp_sock_set_quickack(struct sock *sk, int val)
{}
EXPORT_SYMBOL(tcp_sock_set_quickack);

int tcp_sock_set_syncnt(struct sock *sk, int val)
{}
EXPORT_SYMBOL(tcp_sock_set_syncnt);

int tcp_sock_set_user_timeout(struct sock *sk, int val)
{}
EXPORT_SYMBOL(tcp_sock_set_user_timeout);

int tcp_sock_set_keepidle_locked(struct sock *sk, int val)
{}

int tcp_sock_set_keepidle(struct sock *sk, int val)
{}
EXPORT_SYMBOL(tcp_sock_set_keepidle);

int tcp_sock_set_keepintvl(struct sock *sk, int val)
{}
EXPORT_SYMBOL(tcp_sock_set_keepintvl);

int tcp_sock_set_keepcnt(struct sock *sk, int val)
{}
EXPORT_SYMBOL(tcp_sock_set_keepcnt);
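
/* Editor's example wiring the keepalive knobs up from user space: probe
 * after 60s idle, every 10s, and give up after 5 unanswered probes:
 *
 *	int on = 1, idle = 60, intvl = 10, cnt = 5;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
 */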

int tcp_set_window_clamp(struct sock *sk, int val)
{}

/*
 *	Socket option code for TCP.
 */
int do_tcp_setsockopt(struct sock *sk, int level, int optname,
		      sockptr_t optval, unsigned int optlen)
{}

int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		   unsigned int optlen)
{}
EXPORT_SYMBOL(tcp_setsockopt);

static void tcp_get_info_chrono_stats(const struct tcp_sock *tp,
				      struct tcp_info *info)
{}

/* Return information about state of tcp endpoint in API format. */
void tcp_get_info(struct sock *sk, struct tcp_info *info)
{}
EXPORT_SYMBOL_GPL(tcp_get_info);
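
/* User space retrieves this via getsockopt (editor's example):
 *
 *	struct tcp_info ti;
 *	socklen_t len = sizeof(ti);
 *
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
 *		printf("rtt=%uus retrans=%u\n",
 *		       ti.tcpi_rtt, ti.tcpi_total_retrans);
 *
 * The struct is effectively versioned by size: older binaries pass a
 * shorter len and get a truncated, layout-compatible prefix back.
 */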

static size_t tcp_opt_stats_get_size(void)
{}

/* Returns TTL or hop limit of an incoming packet from skb. */
static u8 tcp_skb_ttl_or_hop_limit(const struct sk_buff *skb)
{}

struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk,
					       const struct sk_buff *orig_skb,
					       const struct sk_buff *ack_skb)
{}

int do_tcp_getsockopt(struct sock *sk, int level,
		      int optname, sockptr_t optval, sockptr_t optlen)
{}

bool tcp_bpf_bypass_getsockopt(int level, int optname)
{}
EXPORT_SYMBOL(tcp_bpf_bypass_getsockopt);

int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
		   int __user *optlen)
{}
EXPORT_SYMBOL(tcp_getsockopt);

#ifdef CONFIG_TCP_MD5SIG
int tcp_md5_sigpool_id = -1;
EXPORT_SYMBOL_GPL(tcp_md5_sigpool_id);

int tcp_md5_alloc_sigpool(void)
{}

void tcp_md5_release_sigpool(void)
{}

void tcp_md5_add_sigpool(void)
{}

int tcp_md5_hash_key(struct tcp_sigpool *hp,
		     const struct tcp_md5sig_key *key)
{}
EXPORT_SYMBOL(tcp_md5_hash_key);

/* Called with rcu_read_lock() */
static enum skb_drop_reason
tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
		     const void *saddr, const void *daddr,
		     int family, int l3index, const __u8 *hash_location)
{}
#else
static inline enum skb_drop_reason
tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
		     const void *saddr, const void *daddr,
		     int family, int l3index, const __u8 *hash_location)
{
	return SKB_NOT_DROPPED_YET;
}

#endif

/* Called with rcu_read_lock() */
enum skb_drop_reason
tcp_inbound_hash(struct sock *sk, const struct request_sock *req,
		 const struct sk_buff *skb,
		 const void *saddr, const void *daddr,
		 int family, int dif, int sdif)
{}
EXPORT_SYMBOL_GPL(tcp_inbound_hash);

void tcp_done(struct sock *sk)
{}
EXPORT_SYMBOL_GPL(tcp_done);

int tcp_abort(struct sock *sk, int err)
{}
EXPORT_SYMBOL_GPL(tcp_abort);

extern struct tcp_congestion_ops tcp_reno;

static __initdata unsigned long thash_entries;
static int __init set_thash_entries(char *str)
{}
__setup("thash_entries=", set_thash_entries);

static void __init tcp_init_mem(void)
{}

static void __init tcp_struct_check(void)
{}

void __init tcp_init(void)
{}