linux/net/ipv4/tcp_ipv4.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *	     				Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after a
 *					year in a coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>
#include <net/rstreason.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/inetdevice.h>
#include <linux/btf_ids.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

#include <trace/events/tcp.h>

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

static DEFINE_PER_CPU(struct sock_bh_locked, ipv4_tcp_sk) = {
	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};

static u32 tcp_v4_init_seq(const struct sk_buff *skb)
{}

static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
{}
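
/* Editor's sketch (not the elided implementation above): both helpers
 * delegate to the secure_seq machinery from <net/secure_seq.h>. The initial
 * sequence number is derived from the 4-tuple of the incoming SYN, with the
 * addresses and ports swapped because the ISN is chosen for our reply
 * direction; tcp_v4_init_ts_off() does the same for the per-connection
 * timestamp offset via secure_tcp_ts_off(). The "_sketch" name is
 * illustrative only.
 */
static inline u32 tcp_v4_init_seq_sketch(const struct sk_buff *skb)
{
	return secure_tcp_seq(ip_hdr(skb)->daddr, ip_hdr(skb)->saddr,
			      tcp_hdr(skb)->dest, tcp_hdr(skb)->source);
}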

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);

static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			      int addr_len)
{}

/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{}
EXPORT_SYMBOL(tcp_v4_connect);

/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if the socket was owned by the
 * user at the time tcp_v4_err() was called to handle the ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);
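
/* Editor's sketch of the reaction described above, under the assumption
 * that the handler looks roughly like the usual PMTU dance: tcp_v4_err()
 * stashed the new MTU in tp->mtu_info, so we revalidate it against the
 * route, shrink the MSS with tcp_sync_mss(), and retransmit whatever no
 * longer fits on the wire ("fast path mtu discovery").
 */
static inline void tcp_v4_mtu_reduced_sketch(struct sock *sk)
{
	u32 mtu = READ_ONCE(tcp_sk(sk)->mtu_info);
	struct dst_entry *dst = inet_csk_update_pmtu(sk, mtu);

	if (!dst)
		return;
	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}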

static void do_redirect(struct sk_buff *skb, struct sock *sk)
{}


/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq, bool abort)
{}
EXPORT_SYMBOL(tcp_req_err);

/* TCP-LD (RFC 6069) logic */
void tcp_ld_RTO_revert(struct sock *sk, u32 seq)
{}
EXPORT_SYMBOL(tcp_ld_RTO_revert);

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment,
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped,
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */

int tcp_v4_err(struct sk_buff *skb, u32 info)
{}
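
/* Editor's sketch of the err convention documented above; this helper is
 * hypothetical and exists only to make the encoding concrete. Negative
 * values are hard errors to report to the user, positive values pack the
 * ICMP type and code into a single int.
 */
static inline int tcp_icmp_err_unpack_sketch(int err, u8 *type, u8 *code)
{
	if (err < 0)
		return -err;	/* errno: close the socket, report it */
	*type = err >> 8;	/* err == icmp type << 8 | icmp code */
	*code = err & 0xff;
	return 0;
}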

void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{}
EXPORT_SYMBOL(tcp_v4_send_check);
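
/* Editor's sketch: with checksum offload (CHECKSUM_PARTIAL) the stack only
 * seeds the IPv4 pseudo-header checksum and records where the NIC must
 * finish the job, which is what the elided helpers above arrange.
 * tcp_v4_check() is the csum_tcpudp_magic() wrapper from <net/tcp.h>.
 */
static inline void tcp_v4_fill_check_sketch(struct sk_buff *skb,
					    __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
}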

#define REPLY_OPTIONS_LEN

static bool tcp_v4_ao_sign_reset(const struct sock *sk, struct sk_buff *skb,
				 const struct tcp_ao_hdr *aoh,
				 struct ip_reply_arg *arg, struct tcphdr *reply,
				 __be32 reply_options[REPLY_OPTIONS_LEN])
{}

/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why do we NEVER use socket parameters (TOS, TTL,
 *		      etc.) for the reset?
 *	Answer: if a packet caused the RST, it is not for a socket
 *		existing in our system; if it is matched to a socket,
 *		it is just a duplicate segment or a bug in the other
 *		side's TCP.  So we build the reply based only on the
 *		parameters that arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb,
			      enum sk_rst_reason reason)
{}
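
/* Editor's sketch of the RFC 793 sequence/ack mirroring that "build the
 * reply based only on the parameters that arrived with the segment" boils
 * down to: if the offending segment carried an ACK, the RST reuses that
 * ack number as its own sequence number; otherwise the RST acknowledges
 * everything the segment occupied (payload plus SYN/FIN flags). seg_len is
 * the TCP payload length; the helper name is illustrative.
 */
static inline void tcp_rst_mirror_seqs_sketch(const struct tcphdr *th,
					      u32 seg_len, struct tcphdr *rst)
{
	if (th->ack) {
		rst->seq = th->ack_seq;
	} else {
		rst->ack = 1;
		rst->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				     seg_len);
	}
}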

/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
 * outside socket context, is certainly ugly. What can I do?
 */

static void tcp_v4_send_ack(const struct sock *sk,
			    struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_key *key,
			    int reply_flags, u8 tos, u32 txhash)
{}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{}

static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type,
			      struct sk_buff *syn_skb)
{}
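
/* Editor's sketch of the three steps the elided function performs: grab a
 * route for the request (if the caller did not pass one in), build the
 * SYN-ACK skb from the request_sock, and hand it to the IP layer. Error
 * accounting, TOS selection and IP options are omitted here.
 */
static inline int tcp_v4_send_synack_sketch(const struct sock *sk,
					    struct dst_entry *dst,
					    struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	struct sk_buff *skb;

	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, NULL, TCP_SYNACK_NORMAL, NULL);
	if (!skb)
		return -1;

	__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
	return ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
				     ireq->ir_rmt_addr, NULL,
				     inet_sk(sk)->tos);
}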

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

DEFINE_STATIC_KEY_DEFERRED_FALSE(tcp_md5_needed, HZ);
EXPORT_SYMBOL(tcp_md5_needed);
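
/* Editor's sketch: the deferred static key above lets the receive fast
 * path skip all MD5 work until the first key is installed anywhere on the
 * system. This is essentially what the tcp_md5_do_lookup() wrapper in
 * <net/tcp.h> does before falling through to the lookup defined below.
 */
static inline struct tcp_md5sig_key *
tcp_md5_lookup_gated_sketch(const struct sock *sk, int l3index,
			    const union tcp_md5_addr *addr, int family)
{
	if (!static_branch_unlikely(&tcp_md5_needed.key))
		return NULL;	/* no MD5 key configured anywhere yet */
	return __tcp_md5_do_lookup(sk, l3index, addr, family, false);
}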

static bool better_md5_match(struct tcp_md5sig_key *old, struct tcp_md5sig_key *new)
{}

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
					   const union tcp_md5_addr *addr,
					   int family, bool any_l3index)
{}
EXPORT_SYMBOL(__tcp_md5_do_lookup);

static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
						      const union tcp_md5_addr *addr,
						      int family, u8 prefixlen,
						      int l3index, u8 flags)
{}

struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk)
{}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

static int tcp_md5sig_info_add(struct sock *sk, gfp_t gfp)
{}

/* This can be called on a newly created socket, from other files */
static int __tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
			    int family, u8 prefixlen, int l3index, u8 flags,
			    const u8 *newkey, u8 newkeylen, gfp_t gfp)
{}

int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, u8 prefixlen, int l3index, u8 flags,
		   const u8 *newkey, u8 newkeylen)
{}
EXPORT_SYMBOL(tcp_md5_do_add);

int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr,
		     int family, u8 prefixlen, int l3index,
		     struct tcp_md5sig_key *key)
{}
EXPORT_SYMBOL(tcp_md5_key_copy);

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
		   u8 prefixlen, int l3index, u8 flags)
{}
EXPORT_SYMBOL(tcp_md5_do_del);

void tcp_clear_md5_list(struct sock *sk)
{}

static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
				 sockptr_t optval, int optlen)
{}

static int tcp_v4_md5_hash_headers(struct tcp_sigpool *hp,
				   __be32 daddr, __be32 saddr,
				   const struct tcphdr *th, int nbytes)
{}
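
/* Editor's note: RFC 2385 computes the MD5 digest over, in this order,
 * (1) an IPv4 pseudo-header, (2) the TCP header with its checksum field
 * zeroed, (3) the payload, and (4) the key itself. A sketch of step (1),
 * using struct tcp4_pseudohdr from <net/tcp.h>; nbytes is the full
 * segment length.
 */
static inline void tcp_v4_fill_pseudohdr_sketch(struct tcp4_pseudohdr *bp,
						__be32 saddr, __be32 daddr,
						int nbytes)
{
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);
}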

static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{}

int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk,
			const struct sk_buff *skb)
{}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

#endif

static void tcp_v4_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{}

static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
					  struct sk_buff *skb,
					  struct flowi *fl,
					  struct request_sock *req,
					  u32 tw_isn)
{}

struct request_sock_ops tcp_request_sock_ops __read_mostly =;

const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops =;

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{}
EXPORT_SYMBOL(tcp_v4_conn_request);


/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req)
{}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
{}

u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
			 struct tcphdr *th, u32 *cookie)
{}

INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *, u32));
/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{}
EXPORT_SYMBOL(tcp_v4_do_rcv);
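
/* Editor's sketch of the calling convention the comment above describes:
 * the softirq receive path takes the BH spinlock and either processes the
 * segment in place or, when a process context owns the socket, defers it
 * to the backlog. Simplified: the real path uses tcp_add_backlog() and
 * tracks skb drop reasons.
 */
static inline void tcp_v4_rcv_lock_sketch(struct sock *sk, struct sk_buff *skb)
{
	bh_lock_sock_nested(sk);
	if (!sock_owned_by_user(sk))
		tcp_v4_do_rcv(sk, skb);
	else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))
		kfree_skb(skb);		/* backlog limit hit: drop */
	bh_unlock_sock(sk);
}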

int tcp_v4_early_demux(struct sk_buff *skb)
{}

bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
		     enum skb_drop_reason *reason)
{}
EXPORT_SYMBOL(tcp_add_backlog);

int tcp_filter(struct sock *sk, struct sk_buff *skb)
{}
EXPORT_SYMBOL(tcp_filter);

static void tcp_v4_restore_cb(struct sk_buff *skb)
{}

static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
			   const struct tcphdr *th)
{}

/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{}

static struct timewait_sock_ops tcp_timewait_sock_ops =;

void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{}
EXPORT_SYMBOL(inet_sk_rx_dst_set);

const struct inet_connection_sock_af_ops ipv4_specific =;
EXPORT_SYMBOL(ipv4_specific);

#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific =;
#endif

/* NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc(), so they need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{}

#ifdef CONFIG_TCP_MD5SIG
static void tcp_md5sig_info_free_rcu(struct rcu_head *head)
{}
#endif

void tcp_v4_destroy_sock(struct sock *sk)
{}
EXPORT_SYMBOL(tcp_v4_destroy_sock);

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static unsigned short seq_file_family(const struct seq_file *seq);

static bool seq_sk_match(struct seq_file *seq, const struct sock *sk)
{}

/* Find a non-empty bucket (starting from st->bucket)
 * and return the first sk from it.
 */
static void *listening_get_first(struct seq_file *seq)
{}

/* Find the next sk of "cur" within the same bucket (i.e. st->bucket).
 * If "cur" is the last one in the st->bucket,
 * call listening_get_first() to return the first sk of the next
 * non-empty bucket.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{}

static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{}

static inline bool empty_bucket(struct inet_hashinfo *hinfo,
				const struct tcp_iter_state *st)
{}

/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{}

static void *established_get_next(struct seq_file *seq, void *cur)
{}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{}

static void *tcp_seek_last_pos(struct seq_file *seq)
{}

void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{}
EXPORT_SYMBOL(tcp_seq_start);

void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{}
EXPORT_SYMBOL(tcp_seq_next);

void tcp_seq_stop(struct seq_file *seq, void *v)
{}
EXPORT_SYMBOL(tcp_seq_stop);

static void get_openreq4(const struct request_sock *req,
			 struct seq_file *f, int i)
{}

static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{}

static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{}

#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{}
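
/* Editor's note on the output format: each line of /proc/net/tcp follows
 * the header
 *   sl  local_address rem_address   st tx_queue rx_queue tr tm->when retrnsmt   uid  timeout inode
 * where the addresses are hexadecimal IP:port pairs and "st" is the
 * numeric TCP state (e.g. 0A for TCP_LISTEN).
 */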

#ifdef CONFIG_BPF_SYSCALL
struct bpf_tcp_iter_state {};

struct bpf_iter__tcp {};

static int tcp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
			     struct sock_common *sk_common, uid_t uid)
{}

static void bpf_iter_tcp_put_batch(struct bpf_tcp_iter_state *iter)
{}

static int bpf_iter_tcp_realloc_batch(struct bpf_tcp_iter_state *iter,
				      unsigned int new_batch_sz)
{}

static unsigned int bpf_iter_tcp_listening_batch(struct seq_file *seq,
						 struct sock *start_sk)
{}

static unsigned int bpf_iter_tcp_established_batch(struct seq_file *seq,
						   struct sock *start_sk)
{}

static struct sock *bpf_iter_tcp_batch(struct seq_file *seq)
{}

static void *bpf_iter_tcp_seq_start(struct seq_file *seq, loff_t *pos)
{}

static void *bpf_iter_tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{}

static int bpf_iter_tcp_seq_show(struct seq_file *seq, void *v)
{}

static void bpf_iter_tcp_seq_stop(struct seq_file *seq, void *v)
{}

static const struct seq_operations bpf_iter_tcp_seq_ops =;
#endif
static unsigned short seq_file_family(const struct seq_file *seq)
{}

static const struct seq_operations tcp4_seq_ops =;

static struct tcp_seq_afinfo tcp4_seq_afinfo =;

static int __net_init tcp4_proc_init_net(struct net *net)
{}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{}

static struct pernet_operations tcp4_net_ops =;

int __init tcp4_proc_init(void)
{}

void tcp4_proc_exit(void)
{}
#endif /* CONFIG_PROC_FS */

/* @wake is one when sk_stream_write_space() calls us.
 * This sends EPOLLOUT only if notsent_bytes is below half the limit.
 * This mimics the strategy used in sock_def_write_space().
 */
bool tcp_stream_memory_free(const struct sock *sk, int wake)
{}
EXPORT_SYMBOL(tcp_stream_memory_free);
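
/* Editor's sketch of the half-limit trick described above: with @wake == 1
 * the shift doubles the unsent byte count, so the test only passes (and
 * EPOLLOUT is only signalled) once notsent_bytes has fallen below half of
 * the notsent_lowat limit.
 */
static inline bool tcp_stream_memory_free_sketch(const struct sock *sk,
						 int wake)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	u32 notsent = READ_ONCE(tp->write_seq) - READ_ONCE(tp->snd_nxt);

	return (notsent << wake) < tcp_notsent_lowat(tp);
}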

struct proto tcp_prot =;
EXPORT_SYMBOL(tcp_prot);

static void __net_exit tcp_sk_exit(struct net *net)
{}

static void __net_init tcp_set_hashinfo(struct net *net)
{}

static int __net_init tcp_sk_init(struct net *net)
{}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{}

static struct pernet_operations __net_initdata tcp_sk_ops =;

#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
DEFINE_BPF_ITER_FUNC(tcp, struct bpf_iter_meta *meta,
		     struct sock_common *sk_common, uid_t uid)

#define INIT_BATCH_SZ 16

static int bpf_iter_init_tcp(void *priv_data, struct bpf_iter_aux_info *aux)
{}

static void bpf_iter_fini_tcp(void *priv_data)
{}

static const struct bpf_iter_seq_info tcp_seq_info =;

static const struct bpf_func_proto *
bpf_iter_tcp_get_func_proto(enum bpf_func_id func_id,
			    const struct bpf_prog *prog)
{}

static struct bpf_iter_reg tcp_reg_info =;

static void __init bpf_iter_register(void)
{}

#endif

void __init tcp_v4_init(void)
{}