#define pr_fmt(fmt) …
#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>
#include <net/rstreason.h>
#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/inetdevice.h>
#include <linux/btf_ids.h>
#include <crypto/hash.h>
#include <linux/scatterlist.h>
#include <trace/events/tcp.h>
#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
__be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif
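/* Global hash tables (established, bound and listening buckets) for TCP
 * sockets, shared with the IPv6 side of the stack.
 */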
struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(…);
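/* Per-CPU kernel socket used to send RSTs and ACKs for segments that have
 * no full owning socket (see tcp_v4_send_reset()/tcp_v4_send_ack()).
 */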
static DEFINE_PER_CPU(struct sock_bh_locked, ipv4_tcp_sk) = …;
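/* Pick the initial sequence number for a connection, derived from the
 * 4-tuple and a secret so it is hard to predict off-path.
 */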
static u32 tcp_v4_init_seq(const struct sk_buff *skb)
{ … }
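/* Per-connection offset applied to TCP timestamps, derived from the
 * address pair so timestamps do not expose a global clock.
 */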
static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
{ … }
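/* Decide whether a TIME-WAIT socket occupying the same 4-tuple may be
 * reused for a new outgoing connection (timestamp/ISN based recycling).
 */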
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{ … }
EXPORT_SYMBOL_GPL(…);
static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
int addr_len)
{ … }
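/* Initiate an outgoing connection: route the flow, bind a source
 * address and port, pick the ISN and send the initial SYN.
 */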
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{ … }
EXPORT_SYMBOL(…);
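/* React to a "fragmentation needed" ICMP message: lower the cached path
 * MTU and, when the socket state allows it, retransmit with smaller
 * segments.
 */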
void tcp_v4_mtu_reduced(struct sock *sk)
{ … }
EXPORT_SYMBOL(…);
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{ … }
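/* Handle an ICMP error against a SYN_RECV request socket; when @abort is
 * set the pending request is silently dropped, otherwise the event is
 * only counted.
 */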
void tcp_req_err(struct sock *sk, u32 seq, bool abort)
{ … }
EXPORT_SYMBOL(…);
void tcp_ld_RTO_revert(struct sock *sk, u32 seq)
{ … }
EXPORT_SYMBOL(…);
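/* Called by the ICMP layer when an error arrives for a TCP segment we
 * sent: locate the owning socket and act on the error (PMTU update,
 * retransmission, or aborting the connection).
 */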
int tcp_v4_err(struct sk_buff *skb, u32 info)
{ … }
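/* Prepare the TCP checksum of an outgoing segment: seed it with the
 * pseudo-header sum and set up the offsets for checksum offload.
 * tcp_v4_send_check() is the socket-facing wrapper.
 */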
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{ … }
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{ … }
EXPORT_SYMBOL(…);
#define REPLY_OPTIONS_LEN …
static bool tcp_v4_ao_sign_reset(const struct sock *sk, struct sk_buff *skb,
const struct tcp_ao_hdr *aoh,
struct ip_reply_arg *arg, struct tcphdr *reply,
__be32 reply_options[REPLY_OPTIONS_LEN])
{ … }
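/* Send a RST in reply to @skb, either because no socket matched or to
 * abort a connection. The reply is built and sent via the per-CPU
 * control socket rather than an owning socket.
 */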
static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb,
enum sk_rst_reason reason)
{ … }
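/* Send an ACK outside full socket context; used for TIME-WAIT sockets
 * and SYN_RECV request sockets (see the two callers below).
 */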
static void tcp_v4_send_ack(const struct sock *sk,
struct sk_buff *skb, u32 seq, u32 ack,
u32 win, u32 tsval, u32 tsecr, int oif,
struct tcp_key *key,
int reply_flags, u8 tos, u32 txhash)
{ … }
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{ … }
static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
struct request_sock *req)
{ … }
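/* Send a SYN-ACK in response to a SYN. This still operates on the
 * request_sock only, not on a full socket.
 */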
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
struct flowi *fl,
struct request_sock *req,
struct tcp_fastopen_cookie *foc,
enum tcp_synack_type synack_type,
struct sk_buff *syn_skb)
{ … }
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{ … }
#ifdef CONFIG_TCP_MD5SIG
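/* Deferred static key that keeps MD5 option processing out of the fast
 * path until the first signature key is installed.
 */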
DEFINE_STATIC_KEY_DEFERRED_FALSE(…) …;
EXPORT_SYMBOL(…);
static bool better_md5_match(struct tcp_md5sig_key *old, struct tcp_md5sig_key *new)
{ … }
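/* Look up an MD5 key for the given peer address, preferring the most
 * specific match (longest prefix, then L3 device index).
 */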
struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
const union tcp_md5_addr *addr,
int family, bool any_l3index)
{ … }
EXPORT_SYMBOL(…);
static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
const union tcp_md5_addr *addr,
int family, u8 prefixlen,
int l3index, u8 flags)
{ … }
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
const struct sock *addr_sk)
{ … }
EXPORT_SYMBOL(…);
static int tcp_md5sig_info_add(struct sock *sk, gfp_t gfp)
{ … }
static int __tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
int family, u8 prefixlen, int l3index, u8 flags,
const u8 *newkey, u8 newkeylen, gfp_t gfp)
{ … }
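/* Install or update an MD5 signature key for a peer address on @sk.
 * Called with the socket lock held.
 */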
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
int family, u8 prefixlen, int l3index, u8 flags,
const u8 *newkey, u8 newkeylen)
{ … }
EXPORT_SYMBOL(…);
int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr,
int family, u8 prefixlen, int l3index,
struct tcp_md5sig_key *key)
{ … }
EXPORT_SYMBOL(…);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
u8 prefixlen, int l3index, u8 flags)
{ … }
EXPORT_SYMBOL(…);
void tcp_clear_md5_list(struct sock *sk)
{ … }
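/* tcp_v4_parse_md5_keys() below services the TCP_MD5SIG/TCP_MD5SIG_EXT
 * socket options. Illustrative userspace sketch (not part of this file;
 * peer_sin is a hypothetical struct sockaddr_in for the peer):
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *
 *	memcpy(&md5.tcpm_addr, &peer_sin, sizeof(peer_sin));
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */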
static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
sockptr_t optval, int optlen)
{ … }
static int tcp_v4_md5_hash_headers(struct tcp_sigpool *hp,
__be32 daddr, __be32 saddr,
const struct tcphdr *th, int nbytes)
{ … }
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
__be32 daddr, __be32 saddr, const struct tcphdr *th)
{ … }
int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
const struct sock *sk,
const struct sk_buff *skb)
{ … }
EXPORT_SYMBOL(…);
#endif
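/* Request-sock callbacks used while a connection sits in SYN_RECV:
 * fill in the IPv4-specific parts of the request (addresses, IP options)
 * and route the reply.
 */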
static void tcp_v4_init_req(struct request_sock *req,
const struct sock *sk_listener,
struct sk_buff *skb)
{ … }
static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
struct sk_buff *skb,
struct flowi *fl,
struct request_sock *req,
u32 tw_isn)
{ … }
struct request_sock_ops tcp_request_sock_ops __read_mostly = …;
const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = …;
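/* Handle an incoming SYN on a listening socket: allocate a request sock
 * and answer with a SYN-ACK or a syncookie.
 */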
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{ … }
EXPORT_SYMBOL(…);
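/* The three-way handshake has completed: create the full socket for the
 * newly established connection.
 */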
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
struct dst_entry *dst,
struct request_sock *req_unhash,
bool *own_req)
{ … }
EXPORT_SYMBOL(…);
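/* When syncookies are in effect, validate the cookie carried by an
 * incoming ACK and rebuild the request state so a child socket can be
 * created.
 */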
static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
{ … }
u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
struct tcphdr *th, u32 *cookie)
{ … }
INDIRECT_CALLABLE_DECLARE(…);
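/* Segment processing once the owning socket is known. The caller holds
 * the socket lock; segments arriving while the socket is owned by user
 * context go through the backlog instead.
 */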
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{ … }
EXPORT_SYMBOL(…);
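/* Early demux: look the segment up in the established hash before the
 * route lookup so the destination cached on the socket can be reused.
 */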
int tcp_v4_early_demux(struct sk_buff *skb)
{ … }
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
enum skb_drop_reason *reason)
{ … }
EXPORT_SYMBOL(…);
int tcp_filter(struct sock *sk, struct sk_buff *skb)
{ … }
EXPORT_SYMBOL(…);
static void tcp_v4_restore_cb(struct sk_buff *skb)
{ … }
static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
const struct tcphdr *th)
{ … }
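/* Main receive entry point for TCP over IPv4: validate the header, look
 * up the owning socket (established, TIME-WAIT or listener) and hand the
 * segment over to it.
 */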
int tcp_v4_rcv(struct sk_buff *skb)
{ … }
static struct timewait_sock_ops tcp_timewait_sock_ops = …;
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{ … }
EXPORT_SYMBOL(…);
const struct inet_connection_sock_af_ops ipv4_specific = …;
EXPORT_SYMBOL(…);
#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = …;
#endif
static int tcp_v4_init_sock(struct sock *sk)
{ … }
#ifdef CONFIG_TCP_MD5SIG
static void tcp_md5sig_info_free_rcu(struct rcu_head *head)
{ … }
#endif
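/* Final IPv4-side cleanup when a TCP socket is destroyed: stop timers,
 * purge the queues and release congestion control, signing and fastopen
 * state.
 */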
void tcp_v4_destroy_sock(struct sock *sk)
{ … }
EXPORT_SYMBOL(…);
#ifdef CONFIG_PROC_FS
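/* seq_file interface behind /proc/net/tcp: walk the listening hash
 * first, then the established hash, printing one socket per line.
 */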
static unsigned short seq_file_family(const struct seq_file *seq);
static bool seq_sk_match(struct seq_file *seq, const struct sock *sk)
{ … }
static void *listening_get_first(struct seq_file *seq)
{ … }
static void *listening_get_next(struct seq_file *seq, void *cur)
{ … }
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{ … }
static inline bool empty_bucket(struct inet_hashinfo *hinfo,
const struct tcp_iter_state *st)
{ … }
static void *established_get_first(struct seq_file *seq)
{ … }
static void *established_get_next(struct seq_file *seq, void *cur)
{ … }
static void *established_get_idx(struct seq_file *seq, loff_t pos)
{ … }
static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{ … }
static void *tcp_seek_last_pos(struct seq_file *seq)
{ … }
void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{ … }
EXPORT_SYMBOL(…);
void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{ … }
EXPORT_SYMBOL(…);
void tcp_seq_stop(struct seq_file *seq, void *v)
{ … }
EXPORT_SYMBOL(…);
static void get_openreq4(const struct request_sock *req,
struct seq_file *f, int i)
{ … }
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{ … }
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
struct seq_file *f, int i)
{ … }
#define TMPSZ …
static int tcp4_seq_show(struct seq_file *seq, void *v)
{ … }
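/* bpf_iter support: sockets are batched out of a hash bucket with
 * references held so the BPF program can run on each of them without
 * the bucket lock being held.
 */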
#ifdef CONFIG_BPF_SYSCALL
struct bpf_tcp_iter_state { … };
struct bpf_iter__tcp { … };
static int tcp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
struct sock_common *sk_common, uid_t uid)
{ … }
static void bpf_iter_tcp_put_batch(struct bpf_tcp_iter_state *iter)
{ … }
static int bpf_iter_tcp_realloc_batch(struct bpf_tcp_iter_state *iter,
unsigned int new_batch_sz)
{ … }
static unsigned int bpf_iter_tcp_listening_batch(struct seq_file *seq,
struct sock *start_sk)
{ … }
static unsigned int bpf_iter_tcp_established_batch(struct seq_file *seq,
struct sock *start_sk)
{ … }
static struct sock *bpf_iter_tcp_batch(struct seq_file *seq)
{ … }
static void *bpf_iter_tcp_seq_start(struct seq_file *seq, loff_t *pos)
{ … }
static void *bpf_iter_tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{ … }
static int bpf_iter_tcp_seq_show(struct seq_file *seq, void *v)
{ … }
static void bpf_iter_tcp_seq_stop(struct seq_file *seq, void *v)
{ … }
static const struct seq_operations bpf_iter_tcp_seq_ops = …;
#endif
static unsigned short seq_file_family(const struct seq_file *seq)
{ … }
static const struct seq_operations tcp4_seq_ops = …;
static struct tcp_seq_afinfo tcp4_seq_afinfo = …;
static int __net_init tcp4_proc_init_net(struct net *net)
{ … }
static void __net_exit tcp4_proc_exit_net(struct net *net)
{ … }
static struct pernet_operations tcp4_net_ops = …;
int __init tcp4_proc_init(void)
{ … }
void tcp4_proc_exit(void)
{ … }
#endif
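/* True when the amount of queued-but-unsent data is below the
 * notsent_lowat threshold, i.e. the stream can accept more data.
 * @wake is set when called from sk_stream_write_space(), which uses a
 * stricter threshold.
 */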
bool tcp_stream_memory_free(const struct sock *sk, int wake)
{ … }
EXPORT_SYMBOL(…);
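/* Protocol operations exposed to the socket layer for IPPROTO_TCP
 * sockets in the AF_INET family.
 */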
struct proto tcp_prot = …;
EXPORT_SYMBOL(…);
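/* Per-network-namespace init/exit: install the namespace's sysctl
 * defaults and hash table view, and tear down TIME-WAIT state when the
 * namespace goes away.
 */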
static void __net_exit tcp_sk_exit(struct net *net)
{ … }
static void __net_init tcp_set_hashinfo(struct net *net)
{ … }
static int __net_init tcp_sk_init(struct net *net)
{ … }
static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{ … }
static struct pernet_operations __net_initdata tcp_sk_ops = …;
#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
DEFINE_BPF_ITER_FUNC(tcp, struct bpf_iter_meta *meta,
struct sock_common *sk_common, uid_t uid)
#define INIT_BATCH_SZ …
static int bpf_iter_init_tcp(void *priv_data, struct bpf_iter_aux_info *aux)
{ … }
static void bpf_iter_fini_tcp(void *priv_data)
{ … }
static const struct bpf_iter_seq_info tcp_seq_info = …;
static const struct bpf_func_proto *
bpf_iter_tcp_get_func_proto(enum bpf_func_id func_id,
const struct bpf_prog *prog)
{ … }
static struct bpf_iter_reg tcp_reg_info = …;
static void __init bpf_iter_register(void)
{ … }
#endif
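/* Boot-time initialisation: create the per-CPU control sockets and
 * register the per-netns operations.
 */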
void __init tcp_v4_init(void)
{ … }