#include <linux/module.h>
#include <linux/jhash.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>
#include <net/tcp.h>
#include <net/sock_reuseport.h>
#include <net/addrconf.h>
#if IS_ENABLED(CONFIG_IPV6)
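/* match_sk*_wildcard == true:  treat the wildcard address on that socket
 *                              (IPV6_ADDR_ANY, or 0.0.0.0 if not IPv6-only)
 *                              as matching any address
 * match_sk*_wildcard == false: addresses must match exactly
 */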
static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
                                 const struct in6_addr *sk2_rcv_saddr6,
                                 __be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
                                 bool sk1_ipv6only, bool sk2_ipv6only,
                                 bool match_sk1_wildcard,
                                 bool match_sk2_wildcard)
{ … }
#endif
static bool ipv4_rcv_saddr_equal(__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
                                 bool sk2_ipv6only, bool match_sk1_wildcard,
                                 bool match_sk2_wildcard)
{ … }
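/* Used by the bind-conflict code: returns true if the receive (bound)
 * addresses of @sk and @sk2 overlap, so that binding both to the same
 * port could conflict.
 */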
bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
                          bool match_wildcard)
{ … }
EXPORT_SYMBOL(…);
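/* True if @sk is bound to the wildcard address, i.e. no specific local
 * address was requested.
 */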
bool inet_rcv_saddr_any(const struct sock *sk)
{ … }
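/* Local port range to use for @sk: the per-netns ip_local_port_range,
 * possibly narrowed by the IP_LOCAL_PORT_RANGE socket option.
 */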
bool inet_sk_get_local_port_range(const struct sock *sk, int *low, int *high)
{ … }
EXPORT_SYMBOL(…);
static bool inet_use_bhash2_on_bind(const struct sock *sk)
{ … }
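/* Pairwise bind(2) conflict check: returns true if @sk and @sk2 cannot
 * share the same local port, taking SO_REUSEADDR, SO_REUSEPORT, the
 * sockets' bound devices and the owning uid into account.
 */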
static bool inet_bind_conflict(const struct sock *sk, struct sock *sk2,
                               kuid_t sk_uid, bool relax,
                               bool reuseport_cb_ok, bool reuseport_ok)
{ … }
static bool __inet_bhash2_conflict(const struct sock *sk, struct sock *sk2,
                                   kuid_t sk_uid, bool relax,
                                   bool reuseport_cb_ok, bool reuseport_ok)
{ … }
static bool inet_bhash2_conflict(const struct sock *sk,
                                 const struct inet_bind2_bucket *tb2,
                                 kuid_t sk_uid,
                                 bool relax, bool reuseport_cb_ok,
                                 bool reuseport_ok)
{ … }
#define sk_for_each_bound_bhash(__sk, __tb2, __tb) …
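/* Scan the port bucket @tb (and, when a specific address bucket @tb2 is
 * available, only that bucket) for a socket that conflicts with @sk.
 */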
static int inet_csk_bind_conflict(const struct sock *sk,
                                  const struct inet_bind_bucket *tb,
                                  const struct inet_bind2_bucket *tb2,
                                  bool relax, bool reuseport_ok)
{ … }
static bool inet_bhash2_addr_any_conflict(const struct sock *sk, int port, int l3mdev,
                                          bool relax, bool reuseport_ok)
{ … }
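/* Pick an ephemeral local port for @sk: walk the local port range looking
 * for a bucket without conflicts and return it with its lock held, along
 * with the matching bind buckets and the chosen port.
 */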
static struct inet_bind_hashbucket *
inet_csk_find_open_port(const struct sock *sk, struct inet_bind_bucket **tb_ret,
                        struct inet_bind2_bucket **tb2_ret,
                        struct inet_bind_hashbucket **head2_ret, int *port_ret)
{ … }
static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
                                     struct sock *sk)
{ … }
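/* Refresh tb->fastreuse / tb->fastreuseport after @sk was bound to @tb, so
 * that subsequent bind() calls can skip the full conflict scan.
 */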
void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
                               struct sock *sk)
{ … }
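/* Obtain a local port for @sk; if @snum is zero, pick any available port
 * from the local port range.  Used as the ->get_port hook of TCP-style
 * protocols.
 */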
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{ … }
EXPORT_SYMBOL_GPL(…);
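/* Wait (with the socket locked) until a connection shows up on the accept
 * queue, the timeout expires or a signal is pending.
 */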
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{ … }
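/* accept(2) for connection-oriented protocols: pull the next established
 * child off the accept queue, blocking if the socket is not non-blocking.
 */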
struct sock *inet_csk_accept(struct sock *sk, struct proto_accept_arg *arg)
{ … }
EXPORT_SYMBOL(…);
void inet_csk_init_xmit_timers(struct sock *sk,
                               void (*retransmit_handler)(struct timer_list *t),
                               void (*delack_handler)(struct timer_list *t),
                               void (*keepalive_handler)(struct timer_list *t))
{ … }
EXPORT_SYMBOL(…);
void inet_csk_clear_xmit_timers(struct sock *sk)
{ … }
EXPORT_SYMBOL(…);
void inet_csk_clear_xmit_timers_sync(struct sock *sk)
{ … }
void inet_csk_delete_keepalive_timer(struct sock *sk)
{ … }
EXPORT_SYMBOL(…);
void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{ … }
EXPORT_SYMBOL(…);
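/* Route lookup for the SYN-ACK sent on behalf of listener @sk in reply to
 * request @req.
 */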
struct dst_entry *inet_csk_route_req(const struct sock *sk,
                                     struct flowi4 *fl4,
                                     const struct request_sock *req)
{ … }
EXPORT_SYMBOL_GPL(…);
struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
                                            struct sock *newsk,
                                            const struct request_sock *req)
{ … }
EXPORT_SYMBOL_GPL(…);
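/* Decide whether the pending request should be expired and whether the
 * SYN-ACK should be retransmitted, honouring TCP_DEFER_ACCEPT.
 */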
static void syn_ack_recalc(struct request_sock *req,
                           const int max_syn_ack_retries,
                           const u8 rskq_defer_accept,
                           int *expire, int *resend)
{ … }
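/* Retransmit the SYN-ACK for @req and update its retransmit counters. */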
int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req)
{ … }
EXPORT_SYMBOL(…);
static struct request_sock *
reqsk_alloc_noprof(const struct request_sock_ops *ops, struct sock *sk_listener,
                   bool attach_listener)
{ … }
#define reqsk_alloc(...) …
struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
                                      struct sock *sk_listener,
                                      bool attach_listener)
{ … }
EXPORT_SYMBOL(…);
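/* Duplicate @req onto the new listener @sk; used when migrating pending
 * requests within a SO_REUSEPORT group after their listener closes.
 */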
static struct request_sock *inet_reqsk_clone(struct request_sock *req,
                                             struct sock *sk)
{ … }
static void reqsk_queue_migrated(struct request_sock_queue *queue,
                                 const struct request_sock *req)
{ … }
static void reqsk_migrate_reset(struct request_sock *req)
{ … }
static bool reqsk_queue_unlink(struct request_sock *req)
{ … }
bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
{ … }
EXPORT_SYMBOL(…);
void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req)
{ … }
EXPORT_SYMBOL(…);
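/* Per-request (SYN_RECV) timer: retransmit the SYN-ACK with backoff, drop
 * the request once the retry limit is reached, and if the listener is
 * gone try to migrate the request to another reuseport listener.
 */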
static void reqsk_timer_handler(struct timer_list *t)
{ … }
static bool reqsk_queue_hash_req(struct request_sock *req,
                                 unsigned long timeout)
{ … }
bool inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
                                   unsigned long timeout)
{ … }
EXPORT_SYMBOL_GPL(…);
static void inet_clone_ulp(const struct request_sock *req, struct sock *newsk,
                           const gfp_t priority)
{ … }
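/* Create the child socket for an accepted connection by cloning listener
 * @sk and filling in the addresses/ports from @req; returned locked and
 * in TCP_SYN_RECV state.
 */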
struct sock *inet_csk_clone_lock(const struct sock *sk,
                                 const struct request_sock *req,
                                 const gfp_t priority)
{ … }
EXPORT_SYMBOL_GPL(…);
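/* Final destruction of a socket that is closed, unhashed and has no user
 * references left: run the protocol destroy hook, purge the queues and
 * drop the orphan accounting.
 */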
void inet_csk_destroy_sock(struct sock *sk)
{ … }
EXPORT_SYMBOL(…);
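/* Abort a freshly cloned child that was never exposed to user space:
 * release the socket lock and give back the extra reference taken by
 * sk_clone_lock() so the socket can be destroyed.
 */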
void inet_csk_prepare_forced_close(struct sock *sk)
        __releases(&sk->sk_lock.slock)
{ … }
EXPORT_SYMBOL(…);
static int inet_ulp_can_listen(const struct sock *sk)
{ … }
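/* Move @sk into TCP_LISTEN state: set up the accept queue, re-check that
 * the local port can still be used and hash the socket into the listening
 * table.
 */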
int inet_csk_listen_start(struct sock *sk)
{ … }
EXPORT_SYMBOL_GPL(…);
static void inet_child_forget(struct sock *sk, struct request_sock *req,
                              struct sock *child)
{ … }
struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
                                      struct request_sock *req,
                                      struct sock *child)
{ … }
EXPORT_SYMBOL(…);
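/* Last step of the handshake dance: remove @req from the SYN_RECV queue
 * and, if we still own the request, put the new @child on the accept
 * queue; otherwise dispose of the child and return NULL.
 */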
struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
                                         struct request_sock *req, bool own_req)
{ … }
EXPORT_SYMBOL(…);
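/* Called when a listening socket is closed: abort every request still in
 * SYN_RECV and every child that was accepted by the kernel but never by
 * user space.
 */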
void inet_csk_listen_stop(struct sock *sk)
{ … }
EXPORT_SYMBOL_GPL(…);
void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{ … }
EXPORT_SYMBOL_GPL(…);
static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
{ … }
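/* Propagate a new path MTU to @sk's cached route, rebuilding the route if
 * the cached one has been invalidated, and return the (possibly new) dst.
 */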
struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
{ … }
EXPORT_SYMBOL_GPL(…);