linux/net/ipv4/inet_connection_sock.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Support for INET connection oriented protocols.
 *
 * Authors:	See the TCP sources
 */

#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>
#include <net/tcp.h>
#include <net/sock_reuseport.h>
#include <net/addrconf.h>

#if IS_ENABLED(CONFIG_IPV6)
/* match_sk*_wildcard == true:  IPV6_ADDR_ANY matches any IPv6 address,
 *				and additionally any IPv4 address unless
 *				the socket is IPv6-only
 * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
 *				IPV6_ADDR_ANY only equals IPV6_ADDR_ANY,
 *				and 0.0.0.0 only equals 0.0.0.0
 */
static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
				 const struct in6_addr *sk2_rcv_saddr6,
				 __be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
				 bool sk1_ipv6only, bool sk2_ipv6only,
				 bool match_sk1_wildcard,
				 bool match_sk2_wildcard)
{}
#endif

/* match_sk*_wildcard == true:  0.0.0.0 matches any IPv4 address
 * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
 *				0.0.0.0 only equals 0.0.0.0
 */
static bool ipv4_rcv_saddr_equal(__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
				 bool sk2_ipv6only, bool match_sk1_wildcard,
				 bool match_sk2_wildcard)
{}
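
/*
 * A minimal sketch of the rule documented above (the "_sketch" name is
 * illustrative, not in-tree): two IPv4 receive addresses conflict when
 * they are equal, or when one side is 0.0.0.0 and its match_sk*_wildcard
 * flag permits wildcard matching.
 */
static inline bool ipv4_rcv_saddr_equal_sketch(__be32 sk1_rcv_saddr,
					       __be32 sk2_rcv_saddr,
					       bool sk2_ipv6only,
					       bool match_sk1_wildcard,
					       bool match_sk2_wildcard)
{
	if (sk2_ipv6only)	/* an IPv6-only peer never matches on IPv4 */
		return false;
	if (sk1_rcv_saddr == sk2_rcv_saddr)
		return true;
	return (match_sk1_wildcard && !sk1_rcv_saddr) ||
	       (match_sk2_wildcard && !sk2_rcv_saddr);
}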

bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
			  bool match_wildcard)
{}
EXPORT_SYMBOL(inet_rcv_saddr_equal);

bool inet_rcv_saddr_any(const struct sock *sk)
{}
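
/*
 * Likely shape of the check above, hedged as a sketch: the receive
 * address is "any" when the IPv6 address is unspecified (for an AF_INET6
 * socket) or the IPv4 address is 0.0.0.0.
 */
static inline bool rcv_saddr_any_sketch(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		return ipv6_addr_any(&sk->sk_v6_rcv_saddr);
#endif
	return !sk->sk_rcv_saddr;
}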

/**
 *	inet_sk_get_local_port_range - fetch ephemeral ports range
 *	@sk: socket
 *	@low: pointer to low port
 *	@high: pointer to high port
 *
 *	Fetch the netns port range (/proc/sys/net/ipv4/ip_local_port_range).
 *	The range can be overridden if the socket set the IP_LOCAL_PORT_RANGE
 *	option. Returns true if IP_LOCAL_PORT_RANGE was set on this socket.
 */
bool inet_sk_get_local_port_range(const struct sock *sk, int *low, int *high)
{}
EXPORT_SYMBOL(inet_sk_get_local_port_range);
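
/*
 * Hedged sketch of the override described in the kernel-doc above. It
 * assumes the IP_LOCAL_PORT_RANGE option is stored packed as
 * "high << 16 | low" with 0 meaning "not set"; the "_sketch" helper and
 * its parameters are illustrative.
 */
static inline bool local_port_range_sketch(u32 sk_range, int netns_lo,
					   int netns_hi, int *low, int *high)
{
	int lo = sk_range & 0xffff;
	int hi = sk_range >> 16;

	*low = netns_lo;
	*high = netns_hi;
	if (!sk_range)
		return false;	/* option unset: the netns range applies */
	/* honour per-socket bounds only when they fall inside the netns range */
	if (lo >= netns_lo && lo <= netns_hi)
		*low = lo;
	if (hi >= netns_lo && hi <= netns_hi)
		*high = hi;
	return true;
}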

static bool inet_use_bhash2_on_bind(const struct sock *sk)
{}

static bool inet_bind_conflict(const struct sock *sk, struct sock *sk2,
			       kuid_t sk_uid, bool relax,
			       bool reuseport_cb_ok, bool reuseport_ok)
{}

static bool __inet_bhash2_conflict(const struct sock *sk, struct sock *sk2,
				   kuid_t sk_uid, bool relax,
				   bool reuseport_cb_ok, bool reuseport_ok)
{}

static bool inet_bhash2_conflict(const struct sock *sk,
				 const struct inet_bind2_bucket *tb2,
				 kuid_t sk_uid,
				 bool relax, bool reuseport_cb_ok,
				 bool reuseport_ok)
{}

/* Walk every socket bound to tb's port: each address-specific bucket (tb2)
 * chained off the port bucket, then each owner in that bucket (expansion
 * per recent trees; field names may differ across versions).
 */
#define sk_for_each_bound_bhash(__sk, __tb2, __tb)			\
	hlist_for_each_entry(__tb2, &(__tb)->bhash2, bhash_node)	\
	sk_for_each_bound((__sk), &(__tb2)->owners)

/* This should be called only when the tb and tb2 hashbuckets' locks are held */
static int inet_csk_bind_conflict(const struct sock *sk,
				  const struct inet_bind_bucket *tb,
				  const struct inet_bind2_bucket *tb2, /* may be null */
				  bool relax, bool reuseport_ok)
{}

/* Determine if there is a bind conflict with an existing IPV6_ADDR_ANY (if IPv6)
 * or INADDR_ANY (if IPv4) socket.
 *
 * Caller must hold the bhash hashbucket lock with local bh disabled, to protect
 * against concurrent binds on the port for the any address.
 */
static bool inet_bhash2_addr_any_conflict(const struct sock *sk, int port, int l3mdev,
					  bool relax, bool reuseport_ok)
{}

/*
 * Find an open port number for the socket.  Returns with the
 * inet_bind_hashbucket locks held if successful.
 */
static struct inet_bind_hashbucket *
inet_csk_find_open_port(const struct sock *sk, struct inet_bind_bucket **tb_ret,
			struct inet_bind2_bucket **tb2_ret,
			struct inet_bind_hashbucket **head2_ret, int *port_ret)
{}

static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
				     struct sock *sk)
{}

void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
			       struct sock *sk)
{}

/* Obtain a reference to a local port for the given sock;
 * if snum is zero it means select any available local port.
 * We try to allocate an odd port (and leave even ports for connect()).
 */
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{}
EXPORT_SYMBOL_GPL(inet_csk_get_port);
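
/*
 * Hedged sketch of the odd-port preference noted above: scan candidates
 * two at a time from a random odd offset, leaving even ports for
 * connect()'s ephemeral allocation. The in_use() callback stands in for
 * the real bind-bucket lookup; the in-tree search additionally keeps
 * parity across the wrap and falls back to even ports, both elided here.
 */
static inline int pick_local_port_sketch(int low, int high,
					 bool (*in_use)(int port))
{
	u32 remaining = high - low + 1;
	u32 offset = get_random_u32_below(remaining) | 1U; /* odd first */
	u32 i;

	for (i = 0; i < remaining; i += 2) {
		int port = low + (offset + i) % remaining;

		if (!in_use(port))
			return port;
	}
	return -EADDRINUSE;
}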

/*
 * Wait for an incoming connection, avoid race conditions. This must be called
 * with the socket locked.
 */
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{}
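
/*
 * The wait above follows the classic exclusive-sleep pattern: park on
 * the socket's wait queue, drop the lock across schedule_timeout(), then
 * re-check the accept queue, listener state, signals and the timeout
 * after re-acquiring it. A condensed, hedged sketch:
 */
static inline int wait_for_connect_sketch(struct sock *sk, long timeo)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	DEFINE_WAIT(wait);
	int err;

	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		release_sock(sk);
		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
			timeo = schedule_timeout(timeo);
		lock_sock(sk);
		err = 0;
		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
			break;		/* a connection arrived */
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;		/* listener closed under us */
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;		/* timed out */
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}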

/*
 * This will accept the next outstanding connection.
 */
struct sock *inet_csk_accept(struct sock *sk, struct proto_accept_arg *arg)
{}
EXPORT_SYMBOL(inet_csk_accept);

/*
 * Use different timers for retransmit, delayed acks and probes.
 * We may wish to use just one timer maintaining a list of expire jiffies
 * to optimize.
 */
void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(struct timer_list *t),
			       void (*delack_handler)(struct timer_list *t),
			       void (*keepalive_handler)(struct timer_list *t))
{}
EXPORT_SYMBOL(inet_csk_init_xmit_timers);
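
/*
 * A minimal sketch of the wiring, assuming the usual icsk timer fields;
 * treat the field names as illustrative of recent trees.
 */
static inline void init_xmit_timers_sketch(struct sock *sk,
		void (*retransmit_handler)(struct timer_list *t),
		void (*delack_handler)(struct timer_list *t),
		void (*keepalive_handler)(struct timer_list *t))
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	timer_setup(&icsk->icsk_retransmit_timer, retransmit_handler, 0);
	timer_setup(&icsk->icsk_delack_timer, delack_handler, 0);
	timer_setup(&sk->sk_timer, keepalive_handler, 0); /* keepalive rides sk_timer */
}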

void inet_csk_clear_xmit_timers(struct sock *sk)
{}
EXPORT_SYMBOL(inet_csk_clear_xmit_timers);

void inet_csk_clear_xmit_timers_sync(struct sock *sk)
{}

void inet_csk_delete_keepalive_timer(struct sock *sk)
{}
EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);

void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{}
EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);
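
/*
 * Hedged sketch: arming and disarming the keepalive timer reduces to the
 * generic socket timer helpers on sk->sk_timer.
 */
static inline void keepalive_timer_sketch(struct sock *sk, unsigned long len)
{
	if (!len)
		sk_stop_timer(sk, &sk->sk_timer);		  /* delete */
	else
		sk_reset_timer(sk, &sk->sk_timer, jiffies + len); /* (re)arm */
}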

struct dst_entry *inet_csk_route_req(const struct sock *sk,
				     struct flowi4 *fl4,
				     const struct request_sock *req)
{}
EXPORT_SYMBOL_GPL(inet_csk_route_req);

struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
					    struct sock *newsk,
					    const struct request_sock *req)
{}
EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);

/* Decide when to expire the request and when to resend SYN-ACK */
static void syn_ack_recalc(struct request_sock *req,
			   const int max_syn_ack_retries,
			   const u8 rskq_defer_accept,
			   int *expire, int *resend)
{}
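
/*
 * Hedged sketch of the decision: without TCP_DEFER_ACCEPT a request
 * expires once max_syn_ack_retries is exhausted and a SYN-ACK is resent
 * every round; with it, an already-ACKed request is kept, without
 * retransmits, until the defer window also runs out.
 */
static inline void syn_ack_recalc_sketch(struct request_sock *req,
					 int max_syn_ack_retries,
					 u8 rskq_defer_accept,
					 int *expire, int *resend)
{
	if (!rskq_defer_accept) {
		*expire = req->num_timeout >= max_syn_ack_retries;
		*resend = 1;
		return;
	}
	*expire = req->num_timeout >= max_syn_ack_retries &&
		  (!inet_rsk(req)->acked ||
		   req->num_timeout >= rskq_defer_accept);
	*resend = !inet_rsk(req)->acked ||
		  req->num_timeout >= rskq_defer_accept - 1;
}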

int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req)
{}
EXPORT_SYMBOL(inet_rtx_syn_ack);

static struct request_sock *
reqsk_alloc_noprof(const struct request_sock_ops *ops, struct sock *sk_listener,
		   bool attach_listener)
{}
#define reqsk_alloc(...)	alloc_hooks(reqsk_alloc_noprof(__VA_ARGS__))

struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
				      struct sock *sk_listener,
				      bool attach_listener)
{}
EXPORT_SYMBOL(inet_reqsk_alloc);
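
/*
 * A hedged sketch of the initialization a fresh request sock needs
 * before it can sit in the SYN queue; the exact field set tracks
 * struct inet_request_sock and may differ across versions.
 */
static inline void reqsk_init_sketch(struct request_sock *req,
				     const struct sock *sk_listener)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	ireq->ireq_opt = NULL;			/* no IP options yet */
	atomic64_set(&ireq->ir_cookie, 0);
	ireq->ireq_state = TCP_NEW_SYN_RECV;
	write_pnet(&ireq->ireq_net, sock_net(sk_listener));
	ireq->ireq_family = sk_listener->sk_family;
	req->timeout = TCP_TIMEOUT_INIT;
}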

static struct request_sock *inet_reqsk_clone(struct request_sock *req,
					     struct sock *sk)
{}

static void reqsk_queue_migrated(struct request_sock_queue *queue,
				 const struct request_sock *req)
{}

static void reqsk_migrate_reset(struct request_sock *req)
{}

/* return true if req was found in the ehash table */
static bool reqsk_queue_unlink(struct request_sock *req)
{}

bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
{}
EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);

void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req)
{}
EXPORT_SYMBOL(inet_csk_reqsk_queue_drop_and_put);

static void reqsk_timer_handler(struct timer_list *t)
{}

static bool reqsk_queue_hash_req(struct request_sock *req,
				 unsigned long timeout)
{}

bool inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout)
{}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);

static void inet_clone_ulp(const struct request_sock *req, struct sock *newsk,
			   const gfp_t priority)
{}

/**
 *	inet_csk_clone_lock - clone an inet socket, and lock its clone
 *	@sk: the socket to clone
 *	@req: request_sock
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *inet_csk_clone_lock(const struct sock *sk,
				 const struct request_sock *req,
				 const gfp_t priority)
{}
EXPORT_SYMBOL_GPL(inet_csk_clone_lock);

/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all.  Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void inet_csk_destroy_sock(struct sock *sk)
{}
EXPORT_SYMBOL(inet_csk_destroy_sock);

/* This function allows forcing the closure of a socket after the call to
 * tcp/dccp_create_openreq_child().
 */
void inet_csk_prepare_forced_close(struct sock *sk)
	__releases(&sk->sk_lock.slock)
{}
EXPORT_SYMBOL(inet_csk_prepare_forced_close);
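
/*
 * Hedged sketch of the preparation: sk_clone_lock() left the child
 * bh-locked with two references, so drop the lock and the extra ref,
 * and clear the local port so destruction does not try to release it.
 */
static inline void prepare_forced_close_sketch(struct sock *sk)
{
	bh_unlock_sock(sk);		/* undo the lock taken at clone time */
	sock_put(sk);			/* drop the extra clone reference */
	inet_sk(sk)->inet_num = 0;	/* no bound port to release */
}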

static int inet_ulp_can_listen(const struct sock *sk)
{}

int inet_csk_listen_start(struct sock *sk)
{}
EXPORT_SYMBOL_GPL(inet_csk_listen_start);

static void inet_child_forget(struct sock *sk, struct request_sock *req,
			      struct sock *child)
{}

struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
				      struct request_sock *req,
				      struct sock *child)
{}
EXPORT_SYMBOL(inet_csk_reqsk_queue_add);

struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
					 struct request_sock *req, bool own_req)
{}
EXPORT_SYMBOL(inet_csk_complete_hashdance);

/*
 *	This routine closes sockets which have been at least partially
 *	opened, but not yet accepted.
 */
void inet_csk_listen_stop(struct sock *sk)
{}
EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
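
/*
 * A condensed, hedged sketch of the drain performed above: pop each
 * request still queued, dispose of its half-open child via
 * inet_child_forget(), and drop the request and child references.
 */
static inline void drain_accept_queue_sketch(struct sock *sk)
{
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	struct request_sock *req;

	while ((req = reqsk_queue_remove(queue, sk)) != NULL) {
		struct sock *child = req->sk;

		local_bh_disable();
		bh_lock_sock(child);
		sock_hold(child);	/* keep child alive across forget */
		inet_child_forget(sk, req, child);
		reqsk_put(req);
		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);
	}
}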

void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{}
EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);

static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
{}

struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
{}
EXPORT_SYMBOL_GPL(inet_csk_update_pmtu);