#ifndef _INET_CONNECTION_SOCK_H
#define _INET_CONNECTION_SOCK_H
#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/poll.h>
#include <linux/kernel.h>
#include <linux/sockptr.h>
#include <net/inet_sock.h>
#include <net/request_sock.h>
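/*
 * INET_CSK_CLEAR_TIMERS stays undefined: inet_csk_clear_xmit_timer() only
 * clears the pending flag and lets an already armed timer expire harmlessly.
 */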
#undef INET_CSK_CLEAR_TIMERS
struct inet_bind_bucket;
struct inet_bind2_bucket;
struct tcp_congestion_ops;
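/* Per address family (IPv4/IPv6) operations used by connection-oriented protocols. */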
struct inet_connection_sock_af_ops { … };
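/*
 * inet_connection_sock - connection-oriented socket state layered on struct
 * inet_sock: accept queue, bind bucket, ICSK timers, delayed-ACK and
 * congestion-control state.
 */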
struct inet_connection_sock { … };
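/* Timer events handled by inet_csk_reset_xmit_timer()/inet_csk_clear_xmit_timer(). */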
#define ICSK_TIME_RETRANS …
#define ICSK_TIME_DACK …
#define ICSK_TIME_PROBE0 …
#define ICSK_TIME_LOSS_PROBE …
#define ICSK_TIME_REO_TIMEOUT …
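/* Map a struct sock pointer to its enclosing struct inet_connection_sock. */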
#define inet_csk(ptr) …
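/* Private scratch area reserved for the attached congestion control module. */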
static inline void *inet_csk_ca(const struct sock *sk)
{ … }
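/* Clone a listening socket for an accepted request; the child is returned locked. */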
struct sock *inet_csk_clone_lock(const struct sock *sk,
                                 const struct request_sock *req,
                                 const gfp_t priority);
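/* Delayed-ACK state bits kept in icsk_ack.pending. */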
enum inet_csk_ack_state_t { … };
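/* Install the retransmit, delayed-ACK and keepalive timer callbacks for a sock. */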
void inet_csk_init_xmit_timers(struct sock *sk,
                               void (*retransmit_handler)(struct timer_list *),
                               void (*delack_handler)(struct timer_list *),
                               void (*keepalive_handler)(struct timer_list *));
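/* Stop all three timers; the _sync variant also waits for running handlers. */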
void inet_csk_clear_xmit_timers(struct sock *sk);
void inet_csk_clear_xmit_timers_sync(struct sock *sk);
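/* Delayed-ACK bookkeeping: schedule, test and reset the ICSK_ACK_SCHED state. */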
static inline void inet_csk_schedule_ack(struct sock *sk)
{ … }
static inline int inet_csk_ack_scheduled(const struct sock *sk)
{ … }
static inline void inet_csk_delack_init(struct sock *sk)
{ … }
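/* Cancel or (re)arm the keepalive timer (sk->sk_timer). */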
void inet_csk_delete_keepalive_timer(struct sock *sk);
void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
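/* Disarm or (re)arm one of the ICSK_TIME_* timers; "when" is clamped to max_when. */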
static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
{ … }
static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
                                             unsigned long when,
                                             const unsigned long max_when)
{ … }
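/* Retransmission timeout after exponential backoff, capped at max_when. */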
static inline unsigned long
inet_csk_rto_backoff(const struct inet_connection_sock *icsk,
                     unsigned long max_when)
{ … }
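/* Dequeue an established child from the listener's accept queue. */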
struct sock *inet_csk_accept(struct sock *sk, struct proto_accept_arg *arg);
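/* Claim a local port for the socket; a zero snum requests automatic selection. */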
int inet_csk_get_port(struct sock *sk, unsigned short snum);
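/* Route lookups on behalf of a request sock and of its newly created child. */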
struct dst_entry *inet_csk_route_req(const struct sock *sk, struct flowi4 *fl4,
                                     const struct request_sock *req);
struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
                                            struct sock *newsk,
                                            const struct request_sock *req);
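/* Listener request (SYN) queue: add, hash, account for and drop request socks. */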
struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
                                      struct request_sock *req,
                                      struct sock *child);
bool inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
                                   unsigned long timeout);
struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
                                         struct request_sock *req,
                                         bool own_req);
static inline void inet_csk_reqsk_queue_added(struct sock *sk)
{ … }
static inline int inet_csk_reqsk_queue_len(const struct sock *sk)
{ … }
static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
{ … }
bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req);
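/* Backed-off SYN-ACK retransmit timeout of a request sock, capped at max_timeout. */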
static inline unsigned long
reqsk_timeout(struct request_sock *req, unsigned long max_timeout)
{ … }
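/* Teardown helpers: orphan accounting, final destruction and aborting a half-created child. */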
static inline void inet_csk_prepare_for_destroy_sock(struct sock *sk)
{ … }
void inet_csk_destroy_sock(struct sock *sk);
void inet_csk_prepare_forced_close(struct sock *sk);
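/* A listening socket is readable (EPOLLIN) once its accept queue is non-empty. */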
static inline __poll_t inet_csk_listen_poll(const struct sock *sk)
{ … }
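/* Enter and leave the LISTEN state; _stop also disposes of pending requests. */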
int inet_csk_listen_start(struct sock *sk);
void inet_csk_listen_stop(struct sock *sk);
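/* Fill *uaddr with the peer's address and port. */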
void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
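/* Refresh the fastreuse/fastreuseport hints of a bind bucket when sk joins it. */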
void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
                               struct sock *sk);
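/* Revalidate the cached route after a path MTU change; returns the dst or NULL. */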
struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
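/* Ping-pong (interactive) mode biases the connection toward delayed ACKs. */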
static inline void inet_csk_enter_pingpong_mode(struct sock *sk)
{ … }
static inline void inet_csk_exit_pingpong_mode(struct sock *sk)
{ … }
static inline bool inet_csk_in_pingpong_mode(struct sock *sk)
{ … }
static inline void inet_csk_inc_pingpong_cnt(struct sock *sk)
{ … }
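/* True when an upper layer protocol (e.g. kTLS) is attached via icsk_ulp_ops. */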
static inline bool inet_csk_has_ulp(const struct sock *sk)
{ … }
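/* Initialise the spinlocks embedded in the accept queue of a new sock. */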
static inline void inet_init_csk_locks(struct sock *sk)
{ … }
#endif