/* linux/include/net/request_sock.h */

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * NET		Generic infrastructure for Network protocols.
 *
 *		Definitions for request_sock
 *
 * Authors:	Arnaldo Carvalho de Melo <[email protected]>
 *
 * 		From code originally in include/net/tcp.h
 */
#ifndef _REQUEST_SOCK_H
#define _REQUEST_SOCK_H

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/refcount.h>

#include <net/sock.h>
#include <net/rstreason.h>

struct request_sock;
struct sk_buff;
struct dst_entry;
struct proto;

/* struct request_sock_ops - per-protocol operations for request socks.
 *
 * One instance per protocol family; supplies the slab cache used for
 * allocation plus the callbacks the generic code invokes while a
 * connection request is being serviced.
 */
struct request_sock_ops {
	int		family;		/* address family (AF_INET, ...) */
	unsigned int	obj_size;	/* size of the protocol's request sock */
	struct kmem_cache	*slab;	/* cache request socks are allocated from */
	char		*slab_name;
	int		(*rtx_syn_ack)(const struct sock *sk,
				       struct request_sock *req);
	void		(*send_ack)(const struct sock *sk, struct sk_buff *skb,
				    struct request_sock *req);
	void		(*send_reset)(const struct sock *sk,
				      struct sk_buff *skb,
				      enum sk_rst_reason reason);
	void		(*destructor)(struct request_sock *req);
	void		(*syn_ack_timeout)(const struct request_sock *req);
};

int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req);

struct saved_syn {};

/* struct request_sock - mini sock to represent a connection request
 */
struct request_sock {};

/* Downcast: while in TCP_NEW_SYN_RECV state a "sock" pointer really points
 * at a request_sock (both start with struct sock_common).
 */
static inline struct request_sock *inet_reqsk(const struct sock *sk)
{
	return (struct request_sock *)sk;
}

/* Upcast a request_sock to the struct sock it aliases (shared sock_common
 * header) so generic socket helpers can operate on it.
 */
static inline struct sock *req_to_sk(struct request_sock *req)
{
	return (struct sock *)req;
}

/**
 * skb_steal_sock - steal a socket from an sk_buff
 * @skb: sk_buff to steal the socket from
 * @refcounted: is set to true if the socket is reference-counted
 * @prefetched: is set to true if the socket was assigned from bpf
 */
static inline struct sock *skb_steal_sock(struct sk_buff *skb,
					  bool *refcounted, bool *prefetched)
{}

static inline void __reqsk_free(struct request_sock *req)
{}

static inline void reqsk_free(struct request_sock *req)
{}

static inline void reqsk_put(struct request_sock *req)
{}

/*
 * For a TCP Fast Open listener -
 *	lock - protects the access to all the reqsk, which is co-owned by
 *		the listener and the child socket.
 *	qlen - pending TFO requests (still in TCP_SYN_RECV).
 *	max_qlen - max TFO reqs allowed before TFO is disabled.
 *
 *	XXX (TFO) - ideally these fields can be made as part of "listen_sock"
 *	structure above. But there is some implementation difficulty due to
 *	listen_sock being part of request_sock_queue hence will be freed when
 *	a listener is stopped. But TFO related fields may continue to be
 *	accessed even after a listener is closed, until its sk_refcnt drops
 *	to 0 implying no more outstanding TFO reqs. One solution is to keep
 *	listen_opt around until	sk_refcnt drops to 0. But there is some other
 *	complexity that needs to be resolved. E.g., a listener can be disabled
 *	temporarily through shutdown()->tcp_disconnect(), and re-enabled later.
 */
struct fastopen_queue {};

/** struct request_sock_queue - queue of request_socks
 *
 * @rskq_accept_head - FIFO head of established children
 * @rskq_accept_tail - FIFO tail of established children
 * @rskq_defer_accept - User waits for some data after accept()
 *
 */
struct request_sock_queue {};

void reqsk_queue_alloc(struct request_sock_queue *queue);

void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
			   bool reset);

static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
{}

static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
						      struct sock *parent)
{}

static inline void reqsk_queue_removed(struct request_sock_queue *queue,
				       const struct request_sock *req)
{}

static inline void reqsk_queue_added(struct request_sock_queue *queue)
{}

static inline int reqsk_queue_len(const struct request_sock_queue *queue)
{}

static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
{}

/* RFC 7323 2.3 Using the Window Scale Option
 *  The window field (SEG.WND) of every outgoing segment, with the
 *  exception of <SYN> segments, MUST be right-shifted by
 *  Rcv.Wind.Shift bits.
 *
 * This means the SEG.WND carried in SYNACK can not exceed 65535.
 * We use this property to harden TCP stack while in NEW_SYN_RECV state.
 */
static inline u32 tcp_synack_window(const struct request_sock *req)
{}
#endif /* _REQUEST_SOCK_H */