linux/net/mptcp/protocol.c

// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/atomic.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp_states.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
#include <net/hotdata.h>
#include <net/xfrm.h>
#include <asm/ioctls.h>
#include "protocol.h"
#include "mib.h"

#define CREATE_TRACE_POINTS
#include <trace/events/mptcp.h>

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
struct mptcp6_sock {
	struct mptcp_sock msk;
	struct ipv6_pinfo np;
};
#endif

enum {
	MPTCP_CMSG_TS = BIT(0),
	MPTCP_CMSG_INQ = BIT(1),
};

static struct percpu_counter mptcp_sockets_allocated ____cacheline_aligned_in_smp;

static void __mptcp_destroy_sock(struct sock *sk);
static void mptcp_check_send_data_fin(struct sock *sk);

DEFINE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions);
static struct net_device mptcp_napi_dev;

/* Returns end sequence number of the receiver's advertised window */
static u64 mptcp_wnd_end(const struct mptcp_sock *msk)
{}

static const struct proto_ops *mptcp_fallback_tcp_ops(const struct sock *sk)
{}

static int __mptcp_socket_create(struct mptcp_sock *msk)
{}

/* If the MPC handshake is not started, returns the first subflow,
 * allocating it if needed.
 */
struct sock *__mptcp_nmpc_sk(struct mptcp_sock *msk)
{}

static void mptcp_drop(struct sock *sk, struct sk_buff *skb)
{}

static void mptcp_rmem_fwd_alloc_add(struct sock *sk, int size)
{}

static void mptcp_rmem_charge(struct sock *sk, int size)
{}

static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
			       struct sk_buff *from)
{}

static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to,
				   struct sk_buff *from)
{}

static void __mptcp_rmem_reclaim(struct sock *sk, int amount)
{}

static void mptcp_rmem_uncharge(struct sock *sk, int size)
{}

static void mptcp_rfree(struct sk_buff *skb)
{}

void mptcp_set_owner_r(struct sk_buff *skb, struct sock *sk)
{}

/* "inspired" by tcp_data_queue_ofo(), main differences:
 * - use mptcp seqs
 * - don't cope with sacks
 */
static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb)
{}
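
/* Illustrative sketch (not the in-tree implementation): the out-of-order
 * queue is keyed on the 64-bit MPTCP-level sequence numbers carried in the
 * skb control block, so duplicate/overlap decisions are taken in data
 * sequence space rather than in the subflow's TCP sequence space. Only the
 * classification step is shown; the real function inserts into an rbtree
 * and coalesces with neighbours. MPTCP_SKB_CB() and msk->ack_seq are
 * assumed to match the definitions in protocol.h.
 */
enum mptcp_ofo_class_sketch {
	MPTCP_OFO_DUP,		/* fully covered by in-order data, drop it */
	MPTCP_OFO_QUEUE,	/* genuinely out of order, keep it */
};

static enum mptcp_ofo_class_sketch __maybe_unused
mptcp_ofo_classify_sketch(const struct mptcp_sock *msk, struct sk_buff *skb)
{
	u64 end_seq = MPTCP_SKB_CB(skb)->end_seq;

	/* everything up to and including ack_seq was already received in order */
	if (!after64(end_seq, msk->ack_seq))
		return MPTCP_OFO_DUP;
	return MPTCP_OFO_QUEUE;
}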

static bool mptcp_rmem_schedule(struct sock *sk, struct sock *ssk, int size)
{}

static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
			     struct sk_buff *skb, unsigned int offset,
			     size_t copy_len)
{}

static void mptcp_stop_rtx_timer(struct sock *sk)
{}

static void mptcp_close_wake_up(struct sock *sk)
{}

/* called under the msk socket lock */
static bool mptcp_pending_data_fin_ack(struct sock *sk)
{}

static void mptcp_check_data_fin_ack(struct sock *sk)
{}

/* can be called with no lock acquired */
static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq)
{}

static void mptcp_set_datafin_timeout(struct sock *sk)
{}

static void __mptcp_set_timeout(struct sock *sk, long tout)
{}

static long mptcp_timeout_from_subflow(const struct mptcp_subflow_context *subflow)
{}

static void mptcp_set_timeout(struct sock *sk)
{}

static inline bool tcp_can_send_ack(const struct sock *ssk)
{}

void __mptcp_subflow_send_ack(struct sock *ssk)
{}

static void mptcp_subflow_send_ack(struct sock *ssk)
{}

static void mptcp_send_ack(struct mptcp_sock *msk)
{}

static void mptcp_subflow_cleanup_rbuf(struct sock *ssk)
{}

static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty)
{}

static void mptcp_cleanup_rbuf(struct mptcp_sock *msk)
{}

static bool mptcp_check_data_fin(struct sock *sk)
{}

static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
					   struct sock *ssk,
					   unsigned int *bytes)
{}

static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
{}

static bool __mptcp_subflow_error_report(struct sock *sk, struct sock *ssk)
{}

void __mptcp_error_report(struct sock *sk)
{}

/* In most cases we will be able to lock the mptcp socket.  If it's already
 * owned, we need to defer to the work queue to avoid ABBA deadlock.
 */
static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
{}
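
/* A minimal sketch of the lock-or-defer pattern described above, using only
 * core socket helpers: take the msk spinlock, check whether a user context
 * owns the socket and, if so, hand the work to the mptcp worker instead of
 * waiting for the owner (which would invert the msk/ssk locking order).
 * Not the in-tree implementation.
 */
static bool __maybe_unused mptcp_lock_or_defer_sketch(struct sock *sk)
{
	bool can_process;

	bh_lock_sock(sk);
	can_process = !sock_owned_by_user(sk);
	if (!can_process)
		mptcp_schedule_work(sk);	/* the worker runs with the lock free */
	bh_unlock_sock(sk);

	return can_process;
}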

void mptcp_data_ready(struct sock *sk, struct sock *ssk)
{}

static void mptcp_subflow_joined(struct mptcp_sock *msk, struct sock *ssk)
{}

static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
{}

static void __mptcp_flush_join_list(struct sock *sk, struct list_head *join_list)
{}

static bool mptcp_rtx_timer_pending(struct sock *sk)
{}

static void mptcp_reset_rtx_timer(struct sock *sk)
{}

bool mptcp_schedule_work(struct sock *sk)
{}

static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk)
{}

static bool mptcp_skb_can_collapse_to(u64 write_seq,
				      const struct sk_buff *skb,
				      const struct mptcp_ext *mpext)
{}

/* we can append data to the given data frag if:
 * - there is space available in the backing page_frag
 * - the data frag tail matches the current page_frag free offset
 * - the data frag end sequence number matches the current write seq
 */
static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
				       const struct page_frag *pfrag,
				       const struct mptcp_data_frag *df)
{}
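
/* Sketch of the three checks listed above, assuming the mptcp_data_frag and
 * page_frag layouts from protocol.h; illustrative only, the in-tree
 * expression may differ.
 */
static bool __maybe_unused
mptcp_frag_can_collapse_to_sketch(const struct mptcp_sock *msk,
				  const struct page_frag *pfrag,
				  const struct mptcp_data_frag *df)
{
	return df && pfrag->page == df->page &&			/* same backing page... */
	       pfrag->size - pfrag->offset > 0 &&		/* ...with space still available */
	       pfrag->offset == df->offset + df->data_len &&	/* frag tail == page_frag free offset */
	       df->data_seq + df->data_len == msk->write_seq;	/* contiguous at the data-seq level */
}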

static void dfrag_uncharge(struct sock *sk, int len)
{}

static void dfrag_clear(struct sock *sk, struct mptcp_data_frag *dfrag)
{}

/* called under both the msk socket lock and the data lock */
static void __mptcp_clean_una(struct sock *sk)
{}

static void __mptcp_clean_una_wakeup(struct sock *sk)
{}

static void mptcp_clean_una_wakeup(struct sock *sk)
{}

static void mptcp_enter_memory_pressure(struct sock *sk)
{}

/* ensure we get enough memory for the frag hdr, beyond some minimal amount of
 * data
 */
static bool mptcp_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
{}

static struct mptcp_data_frag *
mptcp_carve_data_frag(const struct mptcp_sock *msk, struct page_frag *pfrag,
		      int orig_offset)
{}

struct mptcp_sendmsg_info {};

static int mptcp_check_allowed_size(const struct mptcp_sock *msk, struct sock *ssk,
				    u64 data_seq, int avail_size)
{}

static bool __mptcp_add_ext(struct sk_buff *skb, gfp_t gfp)
{}

static struct sk_buff *__mptcp_do_alloc_tx_skb(struct sock *sk, gfp_t gfp)
{}

static struct sk_buff *__mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp)
{}

static struct sk_buff *mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, bool data_lock_held)
{}

/* note: this always recomputes the csum on the whole skb, even
 * if we just appended a single frag. More state would be needed
 * to update it incrementally.
 */
static void mptcp_update_data_checksum(struct sk_buff *skb, int added)
{}
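
/* Sketch of the data-level checksum maintained above, following the RFC 8684
 * DSS checksum definition: a 16-bit ones' complement sum over a pseudo-header
 * (data sequence number, subflow sequence number, data-level length, zeros)
 * plus the mapped payload. As the note above says, recomputing over the whole
 * skb keeps things simple; updating the sum incrementally would need more
 * state. Field names on mpext follow include/net/mptcp.h; illustrative only.
 */
static __sum16 __maybe_unused
mptcp_dss_csum_sketch(const struct sk_buff *skb, const struct mptcp_ext *mpext)
{
	struct {
		__be64 data_seq;
		__be32 subflow_seq;
		__be16 data_len;
		__sum16 csum;		/* must be zero while computing */
	} __packed header = {
		.data_seq = cpu_to_be64(mpext->data_seq),
		.subflow_seq = cpu_to_be32(mpext->subflow_seq),
		.data_len = cpu_to_be16(mpext->data_len),
		.csum = 0,
	};
	__wsum csum;

	/* checksum the pseudo-header first, then the whole mapped payload */
	csum = csum_partial(&header, sizeof(header), 0);
	return csum_fold(skb_checksum(skb, 0, skb->len, csum));
}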

static void mptcp_update_infinite_map(struct mptcp_sock *msk,
				      struct sock *ssk,
				      struct mptcp_ext *mpext)
{}

#define MPTCP_MAX_GSO_SIZE

static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
			      struct mptcp_data_frag *dfrag,
			      struct mptcp_sendmsg_info *info)
{}

#define MPTCP_SEND_BURST_SIZE

struct subflow_send_info {};

void mptcp_subflow_set_active(struct mptcp_subflow_context *subflow)
{}

bool mptcp_subflow_active(struct mptcp_subflow_context *subflow)
{}

#define SSK_MODE_ACTIVE	0
#define SSK_MODE_BACKUP	1
#define SSK_MODE_MAX	2

/* implement the mptcp packet scheduler:
 * returns the subflow that will transmit the next DSS
 * and additionally updates the rtx timeout
 */
struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
{}
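
/* Simplified sketch of the default policy described above, assuming the
 * subflow context helpers from protocol.h: usable subflows are split into
 * non-backup and backup classes, within each class the one with the smallest
 * estimated "linger time" (queued bytes over pacing rate) wins, and the
 * non-backup class is preferred whenever it has a candidate. The real
 * scheduler additionally caps bursts and refreshes the rtx timeout.
 */
static struct sock *__maybe_unused
mptcp_pick_subflow_sketch(struct mptcp_sock *msk)
{
	u64 best_linger[2] = { U64_MAX, U64_MAX };	/* [0]=non-backup, [1]=backup */
	struct sock *best[2] = { NULL, NULL };
	struct mptcp_subflow_context *subflow;

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		int id = !!subflow->backup;
		u64 linger;

		if (!mptcp_subflow_active(subflow))
			continue;

		/* a smaller queued-bytes / pacing-rate ratio drains sooner */
		linger = (u64)READ_ONCE(ssk->sk_wmem_queued) << 32;
		linger = div64_ul(linger, READ_ONCE(ssk->sk_pacing_rate) + 1);
		if (linger < best_linger[id]) {
			best_linger[id] = linger;
			best[id] = ssk;
		}
	}

	/* prefer a non-backup subflow whenever one is available */
	return best[0] ? best[0] : best[1];
}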

static void mptcp_push_release(struct sock *ssk, struct mptcp_sendmsg_info *info)
{}

static void mptcp_update_post_push(struct mptcp_sock *msk,
				   struct mptcp_data_frag *dfrag,
				   u32 sent)
{}

void mptcp_check_and_set_pending(struct sock *sk)
{}

static int __subflow_push_pending(struct sock *sk, struct sock *ssk,
				  struct mptcp_sendmsg_info *info)
{}

void __mptcp_push_pending(struct sock *sk, unsigned int flags)
{}

static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk, bool first)
{}

static int mptcp_disconnect(struct sock *sk, int flags);

static int mptcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
				  size_t len, int *copied_syn)
{}

static int do_copy_data_nocache(struct sock *sk, int copy,
				struct iov_iter *from, char *to)
{}

/* open-code sk_stream_memory_free() plus sent limit computation to
 * avoid indirect calls in the fast path.
 * Called under the msk socket lock, so we can avoid a bunch of ONCE
 * annotations.
 */
static u32 mptcp_send_limit(const struct sock *sk)
{}
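
/* Sketch of the limit computation described above, with a hypothetical
 * notsent_lowat-style bound (here simply half the send buffer): once the
 * send buffer is full nothing more is accepted, and otherwise the amount of
 * data already queued at the data-sequence level but not yet pushed to any
 * subflow is charged against that bound. The in-tree helper works on the
 * real msk limits instead.
 */
static u32 __maybe_unused mptcp_send_limit_sketch(const struct sock *sk)
{
	const struct mptcp_sock *msk = mptcp_sk(sk);
	u64 not_sent;
	u32 lowat;

	/* open-coded sk_stream_memory_free(): no room left in the send buffer */
	if (sk->sk_wmem_queued >= READ_ONCE(sk->sk_sndbuf))
		return 0;

	lowat = READ_ONCE(sk->sk_sndbuf) >> 1;		/* assumed bound */
	not_sent = msk->write_seq - msk->snd_nxt;
	return not_sent < lowat ? lowat - (u32)not_sent : 0;
}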

static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{}

static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
				struct msghdr *msg,
				size_t len, int flags,
				struct scm_timestamping_internal *tss,
				int *cmsg_flags)
{}

/* receive buffer autotuning.  See tcp_rcv_space_adjust for more information.
 *
 * Only difference: use the highest RTT estimate among the subflows in use.
 */
static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
{}
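
/* Sketch of the "highest RTT" part called out above, assuming the subflow
 * context helpers from protocol.h: msk-level autotuning sizes the receive
 * buffer for the slowest path, so the advertised window can cover a full
 * delivery round on every subflow in use. The buffer growth itself is
 * omitted; see tcp_rcv_space_adjust() for that half.
 */
static u32 __maybe_unused mptcp_max_rcv_rtt_sketch(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	u32 max_rtt_us = 0;

	mptcp_for_each_subflow(msk, subflow) {
		const struct tcp_sock *tp = tcp_sk(mptcp_subflow_tcp_sock(subflow));

		/* rcv_rtt_est is the receiver-side estimate TCP autotuning uses */
		max_rtt_us = max(max_rtt_us, tp->rcv_rtt_est.rtt_us);
	}
	return max_rtt_us;
}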

static void __mptcp_update_rmem(struct sock *sk)
{}

static void __mptcp_splice_receive_queue(struct sock *sk)
{}

static bool __mptcp_move_skbs(struct mptcp_sock *msk)
{}

static unsigned int mptcp_inq_hint(const struct sock *sk)
{}

static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			 int flags, int *addr_len)
{}

static void mptcp_retransmit_timer(struct timer_list *t)
{}

static void mptcp_tout_timer(struct timer_list *t)
{}

/* Find an idle subflow.  Return NULL if there is unacked data at the TCP
 * level.
 *
 * A backup subflow is returned only if that is the only kind available.
 */
struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk)
{}
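
/* Sketch of the selection policy described above, assuming the subflow
 * context helpers from protocol.h: a subflow with data still unacked at the
 * TCP level makes retransmission pointless right now (NULL), otherwise an
 * active non-backup subflow is preferred and a backup one is used only as a
 * last resort. The in-tree version also involves the packet scheduler and
 * staleness tracking.
 */
static struct sock *__maybe_unused
mptcp_pick_retrans_sketch(struct mptcp_sock *msk)
{
	struct sock *backup = NULL, *pick = NULL;
	struct mptcp_subflow_context *subflow;

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		if (!mptcp_subflow_active(subflow))
			continue;

		/* still waiting for subflow-level acks: do not pile on more data */
		if (!tcp_rtx_and_write_queues_empty(ssk))
			return NULL;

		if (subflow->backup) {
			if (!backup)
				backup = ssk;
		} else if (!pick) {
			pick = ssk;
		}
	}
	return pick ? pick : backup;
}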

bool __mptcp_retransmit_pending_data(struct sock *sk)
{}

/* flags for __mptcp_close_ssk() */
#define MPTCP_CF_PUSH		BIT(1)
#define MPTCP_CF_FASTCLOSE	BIT(2)

/* be sure to send a reset only if the caller asked for it, and also
 * completely clean the subflow status when the subflow reaches the
 * TCP_CLOSE state
 */
static void __mptcp_subflow_disconnect(struct sock *ssk,
				       struct mptcp_subflow_context *subflow,
				       unsigned int flags)
{}

/* subflow sockets can be either outgoing (connect) or incoming
 * (accept).
 *
 * Outgoing subflows use in-kernel sockets.
 * Incoming subflows do not have their own 'struct socket' allocated,
 * so we need to use tcp_close() after detaching them from the mptcp
 * parent socket.
 */
static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
			      struct mptcp_subflow_context *subflow,
			      unsigned int flags)
{}

void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
		     struct mptcp_subflow_context *subflow)
{}

static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
{}

static void __mptcp_close_subflow(struct sock *sk)
{}

static bool mptcp_close_tout_expired(const struct sock *sk)
{}

static void mptcp_check_fastclose(struct mptcp_sock *msk)
{}

static void __mptcp_retrans(struct sock *sk)
{}

/* schedule the timeout timer for the relevant event: either close timeout
 * or mp_fail timeout. The close timeout takes precedence over the mp_fail one
 */
void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout)
{}
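
/* Sketch of the precedence rule stated above, taking both deadlines as
 * jiffies values supplied by the caller (an assumption; the in-tree helper
 * derives them from the msk state): a pending close timeout always wins over
 * a pending MP_FAIL timeout, and the sk timer is never armed in the past.
 */
static void __maybe_unused
mptcp_arm_tout_sketch(struct sock *sk, unsigned long close_tout,
		      unsigned long fail_tout)
{
	unsigned long target;

	if (close_tout)
		target = close_tout;		/* close timeout takes precedence */
	else if (fail_tout)
		target = fail_tout;
	else
		return;

	/* never arm the timer in the past */
	if (time_before(target, jiffies))
		target = jiffies;
	sk_reset_timer(sk, &sk->sk_timer, target);
}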

static void mptcp_mp_fail_no_response(struct mptcp_sock *msk)
{}

static void mptcp_do_fastclose(struct sock *sk)
{}

static void mptcp_worker(struct work_struct *work)
{}

static void __mptcp_init_sock(struct sock *sk)
{}

static void mptcp_ca_reset(struct sock *sk)
{}

static int mptcp_init_sock(struct sock *sk)
{}

static void __mptcp_clear_xmit(struct sock *sk)
{}

void mptcp_cancel_work(struct sock *sk)
{}

void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
{}

void mptcp_set_state(struct sock *sk, int state)
{}

static const unsigned char new_state[16] =;

static int mptcp_close_state(struct sock *sk)
{}

static void mptcp_check_send_data_fin(struct sock *sk)
{}

static void __mptcp_wr_shutdown(struct sock *sk)
{}

static void __mptcp_destroy_sock(struct sock *sk)
{}

void __mptcp_unaccepted_force_close(struct sock *sk)
{}

static __poll_t mptcp_check_readable(struct sock *sk)
{}

static void mptcp_check_listen_stop(struct sock *sk)
{}

bool __mptcp_close(struct sock *sk, long timeout)
{}

static void mptcp_close(struct sock *sk, long timeout)
{}

static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
{}

static int mptcp_disconnect(struct sock *sk, int flags)
{}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
{}

static void mptcp_copy_ip6_options(struct sock *newsk, const struct sock *sk)
{}
#endif

static void mptcp_copy_ip_options(struct sock *newsk, const struct sock *sk)
{}

struct sock *mptcp_sk_clone_init(const struct sock *sk,
				 const struct mptcp_options_received *mp_opt,
				 struct sock *ssk,
				 struct request_sock *req)
{}

void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
{}

void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags)
{}

static void mptcp_destroy(struct sock *sk)
{}

void __mptcp_data_acked(struct sock *sk)
{}

void __mptcp_check_push(struct sock *sk, struct sock *ssk)
{}

#define MPTCP_FLAGS_PROCESS_CTX_NEED

/* processes deferred events and flushes wmem */
static void mptcp_release_cb(struct sock *sk)
	__must_hold(&sk->sk_lock.slock)
{}
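
/* Sketch of the deferred-event pattern used here, with a hypothetical flag
 * bit: actions requested while the socket was owned by user context are
 * recorded as flag bits, release_cb() claims a whole batch under the data
 * lock, drops the lock while running the handlers (they may take subflow
 * locks) and loops until no flag is left set. Not the in-tree implementation.
 */
static void __maybe_unused mptcp_release_cb_sketch(struct sock *sk)
	__must_hold(&sk->sk_lock.slock)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	for (;;) {
		unsigned long flags = msk->cb_flags;

		if (!flags)
			break;

		/* claim the batch while still holding the data lock */
		msk->cb_flags &= ~flags;
		spin_unlock_bh(&sk->sk_lock.slock);

		if (flags & BIT(0))		/* hypothetical "push pending" bit */
			__mptcp_push_pending(sk, 0);

		cond_resched();
		spin_lock_bh(&sk->sk_lock.slock);
	}
}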

/* MP_JOIN client subflows must wait for the 4th ack before sending any data:
 * TCP can't schedule the delack timer before the subflow is fully established.
 * MPTCP uses the delack timer to do the 3rd ack retransmissions.
 */
static void schedule_3rdack_retransmission(struct sock *ssk)
{}

void mptcp_subflow_process_delegated(struct sock *ssk, long status)
{}

static int mptcp_hash(struct sock *sk)
{}

static void mptcp_unhash(struct sock *sk)
{}

static int mptcp_get_port(struct sock *sk, unsigned short snum)
{}

void mptcp_finish_connect(struct sock *ssk)
{}

void mptcp_sock_graft(struct sock *sk, struct socket *parent)
{}

bool mptcp_finish_join(struct sock *ssk)
{}

static void mptcp_shutdown(struct sock *sk, int how)
{}

static int mptcp_forward_alloc_get(const struct sock *sk)
{}

static int mptcp_ioctl_outq(const struct mptcp_sock *msk, u64 v)
{}

static int mptcp_ioctl(struct sock *sk, int cmd, int *karg)
{}

static void mptcp_subflow_early_fallback(struct mptcp_sock *msk,
					 struct mptcp_subflow_context *subflow)
{}

static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{}

static struct proto mptcp_prot =;

static int mptcp_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{}

static int mptcp_listen(struct socket *sock, int backlog)
{}

static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
			       struct proto_accept_arg *arg)
{}

static __poll_t mptcp_check_writeable(struct mptcp_sock *msk)
{}

static __poll_t mptcp_poll(struct file *file, struct socket *sock,
			   struct poll_table_struct *wait)
{}

static const struct proto_ops mptcp_stream_ops =;

static struct inet_protosw mptcp_protosw =;

static int mptcp_napi_poll(struct napi_struct *napi, int budget)
{}

void __init mptcp_proto_init(void)
{}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static const struct proto_ops mptcp_v6_stream_ops =;

static struct proto mptcp_v6_prot;

static struct inet_protosw mptcp_v6_protosw =;

int __init mptcp_proto_v6_init(void)
{}
#endif