linux/net/mptcp/subflow.c

// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <crypto/sha2.h>
#include <crypto/utils.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/ip6_route.h>
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>

#include "protocol.h"
#include "mib.h"

#include <trace/events/mptcp.h>
#include <trace/events/sock.h>

static void mptcp_subflow_ops_undo_override(struct sock *ssk);

static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
				  enum linux_mptcp_mib_field field)
{}

static void subflow_req_destructor(struct request_sock *req)
{}

static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
				  void *hmac)
{}

static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk)
{}

/* validate received token and create truncated hmac and nonce for SYN-ACK */
static void subflow_req_create_thmac(struct mptcp_subflow_request_sock *subflow_req)
{}
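
/*
 * Hedged illustration (not the in-tree body): per RFC 8684 the MP_JOIN
 * SYN-ACK carries a fresh local nonce plus an HMAC-SHA256 of the peers'
 * nonces, keyed with both 64-bit keys and truncated to its 64 most
 * significant bits ("thmac").  The helper below is hypothetical and only
 * sketches the derivation; it assumes get_random_bytes() and the kernel's
 * unaligned-access helpers are available, and reuses subflow_generate_hmac()
 * declared above.
 */
static inline u64 example_syn_ack_thmac(u64 key1, u64 key2, u32 peer_nonce,
					u32 *local_nonce)
{
	u8 hmac[SHA256_DIGEST_SIZE];

	/* fresh nonce advertised in the SYN-ACK and covered by the HMAC */
	get_random_bytes(local_nonce, sizeof(*local_nonce));

	subflow_generate_hmac(key1, key2, *local_nonce, peer_nonce, hmac);

	/* only the 64 most significant bits go on the wire */
	return get_unaligned_be64(hmac);
}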

static struct mptcp_sock *subflow_token_join_request(struct request_sock *req)
{}

static void subflow_init_req(struct request_sock *req, const struct sock *sk_listener)
{}

static bool subflow_use_different_sport(struct mptcp_sock *msk, const struct sock *sk)
{}

static void subflow_add_reset_reason(struct sk_buff *skb, u8 reason)
{}

/* Init mptcp request socket.
 *
 * Returns an error code if a JOIN has failed and a TCP reset
 * should be sent.
 */
static int subflow_check_req(struct request_sock *req,
			     const struct sock *sk_listener,
			     struct sk_buff *skb)
{}
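
/*
 * Hedged illustration (not the in-tree code): the route_req handlers below
 * are assumed to call subflow_check_req() once a route has been resolved;
 * a non-zero return means the JOIN cannot be accepted, so the request is
 * dropped and answered with a TCP reset (deferred when syncookies are in
 * use).  This helper, and the reset-reason argument to send_reset(), are
 * assumptions for illustration only; the in-file mptcp_get_rst_reason()
 * defined below would normally pick a more specific reason.
 */
static inline struct dst_entry *
example_check_req_or_reset(const struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req, struct dst_entry *dst)
{
	if (!dst)
		return NULL;

	/* request is acceptable: hand the route back to the caller */
	if (!subflow_check_req(req, sk, skb))
		return dst;

	/* failed JOIN: drop the route and send a reset right away */
	dst_release(dst);
	if (!req->syncookie)
		tcp_request_sock_ops.send_reset(sk, skb,
						SK_RST_REASON_NOT_SPECIFIED);
	return NULL;
}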

int mptcp_subflow_init_cookie_req(struct request_sock *req,
				  const struct sock *sk_listener,
				  struct sk_buff *skb)
{}
EXPORT_SYMBOL_GPL(mptcp_subflow_init_cookie_req);

static enum sk_rst_reason mptcp_get_rst_reason(const struct sk_buff *skb)
{}

static struct dst_entry *subflow_v4_route_req(const struct sock *sk,
					      struct sk_buff *skb,
					      struct flowi *fl,
					      struct request_sock *req,
					      u32 tw_isn)
{}

static void subflow_prep_synack(const struct sock *sk, struct request_sock *req,
				struct tcp_fastopen_cookie *foc,
				enum tcp_synack_type synack_type)
{}

static int subflow_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
				  struct flowi *fl,
				  struct request_sock *req,
				  struct tcp_fastopen_cookie *foc,
				  enum tcp_synack_type synack_type,
				  struct sk_buff *syn_skb)
{}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static int subflow_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
				  struct flowi *fl,
				  struct request_sock *req,
				  struct tcp_fastopen_cookie *foc,
				  enum tcp_synack_type synack_type,
				  struct sk_buff *syn_skb)
{}

static struct dst_entry *subflow_v6_route_req(const struct sock *sk,
					      struct sk_buff *skb,
					      struct flowi *fl,
					      struct request_sock *req,
					      u32 tw_isn)
{}
#endif

/* validate received truncated hmac and create hmac for third ACK */
static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
{}
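
/*
 * Hedged illustration (not the in-tree body): to validate the SYN-ACK, the
 * initiator recomputes the peer's HMAC (per RFC 8684 the sender's key and
 * nonce come first) and compares its 64 most significant bits against the
 * truncated value received on the wire.  Helper name and parameters are
 * hypothetical; the unaligned-access helpers are assumed as above.
 */
static inline bool example_thmac_valid(u64 peer_key, u64 local_key,
				       u32 peer_nonce, u32 local_nonce,
				       u64 received_thmac)
{
	u8 hmac[SHA256_DIGEST_SIZE];

	subflow_generate_hmac(peer_key, local_key,
			      peer_nonce, local_nonce, hmac);

	return get_unaligned_be64(hmac) == received_thmac;
}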

void mptcp_subflow_reset(struct sock *ssk)
{}

static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct sock *sk)
{}

void __mptcp_sync_state(struct sock *sk, int state)
{}

static void subflow_set_remote_key(struct mptcp_sock *msk,
				   struct mptcp_subflow_context *subflow,
				   const struct mptcp_options_received *mp_opt)
{}

static void mptcp_propagate_state(struct sock *sk, struct sock *ssk,
				  struct mptcp_subflow_context *subflow,
				  const struct mptcp_options_received *mp_opt)
{}

static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
{}

static void subflow_set_local_id(struct mptcp_subflow_context *subflow, int local_id)
{}

static int subflow_chk_local_id(struct sock *sk)
{}

static int subflow_rebuild_header(struct sock *sk)
{}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static int subflow_v6_rebuild_header(struct sock *sk)
{}
#endif

static struct request_sock_ops mptcp_subflow_v4_request_sock_ops __ro_after_init;
static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops __ro_after_init;

static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{}

static void subflow_v4_req_destructor(struct request_sock *req)
{}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct request_sock_ops mptcp_subflow_v6_request_sock_ops __ro_after_init;
static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops __ro_after_init;
static struct inet_connection_sock_af_ops subflow_v6_specific __ro_after_init;
static struct inet_connection_sock_af_ops subflow_v6m_specific __ro_after_init;
static struct proto tcpv6_prot_override __ro_after_init;

static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{}

static void subflow_v6_req_destructor(struct request_sock *req)
{}
#endif

struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops,
					       struct sock *sk_listener,
					       bool attach_listener)
{}
EXPORT_SYMBOL(mptcp_subflow_reqsk_alloc);

/* validate hmac received in third ACK */
static bool subflow_hmac_valid(const struct request_sock *req,
			       const struct mptcp_options_received *mp_opt)
{}
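
/*
 * Hedged illustration (not the in-tree body): the MP_JOIN ACK carries the
 * leftmost 160 bits of the initiator's HMAC-SHA256 (RFC 8684), so the
 * listener recomputes the HMAC from the stored nonces and compares the
 * received bytes in constant time via crypto_memneq() from <crypto/utils.h>.
 * Helper name and parameters are hypothetical.
 */
static inline bool example_join_ack_hmac_valid(u64 peer_key, u64 local_key,
					       u32 peer_nonce, u32 local_nonce,
					       const u8 *received, size_t len)
{
	u8 hmac[SHA256_DIGEST_SIZE];

	if (len > sizeof(hmac))
		return false;

	subflow_generate_hmac(peer_key, local_key,
			      peer_nonce, local_nonce, hmac);

	return !crypto_memneq(hmac, received, len);
}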

static void subflow_ulp_fallback(struct sock *sk,
				 struct mptcp_subflow_context *old_ctx)
{}

void mptcp_subflow_drop_ctx(struct sock *ssk)
{}

void __mptcp_subflow_fully_established(struct mptcp_sock *msk,
				       struct mptcp_subflow_context *subflow,
				       const struct mptcp_options_received *mp_opt)
{}

static struct sock *subflow_syn_recv_sock(const struct sock *sk,
					  struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst,
					  struct request_sock *req_unhash,
					  bool *own_req)
{}

static struct inet_connection_sock_af_ops subflow_specific __ro_after_init;
static struct proto tcp_prot_override __ro_after_init;

enum mapping_status {};

static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
{}

static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
{}

static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
{}

static enum mapping_status validate_data_csum(struct sock *ssk, struct sk_buff *skb,
					      bool csum_reqd)
{}

static enum mapping_status get_mapping_status(struct sock *ssk,
					      struct mptcp_sock *msk)
{}

static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
				       u64 limit)
{}

/* sched mptcp worker to remove the subflow if no more data is pending */
static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
{}
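
/*
 * Hedged illustration (not the in-tree body) of the condition described
 * above: once the subflow receive queue is empty, the msk worker can be
 * scheduled to dispose of the closed subflow.  MPTCP_WORK_CLOSE_SUBFLOW,
 * msk->flags and mptcp_schedule_work() are assumed to be the definitions
 * from protocol.h; the helper name is hypothetical.
 */
static inline void example_sched_close_work(struct mptcp_sock *msk,
					    struct sock *ssk)
{
	if (skb_queue_empty(&ssk->sk_receive_queue) &&
	    !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
		mptcp_schedule_work((struct sock *)msk);
}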

static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
{}

static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
{}

static bool subflow_check_data_avail(struct sock *ssk)
{}

bool mptcp_subflow_data_available(struct sock *sk)
{}

/* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy,
 * not the ssk one.
 *
 * In mptcp, rwin is about the mptcp-level connection data.
 *
 * Data that is still on the ssk rx queue can thus be ignored:
 * as far as the mptcp peer is concerned, that data is still in flight.
 * The DSS ACK is updated when an skb is moved to the mptcp rx queue.
 */
void mptcp_space(const struct sock *ssk, int *space, int *full_space)
{}
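
/*
 * Hedged illustration (not the in-tree body) of the comment above: both
 * values are taken from the parent MPTCP socket, so the window advertised
 * on this subflow reflects msk-level receive buffer occupancy rather than
 * the subflow's own queue.  __mptcp_space() and mptcp_win_from_space() are
 * assumed to be the helpers from protocol.h; the function name is
 * hypothetical.
 */
static inline void example_mptcp_space(const struct sock *ssk,
				       int *space, int *full_space)
{
	const struct sock *sk = mptcp_subflow_ctx(ssk)->conn;

	*space = __mptcp_space(sk);
	*full_space = mptcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
}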

static void subflow_error_report(struct sock *ssk)
{}

static void subflow_data_ready(struct sock *sk)
{}

static void subflow_write_space(struct sock *ssk)
{}

static const struct inet_connection_sock_af_ops *
subflow_default_af_ops(struct sock *sk)
{}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
{}
#endif

void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
			 struct sockaddr_storage *addr,
			 unsigned short family)
{}

int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
			    const struct mptcp_addr_info *remote)
{}

static void mptcp_attach_cgroup(struct sock *parent, struct sock *child)
{}

static void mptcp_subflow_ops_override(struct sock *ssk)
{}

static void mptcp_subflow_ops_undo_override(struct sock *ssk)
{}

int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
				struct socket **new_sock)
{}

static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
							gfp_t priority)
{}

static void __subflow_state_change(struct sock *sk)
{}

static bool subflow_is_done(const struct sock *sk)
{}

static void subflow_state_change(struct sock *sk)
{}

void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk)
{}

static int subflow_ulp_init(struct sock *sk)
{}

static void subflow_ulp_release(struct sock *ssk)
{}

static void subflow_ulp_clone(const struct request_sock *req,
			      struct sock *newsk,
			      const gfp_t priority)
{}

static void tcp_release_cb_override(struct sock *ssk)
{}

static int tcp_abort_override(struct sock *ssk, int err)
{}

static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
	.name		= "mptcp",
	.owner		= THIS_MODULE,
	.init		= subflow_ulp_init,
	.release	= subflow_ulp_release,
	.clone		= subflow_ulp_clone,
};

static int subflow_ops_init(struct request_sock_ops *subflow_ops)
{}

void __init mptcp_subflow_init(void)
{}