linux/net/rxrpc/input.c

// SPDX-License-Identifier: GPL-2.0-or-later
/* Processing of received RxRPC packets
 *
 * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) "RxRPC: " fmt

#include "ar-internal.h"

/* Override priority when generating ACKs for received DATA */
static const u8 rxrpc_ack_priority[RXRPC_ACK__INVALID] = {};
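
/*
 * Illustrative sketch only (the table's contents are elided above): how a
 * priority table like this might be consulted.  A queued ACK reason is only
 * upgraded when the newly requested reason carries a strictly higher
 * priority.  The helper name below is hypothetical.
 */
static u8 sketch_pick_ack_reason(u8 pending_reason, u8 new_reason)
{
	if (pending_reason >= RXRPC_ACK__INVALID ||
	    new_reason >= RXRPC_ACK__INVALID)
		return pending_reason;
	if (rxrpc_ack_priority[new_reason] > rxrpc_ack_priority[pending_reason])
		return new_reason;
	return pending_reason;
}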

static void rxrpc_proto_abort(struct rxrpc_call *call, rxrpc_seq_t seq,
			      enum rxrpc_abort_reason why)
{}

/*
 * Do TCP-style congestion management [RFC 5681].
 */
static void rxrpc_congestion_management(struct rxrpc_call *call,
					struct sk_buff *skb,
					struct rxrpc_ack_summary *summary,
					rxrpc_serial_t acked_serial)
{}
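
/*
 * Illustrative sketch only, not the kernel implementation: a minimal
 * RFC 5681-style window update over a hypothetical state struct, counting
 * the window in packets.  New ACKs grow the window exponentially in slow
 * start and by roughly one packet per RTT in congestion avoidance; a loss
 * event halves the slow-start threshold and resumes from there.
 */
struct sketch_cong_state {
	unsigned int cwnd;		/* congestion window, in packets */
	unsigned int ssthresh;		/* slow-start threshold, in packets */
	unsigned int acks_counted;	/* ACKs accumulated in congestion avoidance */
};

static void sketch_cong_on_ack(struct sketch_cong_state *c,
			       unsigned int newly_acked)
{
	if (c->cwnd < c->ssthresh) {
		/* Slow start: one extra packet per packet newly ACK'd. */
		c->cwnd += newly_acked;
	} else {
		/* Congestion avoidance: one extra packet per full window. */
		c->acks_counted += newly_acked;
		if (c->acks_counted >= c->cwnd) {
			c->acks_counted -= c->cwnd;
			c->cwnd++;
		}
	}
}

static void sketch_cong_on_loss(struct sketch_cong_state *c)
{
	/* Multiplicative decrease, never dropping below two packets. */
	c->ssthresh = c->cwnd / 2 > 2 ? c->cwnd / 2 : 2;
	c->cwnd = c->ssthresh;
	c->acks_counted = 0;
}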

/*
 * Degrade the congestion window if we haven't transmitted a packet for >1 RTT.
 */
void rxrpc_congestion_degrade(struct rxrpc_call *call)
{}
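
/*
 * Illustrative sketch only: one way to decay a congestion window after an
 * idle period, in the spirit of RFC 2861 congestion window validation.  For
 * each RTT that passed without a transmission, halve the window, but never
 * drop below a small restart window.  The state struct and names below are
 * hypothetical.
 */
struct sketch_idle_state {
	unsigned int cwnd;		/* congestion window, in packets */
	ktime_t last_tx;		/* time of the last transmission */
	u64 rtt_ns;			/* smoothed RTT estimate, in ns */
};

static void sketch_degrade_cwnd(struct sketch_idle_state *s, ktime_t now)
{
	s64 idle_ns = ktime_to_ns(ktime_sub(now, s->last_tx));

	while (idle_ns > (s64)s->rtt_ns && s->cwnd > 2) {
		s->cwnd /= 2;		/* halve once per idle RTT */
		idle_ns -= s->rtt_ns;
	}
}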

/*
 * Apply a hard ACK by advancing the Tx window.
 */
static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
				   struct rxrpc_ack_summary *summary)
{}
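
/*
 * Illustrative sketch only: rotating a transmit window forward to a hard-ACK
 * point.  Everything up to and including @to has been hard-ACK'd, so the
 * retained copies can be released and the window bottom advanced.  The queue
 * structure and names here are hypothetical.
 */
struct sketch_tx_entry {
	rxrpc_seq_t seq;
	struct sketch_tx_entry *next;
};

struct sketch_tx_window {
	rxrpc_seq_t bottom;		/* lowest un-hard-ACK'd sequence */
	struct sketch_tx_entry *queue;	/* retained packets, lowest seq first */
};

static void sketch_rotate_tx_window(struct sketch_tx_window *w, rxrpc_seq_t to)
{
	/* Signed differences cope with sequence number wrap. */
	while (w->queue && (s32)(to - w->queue->seq) >= 0) {
		struct sketch_tx_entry *e = w->queue;

		w->queue = e->next;
		/* Release the retained copy of packet e->seq here. */
	}
	if ((s32)(to - w->bottom) >= 0)
		w->bottom = to + 1;
}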

/*
 * End the transmission phase of a call.
 *
 * This occurs when we get an ACKALL packet, the first DATA packet of a reply,
 * or a final ACK packet.
 */
static void rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
			       enum rxrpc_abort_reason abort_why)
{}

/*
 * Begin the reply reception phase of a call.
 */
static bool rxrpc_receiving_reply(struct rxrpc_call *call)
{}

/*
 * End the packet reception phase.
 */
static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
{}
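
/*
 * Illustrative sketch only: the call phase progression managed by the three
 * functions above.  A client call stops transmitting once its request is
 * hard-ACK'd or the first reply DATA packet arrives, then receives the
 * reply, and completes when the reply is fully received.  The enum and
 * helper below are hypothetical simplifications, not the kernel's call
 * state machine.
 */
enum sketch_call_phase {
	SKETCH_PHASE_SEND_REQUEST,
	SKETCH_PHASE_AWAIT_REPLY,
	SKETCH_PHASE_RECV_REPLY,
	SKETCH_PHASE_COMPLETE,
};

static enum sketch_call_phase sketch_advance_phase(enum sketch_call_phase p,
						   bool reply_begun,
						   bool reply_complete)
{
	switch (p) {
	case SKETCH_PHASE_SEND_REQUEST:
		/* ACKALL, a final ACK or the first reply DATA ends Tx. */
		return reply_begun ? SKETCH_PHASE_RECV_REPLY :
				     SKETCH_PHASE_AWAIT_REPLY;
	case SKETCH_PHASE_AWAIT_REPLY:
		return reply_begun ? SKETCH_PHASE_RECV_REPLY : p;
	case SKETCH_PHASE_RECV_REPLY:
		return reply_complete ? SKETCH_PHASE_COMPLETE : p;
	default:
		return p;
	}
}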

static void rxrpc_input_update_ack_window(struct rxrpc_call *call,
					  rxrpc_seq_t window, rxrpc_seq_t wtop)
{}

/*
 * Push a DATA packet onto the Rx queue.
 */
static void rxrpc_input_queue_data(struct rxrpc_call *call, struct sk_buff *skb,
				   rxrpc_seq_t window, rxrpc_seq_t wtop,
				   enum rxrpc_receive_trace why)
{}
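
/*
 * Illustrative sketch only: wrap-safe checks against the receive window
 * bounds passed in above, assuming for illustration that @window is the next
 * in-order sequence expected and @wtop is one past the highest sequence that
 * will currently be accepted.  Signed differences handle sequence wrap; the
 * helper names are hypothetical.
 */
static bool sketch_seq_in_rx_window(rxrpc_seq_t seq,
				    rxrpc_seq_t window, rxrpc_seq_t wtop)
{
	return (s32)(seq - window) >= 0 && (s32)(seq - wtop) < 0;
}

static rxrpc_seq_t sketch_advance_window(rxrpc_seq_t window, rxrpc_seq_t seq)
{
	/* Receiving the in-order packet moves the window bottom up by one. */
	return seq == window ? window + 1 : window;
}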

/*
 * Process a DATA packet.
 */
static void rxrpc_input_data_one(struct rxrpc_call *call, struct sk_buff *skb,
				 bool *_notify, rxrpc_serial_t *_ack_serial, int *_ack_reason)
{}

/*
 * Split a jumbo packet and file the bits separately.
 */
static bool rxrpc_input_split_jumbo(struct rxrpc_call *call, struct sk_buff *skb)
{}
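
/*
 * Illustrative sketch only: walking the subpackets of a jumbo packet.  Each
 * non-final subpacket carries a fixed-size payload followed by a small
 * secondary header describing the next one; the final subpacket takes
 * whatever remains.  The sizes below are assumed constants for illustration,
 * not taken from the protocol headers.
 */
#define SKETCH_JUMBO_DATALEN	1412	/* assumed per-subpacket payload size */
#define SKETCH_JUMBO_HDRLEN	4	/* assumed secondary header size */

static unsigned int sketch_count_subpackets(unsigned int len, bool is_jumbo)
{
	unsigned int n = 1;

	if (!is_jumbo)
		return 1;
	while (len > SKETCH_JUMBO_DATALEN + SKETCH_JUMBO_HDRLEN) {
		len -= SKETCH_JUMBO_DATALEN + SKETCH_JUMBO_HDRLEN;
		n++;
	}
	return n;
}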

/*
 * Process a DATA packet, adding the packet to the Rx ring.  The caller's
 * packet ref must be passed on or discarded.
 */
static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
{}
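
/*
 * Illustrative sketch only of the "passed on or discarded" rule stated above:
 * whichever branch is taken, the reference the caller handed in is consumed
 * exactly once, so the caller must not touch the skb afterwards.  The helper
 * name is hypothetical.
 */
static void sketch_consume_or_free(struct sk_buff *skb, bool queued)
{
	if (queued)
		return;			/* the ref is now owned by the queue */
	kfree_skb(skb);			/* otherwise drop our ref here */
}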

/*
 * See if there's a cached RTT probe to complete.
 */
static void rxrpc_complete_rtt_probe(struct rxrpc_call *call,
				     ktime_t resp_time,
				     rxrpc_serial_t acked_serial,
				     rxrpc_serial_t ack_serial,
				     enum rxrpc_rtt_rx_trace type)
{}
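
/*
 * Illustrative sketch only: how a cached RTT probe might be matched and
 * completed.  A small table remembers the serial number and send time of
 * each outstanding probe; when an ACK arrives whose acked serial matches a
 * slot, the RTT sample is the difference between the ACK's arrival time and
 * the recorded send time.  The table layout and names are hypothetical.
 */
#define SKETCH_NR_PROBES 4

struct sketch_rtt_probe {
	rxrpc_serial_t	serial;		/* serial of the probing packet, 0 = unused */
	ktime_t		sent_at;	/* when the probe was transmitted */
};

static s64 sketch_complete_rtt_probe(struct sketch_rtt_probe *probes,
				     rxrpc_serial_t acked_serial,
				     ktime_t resp_time)
{
	int i;

	for (i = 0; i < SKETCH_NR_PROBES; i++) {
		if (probes[i].serial && probes[i].serial == acked_serial) {
			probes[i].serial = 0;	/* the slot is free again */
			return ktime_to_ns(ktime_sub(resp_time,
						     probes[i].sent_at));
		}
	}
	return -1;			/* no matching probe was cached */
}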

/*
 * Process the extra information that may be appended to an ACK packet
 */
static void rxrpc_input_ack_trailer(struct rxrpc_call *call, struct sk_buff *skb,
				    struct rxrpc_acktrailer *trailer)
{}
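
/*
 * Illustrative sketch only: an ACK trailer typically advertises things like
 * the peer's usable MTU and receive window so the sender can adjust its
 * packet size and the number of packets it keeps in flight.  The structure
 * below is a hypothetical stand-in, not the wire format.
 */
struct sketch_ack_trailer {
	__be32 if_mtu;			/* peer's usable interface MTU */
	__be32 rwind;			/* peer's receive window, in packets */
};

static void sketch_apply_ack_trailer(const struct sketch_ack_trailer *t,
				     unsigned int *max_data,
				     unsigned int *tx_winsize)
{
	unsigned int mtu = ntohl(t->if_mtu);
	unsigned int rwind = ntohl(t->rwind);

	if (mtu && mtu < *max_data)
		*max_data = mtu;	/* never send more than the peer can take */
	if (rwind)
		*tx_winsize = rwind;	/* cap packets in flight to the peer's window */
}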

/*
 * Determine how many nacks from the previous ACK have now been satisfied.
 */
static rxrpc_seq_t rxrpc_input_check_prev_ack(struct rxrpc_call *call,
					      struct rxrpc_ack_summary *summary,
					      rxrpc_seq_t seq)
{}

/*
 * Process individual soft ACKs.
 *
 * Each ACK in the array corresponds to one packet and can be either an ACK or
 * a NAK.  If we find an explicitly NAK'd packet we resend immediately;
 * packets that lie beyond the end of the ACK list are scheduled for resend by
 * the timer on the basis that the peer might just not have processed them at
 * the time the ACK was sent.
 */
static void rxrpc_input_soft_acks(struct rxrpc_call *call,
				  struct rxrpc_ack_summary *summary,
				  struct sk_buff *skb,
				  rxrpc_seq_t seq,
				  rxrpc_seq_t since)
{}
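
/*
 * Illustrative sketch only of the policy described above: walk the soft-ACK
 * array one byte per packet, starting at the sequence of the first entry.
 * An ACK byte counts the packet as provisionally received; a NAK byte marks
 * it for immediate retransmission; anything past the end of the array is
 * left to the resend timer.  The byte values and names are hypothetical.
 */
#define SKETCH_SACK_NAK	0
#define SKETCH_SACK_ACK	1

static unsigned int sketch_walk_soft_acks(const u8 *acks, unsigned int nr_acks,
					  rxrpc_seq_t first_seq,
					  void (*resend)(rxrpc_seq_t seq))
{
	unsigned int nr_nacked = 0, i;

	for (i = 0; i < nr_acks; i++) {
		rxrpc_seq_t seq = first_seq + i;

		if (acks[i] == SKETCH_SACK_NAK) {
			resend(seq);		/* explicitly NAK'd: resend now */
			nr_nacked++;
		}
		/* ACK'd entries stay queued until they are hard-ACK'd. */
	}
	return nr_nacked;
}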

/*
 * Return true if the ACK is valid - i.e. it doesn't appear to have regressed
 * with respect to the ack state conveyed by preceding ACKs.
 */
static bool rxrpc_is_ack_valid(struct rxrpc_call *call,
			       rxrpc_seq_t first_pkt, rxrpc_seq_t prev_pkt)
{}
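
/*
 * Illustrative sketch only: a regression check of the kind described above.
 * If the firstPacket or previousPacket values in a new ACK are lower (in
 * wrap-safe terms) than those seen in earlier ACKs, the ACK is stale or
 * reordered and must not be allowed to wind the state backwards.  The
 * parameter names mirror the function above; the helper itself is
 * hypothetical.
 */
static bool sketch_ack_is_valid(rxrpc_seq_t first_pkt, rxrpc_seq_t prev_pkt,
				rxrpc_seq_t seen_first, rxrpc_seq_t seen_prev)
{
	/* Signed differences cope with sequence wrap. */
	if ((s32)(first_pkt - seen_first) < 0)
		return false;	/* firstPacket went backwards */
	if ((s32)(prev_pkt - seen_prev) < 0)
		return false;	/* previousPacket went backwards */
	return true;
}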

/*
 * Process an ACK packet.
 *
 * ack.firstPacket is the sequence number of the first soft-ACK'd/NAK'd packet
 * in the ACK array.  Anything before that is hard-ACK'd and may be discarded.
 *
 * A hard-ACK means that a packet has been processed and may be discarded; a
 * soft-ACK means that the packet has been received, but may yet be discarded
 * by the peer and its retransmission requested, so it must be retained.  A
 * phase is complete when all packets are hard-ACK'd.
 */
static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
{}
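
/*
 * Illustrative sketch only of the window arithmetic in the comment above:
 * with ack.firstPacket = F and nAcks = N, everything up to F - 1 is
 * hard-ACK'd and can be released, and the soft-ACK/NAK array covers
 * sequences F .. F + N - 1.  The struct and names are hypothetical.
 */
struct sketch_ack_ranges {
	rxrpc_seq_t hard_ack;		/* highest hard-ACK'd sequence */
	rxrpc_seq_t soft_first;		/* first sequence in the soft-ACK array */
	rxrpc_seq_t soft_last;		/* last sequence in the soft-ACK array */
};

static struct sketch_ack_ranges sketch_ack_ranges(rxrpc_seq_t first_packet,
						  u8 nr_acks)
{
	struct sketch_ack_ranges r = {
		.hard_ack   = first_packet - 1,
		.soft_first = first_packet,
		.soft_last  = first_packet + nr_acks - 1,
	};
	return r;
}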

/*
 * Process an ACKALL packet.
 */
static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb)
{}

/*
 * Process an ABORT packet directed at a call.
 */
static void rxrpc_input_abort(struct rxrpc_call *call, struct sk_buff *skb)
{}

/*
 * Process an incoming call packet.
 */
void rxrpc_input_call_packet(struct rxrpc_call *call, struct sk_buff *skb)
{}
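
/*
 * Illustrative sketch only: packets reaching a call are dispatched on their
 * type to the handlers defined above.  The packet-type values here are
 * hypothetical placeholders; only the handler names come from this file.
 */
enum sketch_pkt_type {
	SKETCH_PKT_DATA,
	SKETCH_PKT_ACK,
	SKETCH_PKT_ACKALL,
	SKETCH_PKT_ABORT,
};

static void sketch_dispatch_call_packet(struct rxrpc_call *call,
					struct sk_buff *skb,
					enum sketch_pkt_type type)
{
	switch (type) {
	case SKETCH_PKT_DATA:	rxrpc_input_data(call, skb);	break;
	case SKETCH_PKT_ACK:	rxrpc_input_ack(call, skb);	break;
	case SKETCH_PKT_ACKALL:	rxrpc_input_ackall(call, skb);	break;
	case SKETCH_PKT_ABORT:	rxrpc_input_abort(call, skb);	break;
	}
}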

/*
 * Handle a new service call on a channel implicitly completing the preceding
 * call on that channel.  This does not apply to client conns.
 *
 * TODO: If callNumber > call_id + 1, renegotiate security.
 */
void rxrpc_implicit_end_call(struct rxrpc_call *call, struct sk_buff *skb)
{}