/* linux/drivers/net/ipa/ipa_endpoint.c */

// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2024 Linaro Ltd.
 */

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/if_rmnet.h>
#include <linux/types.h>

#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_cmd.h"
#include "ipa_data.h"
#include "ipa_endpoint.h"
#include "ipa_gsi.h"
#include "ipa_interrupt.h"
#include "ipa_mem.h"
#include "ipa_modem.h"
#include "ipa_power.h"
#include "ipa_reg.h"
#include "ipa_table.h"
#include "ipa_version.h"

/* Hardware is told about receive buffers once a "batch" has been queued */
/* Queue this many buffers before telling the hardware (must be non-zero) */
#define IPA_REPLENISH_BATCH	16

/* The amount of RX buffer space consumed by standard skb overhead */
/* Headroom plus skb_shared_info tail room consumed in every RX page buffer */
#define IPA_RX_BUFFER_OVERHEAD	(NET_SKB_PAD + sizeof(struct skb_shared_info))

/* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
/* mux_id is in the low-order byte of the modem metadata (host byte order) */
#define IPA_ENDPOINT_QMAP_METADATA_MASK		0x000000ff

/* Retry an aggregation-active RX channel reset at most this many times */
#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX	3

/** enum ipa_status_opcode - IPA status opcode field hardware values */
/* Hardware-defined status opcode values (empty enumerator list is a C
 * constraint violation; values restored from the mainline IPA driver --
 * verify against the hardware documentation before relying on them).
 */
enum ipa_status_opcode {
	IPA_STATUS_OPCODE_PACKET		= 0x01,
	IPA_STATUS_OPCODE_NEW_RULE_PACKET	= 0x02,
	IPA_STATUS_OPCODE_DROPPED_PACKET	= 0x04,
	IPA_STATUS_OPCODE_SUSPENDED_PACKET	= 0x08,
	IPA_STATUS_OPCODE_LOG			= 0x10,
	IPA_STATUS_OPCODE_DCMP			= 0x20,
	IPA_STATUS_OPCODE_PACKET_2ND_PASS	= 0x40,
};

/** enum ipa_status_exception - IPA status exception field hardware values */
/* Hardware-defined status exception values (empty enumerator list is a C
 * constraint violation; values restored from the mainline IPA driver --
 * verify against the hardware documentation before relying on them).
 */
enum ipa_status_exception {
	/* 0 means no exception */
	IPA_STATUS_EXCEPTION_DEAGGR		= 0x01,
	IPA_STATUS_EXCEPTION_IPTYPE		= 0x04,
	IPA_STATUS_EXCEPTION_PACKET_LENGTH	= 0x08,
	IPA_STATUS_EXCEPTION_FRAG_RULE_MISS	= 0x10,
	IPA_STATUS_EXCEPTION_SW_FILTER		= 0x20,
	IPA_STATUS_EXCEPTION_NAT		= 0x40,	/* or IPv6 CT */
	IPA_STATUS_EXCEPTION_UC			= 0x80,
};

/** enum ipa_status_mask - IPA status mask field bitmask hardware values */
/* NOTE(review): enumerator list is missing -- an empty enum is a C
 * constraint violation (accepted only as a GNU extension).  The bitmask
 * values must be restored from the hardware documentation.
 */
enum ipa_status_mask {};

/* Special IPA filter/router rule field value indicating "rule miss" */
/* Filter/router rule fields are 10 bits wide; all-ones means "rule miss" */
#define IPA_STATUS_RULE_MISS	0x3ff

/* The IPA status nat_type field uses enum ipa_nat_type hardware values */

/* enum ipa_status_field_id - IPA packet status structure field identifiers */
/* NOTE(review): enumerator list is missing -- an empty enum is a C
 * constraint violation (accepted only as a GNU extension).  These are the
 * field identifiers consumed by ipa_status_extract(); restore them from
 * the original driver source.
 */
enum ipa_status_field_id {};

/* Size in bytes of an IPA packet status structure */
/* An IPA packet status structure is 8 32-bit words (32 bytes) */
#define IPA_STATUS_SIZE		sizeof(__le32[8])

/* IPA status structure decoder; looks up field values for a structure */
/* NOTE(review): empty stub -- the field-decoder body is not present in this
 * chunk.  A non-void function with no return statement hands callers an
 * indeterminate value (UB in C if used); must be implemented before use.
 */
static u32 ipa_status_extract(struct ipa *ipa, const void *data,
			      enum ipa_status_field_id field)
{}

/* Compute the aggregation size value to use for a given buffer size */
/* NOTE(review): empty stub; non-void return with no return statement --
 * using the result is UB until the body is restored.
 */
static u32 ipa_aggr_size_kb(u32 rx_buffer_size, bool aggr_hard_limit)
{}

/* Presumably validates one endpoint's configuration entry against the full
 * table -- body removed.  NOTE(review): empty stub; non-void return with no
 * return statement is UB if the result is used.
 */
static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
			    const struct ipa_gsi_endpoint_data *all_data,
			    const struct ipa_gsi_endpoint_data *data)
{}

/* Validate endpoint configuration data.  Return max defined endpoint ID */
/* NOTE(review): empty stub; non-void return with no return statement --
 * using the result is UB until the body is restored.
 */
static u32 ipa_endpoint_max(struct ipa *ipa, u32 count,
			    const struct ipa_gsi_endpoint_data *data)
{}

/* Allocate a transaction to use on a non-command endpoint */
/* NOTE(review): empty stub; non-void return with no return statement --
 * callers dereferencing the result hit UB until the body is restored.
 */
static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
						  u32 tre_count)
{}

/* suspend_delay represents suspend for RX, delay for TX endpoints.
 * Note that suspend is not supported starting with IPA v4.0, and
 * delay mode should not be used starting with IPA v4.2.
 */
/* NOTE(review): empty stub; non-void return with no return statement --
 * using the result is UB until the body is restored.
 */
static bool
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{}

/* We don't care what the previous state was for delay mode */
/* NOTE(review): body is an empty stub; implementation not in this chunk. */
static void
ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
{}

/* Presumably reports whether an aggregation frame is open on the endpoint --
 * body removed.  NOTE(review): empty stub; non-void return with no return
 * statement is UB if the result is used.
 */
static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
{}

/* Presumably forces the hardware to close an open aggregation frame --
 * body removed.  NOTE(review): empty stub.
 */
static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
{}

/**
 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
 * @endpoint:	Endpoint on which to emulate a suspend
 *
 *  Emulate suspend IPA interrupt to unsuspend an endpoint suspended
 *  with an open aggregation frame.  This is to work around a hardware
 *  issue in IPA version 3.5.1 where the suspend interrupt will not be
 *  generated when it should be.
 */
/* NOTE(review): body is an empty stub; implementation not in this chunk. */
static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
{}

/* Returns previous suspend state (true means suspend was enabled) */
/* NOTE(review): empty stub; non-void return with no return statement --
 * callers relying on the previous-state result hit UB until restored.
 */
static bool
ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
{}

/* Put all modem RX endpoints into suspend mode, and stop transmission
 * on all modem TX endpoints.  Prior to IPA v4.2, endpoint DELAY mode is
 * used for TX endpoints; starting with IPA v4.2 we use GSI channel flow
 * control instead.
 */
/* NOTE(review): body is an empty stub; implementation not in this chunk. */
void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
{}

/* Reset all modem endpoints to use the default exception endpoint */
/* NOTE(review): empty stub; non-void return with no return statement --
 * callers checking the error code hit UB until the body is restored.
 */
int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
{}

/* Presumably programs the ENDP_INIT_CFG register for this endpoint --
 * body removed.  NOTE(review): empty stub.
 */
static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
{}

/* Presumably programs the ENDP_INIT_NAT register for this endpoint --
 * body removed.  NOTE(review): empty stub.
 */
static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
{}

/* Presumably returns the QMAP header size to program for the endpoint --
 * body removed.  NOTE(review): empty stub; non-void return with no return
 * statement is UB if the result is used.
 */
static u32
ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
{}

/* Encoded value for ENDP_INIT_HDR register HDR_LEN* field(s) */
/* NOTE(review): empty stub; non-void return with no return statement --
 * using the result is UB until the body is restored.
 */
static u32 ipa_header_size_encode(enum ipa_version version,
				  const struct reg *reg, u32 header_size)
{}

/* Encoded value for ENDP_INIT_HDR register OFST_METADATA* field(s) */
/* NOTE(review): empty stub; non-void return with no return statement --
 * using the result is UB until the body is restored.
 */
static u32 ipa_metadata_offset_encode(enum ipa_version version,
				      const struct reg *reg, u32 offset)
{}

/**
 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
 * @endpoint:	Endpoint pointer
 *
 * We program QMAP endpoints so each packet received is preceded by a QMAP
 * header structure.  The QMAP header contains a 1-byte mux_id and 2-byte
 * packet size field, and we have the IPA hardware populate both for each
 * received packet.  The header is configured (in the HDR_EXT register)
 * to use big endian format.
 *
 * The packet size is written into the QMAP header's pkt_len field.  That
 * location is defined here using the HDR_OFST_PKT_SIZE field.
 *
 * The mux_id comes from a 4-byte metadata value supplied with each packet
 * by the modem.  It is *not* a QMAP header, but it does contain the mux_id
 * value that we want, in its low-order byte.  A bitmask defined in the
 * endpoint's METADATA_MASK register defines which byte within the modem
 * metadata contains the mux_id.  And the OFST_METADATA field programmed
 * here indicates where the extracted byte should be placed within the QMAP
 * header.
 */
/* NOTE(review): body is an empty stub; implementation not in this chunk. */
static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{}

/* Presumably programs the ENDP_INIT_HDR_EXT register for this endpoint --
 * body removed.  NOTE(review): empty stub.
 */
static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{}

/* Presumably programs the endpoint's METADATA_MASK register -- body
 * removed.  NOTE(review): empty stub.
 */
static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{}

/* Presumably programs the ENDP_INIT_MODE register for this endpoint --
 * body removed.  NOTE(review): empty stub.
 */
static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
{}

/* For IPA v4.5+, times are expressed using Qtime.  A time is represented
 * at one of several available granularities, which are configured in
 * ipa_qtime_config().  Three (or, starting with IPA v5.0, four) pulse
 * generators are set up with different "tick" periods.  A Qtime value
 * encodes a tick count along with an indication of a pulse generator
 * (which has a fixed tick period).  Two pulse generators are always
 * available to the AP; a third is available starting with IPA v5.0.
 * This function determines which pulse generator most accurately
 * represents the time period provided, and returns the tick count to
 * use to represent that time.
 */
/* NOTE(review): empty stub; non-void return with no return statement, and
 * *select is never written -- both are UB hazards for callers until the
 * body is restored.
 */
static u32
ipa_qtime_val(struct ipa *ipa, u32 microseconds, u32 max, u32 *select)
{}

/* Encode the aggregation timer limit (microseconds) based on IPA version */
/* NOTE(review): empty stub; non-void return with no return statement --
 * using the result is UB until the body is restored.
 */
static u32 aggr_time_limit_encode(struct ipa *ipa, const struct reg *reg,
				  u32 microseconds)
{}

/* Presumably programs the ENDP_INIT_AGGR register for this endpoint --
 * body removed.  NOTE(review): empty stub.
 */
static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{}

/* The head-of-line blocking timer is defined as a tick count.  For
 * IPA version 4.5 the tick count is based on the Qtimer, which is
 * derived from the 19.2 MHz SoC XO clock.  For older IPA versions
 * each tick represents 128 cycles of the IPA core clock.
 *
 * Return the encoded value representing the timeout period provided
 * that should be written to the ENDP_INIT_HOL_BLOCK_TIMER register.
 */
/* NOTE(review): empty stub; non-void return with no return statement --
 * using the result is UB until the body is restored.
 */
static u32 hol_block_timer_encode(struct ipa *ipa, const struct reg *reg,
				  u32 microseconds)
{}

/* If microseconds is 0, timeout is immediate */
/* NOTE(review): body is an empty stub; implementation not in this chunk. */
static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
					      u32 microseconds)
{}

/* Presumably toggles the HOL_BLOCK_EN register bit for this endpoint --
 * body removed.  NOTE(review): empty stub.
 */
static void
ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable)
{}

/* Assumes HOL_BLOCK is in disabled state */
/* NOTE(review): body is an empty stub; implementation not in this chunk. */
static void ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint,
					       u32 microseconds)
{}

/* Presumably disables head-of-line blocking for this endpoint -- body
 * removed.  NOTE(review): empty stub.
 */
static void ipa_endpoint_init_hol_block_disable(struct ipa_endpoint *endpoint)
{}

/* Presumably clears HOL blocking on all modem TX endpoints -- body
 * removed.  NOTE(review): empty stub.
 */
void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
{}

/* Presumably programs the ENDP_INIT_DEAGGR register for this endpoint --
 * body removed.  NOTE(review): empty stub.
 */
static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
{}

/* Presumably assigns the endpoint to its resource group -- body removed.
 * NOTE(review): empty stub.
 */
static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
{}

/* Presumably programs the ENDP_INIT_SEQ register for this endpoint --
 * body removed.  NOTE(review): empty stub.
 */
static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{}

/**
 * ipa_endpoint_skb_tx() - Transmit a socket buffer
 * @endpoint:	Endpoint pointer
 * @skb:	Socket buffer to send
 *
 * Returns:	0 if successful, or a negative error code
 */
/* NOTE(review): empty stub; non-void return with no return statement --
 * callers checking the error code hit UB until the body is restored.
 */
int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
{}

/* Presumably programs the ENDP_STATUS register for this endpoint -- body
 * removed.  NOTE(review): empty stub.
 */
static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
{}

/* Presumably queues one receive buffer onto a transaction -- body removed.
 * NOTE(review): empty stub; non-void return with no return statement is UB
 * if the result is used.
 */
static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
				      struct gsi_trans *trans)
{}

/**
 * ipa_endpoint_replenish() - Replenish endpoint receive buffers
 * @endpoint:	Endpoint to be replenished
 *
 * The IPA hardware can hold a fixed number of receive buffers for an RX
 * endpoint, based on the number of entries in the underlying channel ring
 * buffer.  If an endpoint's "backlog" is non-zero, it indicates how many
 * more receive buffers can be supplied to the hardware.  Replenishing for
 * an endpoint can be disabled, in which case buffers are not queued to
 * the hardware.
 */
/* NOTE(review): body is an empty stub; implementation not in this chunk. */
static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
{}

/* Presumably enables RX buffer replenishing for the endpoint -- body
 * removed.  NOTE(review): empty stub.
 */
static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
{}

/* Presumably disables RX buffer replenishing for the endpoint -- body
 * removed.  NOTE(review): empty stub.
 */
static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
{}

/* Presumably the deferred-work entry point that retries replenishing --
 * body removed.  NOTE(review): empty stub.
 */
static void ipa_endpoint_replenish_work(struct work_struct *work)
{}

/* Presumably copies received data into a freshly allocated skb and passes
 * it up the stack -- body removed.  NOTE(review): empty stub.
 */
static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
				  void *data, u32 len, u32 extra)
{}

/* Presumably builds an skb around a receive page -- body removed.
 * NOTE(review): empty stub; non-void return with no return statement is UB
 * if the result is used.
 */
static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
				   struct page *page, u32 len)
{}

/* The format of an IPA packet status structure is the same for several
 * status types (opcodes).  Other types aren't currently supported.
 */
/* NOTE(review): empty stub; non-void return with no return statement --
 * using the result is UB until the body is restored.
 */
static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
{}

/* Presumably decides whether a status record should be ignored -- body
 * removed.  NOTE(review): empty stub; non-void return with no return
 * statement is UB if the result is used.
 */
static bool
ipa_endpoint_status_skip(struct ipa_endpoint *endpoint, const void *data)
{}

/* Presumably checks the tag field of a status record -- body removed.
 * NOTE(review): empty stub; non-void return with no return statement is UB
 * if the result is used.
 */
static bool
ipa_endpoint_status_tag_valid(struct ipa_endpoint *endpoint, const void *data)
{}

/* Return whether the status indicates the packet should be dropped */
/* NOTE(review): empty stub; non-void return with no return statement --
 * using the result is UB until the body is restored.
 */
static bool
ipa_endpoint_status_drop(struct ipa_endpoint *endpoint, const void *data)
{}

/* Presumably walks the status records in a receive page and dispatches
 * each packet -- body removed.  NOTE(review): empty stub.
 */
static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
				      struct page *page, u32 total_len)
{}

/* Presumably the completion callback for a finished transaction -- body
 * removed.  NOTE(review): empty stub.
 */
void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
				 struct gsi_trans *trans)
{}

/* Presumably releases resources held by a transaction -- body removed.
 * NOTE(review): empty stub.
 */
void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
				struct gsi_trans *trans)
{}

/* Presumably programs the default (route-miss) endpoint register -- body
 * removed.  NOTE(review): empty stub.
 */
void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
{}

/* Presumably clears the default-route endpoint setting -- body removed.
 * NOTE(review): empty stub.
 */
void ipa_endpoint_default_route_clear(struct ipa *ipa)
{}

/**
 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
 * @endpoint:	Endpoint to be reset
 *
 * If aggregation is active on an RX endpoint when a reset is performed
 * on its underlying GSI channel, a special sequence of actions must be
 * taken to ensure the IPA pipeline is properly cleared.
 *
 * Return:	0 if successful, or a negative error code
 */
/* NOTE(review): empty stub; non-void return with no return statement --
 * callers checking the error code hit UB until the body is restored.
 */
static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
{}

/* Presumably resets the endpoint's underlying GSI channel -- body removed.
 * NOTE(review): empty stub.
 */
static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
{}

/* Presumably programs all of the endpoint's configuration registers --
 * body removed.  NOTE(review): empty stub.
 */
static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
{}

/* Presumably enables one endpoint for use -- body removed.  NOTE(review):
 * empty stub; non-void return with no return statement is UB if the result
 * is used.
 */
int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
{}

/* Presumably disables one endpoint -- body removed.  NOTE(review): empty
 * stub.
 */
void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
{}

/* Presumably suspends one endpoint -- body removed.  NOTE(review): empty
 * stub.
 */
void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
{}

/* Presumably resumes one previously suspended endpoint -- body removed.
 * NOTE(review): empty stub.
 */
void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
{}

/* Presumably suspends all AP endpoints -- body removed.  NOTE(review):
 * empty stub.
 */
void ipa_endpoint_suspend(struct ipa *ipa)
{}

/* Presumably resumes all AP endpoints -- body removed.  NOTE(review):
 * empty stub.
 */
void ipa_endpoint_resume(struct ipa *ipa)
{}

/* Presumably per-endpoint setup performed during ipa_endpoint_setup() --
 * body removed.  NOTE(review): empty stub.
 */
static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
{}

/* Presumably inverse of ipa_endpoint_setup_one() -- body removed.
 * NOTE(review): empty stub.
 */
static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
{}

/* Presumably sets up all defined endpoints -- body removed.  NOTE(review):
 * empty stub.
 */
void ipa_endpoint_setup(struct ipa *ipa)
{}

/* Presumably inverse of ipa_endpoint_setup() -- body removed.
 * NOTE(review): empty stub.
 */
void ipa_endpoint_teardown(struct ipa *ipa)
{}

/* Presumably inverse of ipa_endpoint_config() -- body removed.
 * NOTE(review): empty stub.
 */
void ipa_endpoint_deconfig(struct ipa *ipa)
{}

/* Presumably validates defined endpoints against what hardware reports --
 * body removed.  NOTE(review): empty stub; non-void return with no return
 * statement is UB if the result is used.
 */
int ipa_endpoint_config(struct ipa *ipa)
{}

/* Presumably initializes one endpoint's software state from config data --
 * body removed.  NOTE(review): empty stub.
 */
static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
				  const struct ipa_gsi_endpoint_data *data)
{}

/* Presumably inverse of ipa_endpoint_init_one() -- body removed.
 * NOTE(review): empty stub.
 */
static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
{}

/* Presumably inverse of ipa_endpoint_init() -- body removed.
 * NOTE(review): empty stub.
 */
void ipa_endpoint_exit(struct ipa *ipa)
{}

/* Returns a bitmask of endpoints that support filtering, or 0 on error */
/* NOTE(review): empty stub; the declared contract (bitmask of filtering
 * endpoints, 0 on error) cannot be met by a body with no return statement
 * -- using the result is UB until the body is restored.
 */
int ipa_endpoint_init(struct ipa *ipa, u32 count,
		      const struct ipa_gsi_endpoint_data *data)
{}