/* linux/drivers/net/ethernet/qlogic/qed/qed_iwarp.c */

// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include "qed_cxt.h"
#include "qed_hw.h"
#include "qed_ll2.h"
#include "qed_rdma.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_ooo.h"

/* NOTE(review): the replacement text of every #define below has been stripped
 * from this copy of the file, so the names expand to nothing and cannot be
 * used as values. Restore the constants from the upstream driver.
 */
#define QED_IWARP_ORD_DEFAULT
#define QED_IWARP_IRD_DEFAULT
#define QED_IWARP_MAX_FW_MSS

/* Presumably a magic signature used to validate EP structures — value stripped. */
#define QED_EP_SIG

/* Enhanced MPA (v2) connection-setup header — presumably carries the IRD/ORD
 * negotiation fields (see RFC 6581). NOTE(review): the member list has been
 * stripped in this copy (empty struct is a GNU extension, not ISO C);
 * restore the members from the upstream driver.
 */
struct mpa_v2_hdr {};

/* MPA v2 negotiation flag/field bits (presumably per RFC 6581) —
 * NOTE(review): bit values stripped in this copy; restore from upstream.
 */
#define MPA_V2_PEER2PEER_MODEL
#define MPA_V2_SEND_RTR
#define MPA_V2_READ_RTR
#define MPA_V2_WRITE_RTR
#define MPA_V2_IRD_ORD_MASK

/* Presumably tests whether a negotiated MPA revision is v2 — body stripped. */
#define MPA_REV2(_mpa_rev)

/* NOTE(review): all macro values in this section were stripped from this copy;
 * restore from the upstream driver before building.
 */
#define QED_IWARP_INVALID_TCP_CID

/* Default TCP receive window sizes, presumably per chip (BB/AH) and port
 * count (2P/4P) — values stripped.
 */
#define QED_IWARP_RCV_WND_SIZE_DEF_BB_2P
#define QED_IWARP_RCV_WND_SIZE_DEF_BB_4P
#define QED_IWARP_RCV_WND_SIZE_DEF_AH_2P
#define QED_IWARP_RCV_WND_SIZE_DEF_AH_4P

#define QED_IWARP_RCV_WND_SIZE_MIN
#define TIMESTAMP_HEADER_SIZE
#define QED_IWARP_MAX_FIN_RT_DEFAULT

/* Presumably TCP feature enable bits (timestamps, delayed ack) and MPA
 * parameter flags (CRC, peer-to-peer) — values stripped.
 */
#define QED_IWARP_TS_EN
#define QED_IWARP_DA_EN
#define QED_IWARP_PARAM_CRC_NEEDED
#define QED_IWARP_PARAM_P2P

/* Presumably TCP retransmit / congestion-window / keepalive defaults —
 * values stripped.
 */
#define QED_IWARP_DEF_MAX_RT_TIME
#define QED_IWARP_DEF_CWND_FACTOR
#define QED_IWARP_DEF_KA_MAX_PROBE_CNT
#define QED_IWARP_DEF_KA_TIMEOUT
#define QED_IWARP_DEF_KA_INTERVAL

/* NOTE(review): every function body in this copy of the file is an empty stub
 * ("{}") — the implementations appear to have been stripped during extraction.
 * Comments added below are hedged reconstructions from names and surviving
 * comments only; restore the bodies from the upstream driver.
 */
static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
				 __le16 echo, union event_ring_data *data,
				 u8 fw_return_code);

/* Override devinfo with iWARP specific values */
void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn)
{}

/* Presumably performs one-time iWARP HW register setup — body stripped. */
void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{}

/* We have two cid maps, one for tcp which should be used only from passive
 * syn processing and replacing a pre-allocated ep in the list. The second
 * for active tcp and for QPs.
 */
static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid)
{}

/* Presumably fills the iWARP function-init ramrod data — body stripped. */
void
qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
			 struct iwarp_init_func_ramrod_data *p_ramrod)
{}

/* Presumably allocates a cid for active TCP / QP use — body stripped. */
static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid)
{}

/* Presumably marks @cid as belonging to the tcp cid map — body stripped. */
static void qed_iwarp_set_tcp_cid(struct qed_hwfn *p_hwfn, u32 cid)
{}

/* This function allocates a cid for passive tcp (called from syn receive)
 * the reason it's separate from the regular cid allocation is because it
 * is assured that these cids already have ilt allocated. They are preallocated
 * to ensure that we won't need to allocate memory during syn processing
 */
static int qed_iwarp_alloc_tcp_cid(struct qed_hwfn *p_hwfn, u32 *cid)
{}

/* Presumably creates an iWARP QP and reports results via @out_params. */
int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn,
			struct qed_rdma_qp *qp,
			struct qed_rdma_create_qp_out_params *out_params)
{}

/* Presumably sends a modify-QP ramrod to firmware — body stripped. */
static int qed_iwarp_modify_fw(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{}

/* Presumably maps a RoCE QP state to its iWARP equivalent — body stripped. */
enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state)
{}

/* Presumably maps an iWARP QP state back to RoCE — body stripped. */
static enum qed_roce_qp_state
qed_iwarp2roce_state(enum qed_iwarp_qp_state state)
{}

/* Human-readable names for the iWARP QP states, indexed by
 * enum qed_iwarp_qp_state; used in debug prints.
 * NOTE(review): the original line read "=;" (a syntax error — the
 * initializer was stripped); restored here.
 */
static const char * const iwarp_state_names[] = {
	"IDLE",
	"RTS",
	"TERMINATE",
	"CLOSING",
	"ERROR",
};

/* NOTE(review): function bodies in this section are empty stubs ("{}") —
 * implementations stripped in this copy; restore from the upstream driver.
 */

/* Presumably transitions @qp to @new_state; @internal likely distinguishes
 * driver-initiated transitions from user-requested ones — body stripped.
 */
int
qed_iwarp_modify_qp(struct qed_hwfn *p_hwfn,
		    struct qed_rdma_qp *qp,
		    enum qed_iwarp_qp_state new_state, bool internal)
{}

/* Presumably sends the firmware QP-destroy ramrod — body stripped. */
int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{}

/* Presumably frees an endpoint, optionally unlinking it from the active
 * list first — body stripped.
 */
static void qed_iwarp_destroy_ep(struct qed_hwfn *p_hwfn,
				 struct qed_iwarp_ep *ep,
				 bool remove_from_active_list)
{}

/* Presumably tears down an iWARP QP — body stripped. */
int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{}

/* Presumably allocates a connection endpoint into *@ep_out — body stripped. */
static int
qed_iwarp_create_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep **ep_out)
{}

/* Presumably a debug dump of the TCP-offload ramrod — body stripped. */
static void
qed_iwarp_print_tcp_ramrod(struct qed_hwfn *p_hwfn,
			   struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod)
{}

/* Presumably sends the TCP-offload ramrod for @ep's connection. */
static int
qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{}

/* Presumably handles an incoming MPA request on a passive connection. */
static void
qed_iwarp_mpa_received(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{}

/* Presumably sends the MPA-offload ramrod for @ep — body stripped. */
static int
qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{}

/* Presumably returns @ep to the free-endpoint pool — body stripped. */
static void
qed_iwarp_return_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{}

/* Presumably parses MPA private data received on @ep — body stripped. */
static void
qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{}

/* Presumably handles the MPA reply on an active connection — body stripped. */
static void
qed_iwarp_mpa_reply_arrived(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{}

/* Presumably yields "active"/"passive" for debug prints — body stripped. */
#define QED_IWARP_CONNECT_MODE_STRING(ep)

/* Called as a result of the event:
 * IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE
 * NOTE(review): body stripped in this copy.
 */
static void
qed_iwarp_mpa_complete(struct qed_hwfn *p_hwfn,
		       struct qed_iwarp_ep *ep, u8 fw_return_code)
{}

/* Presumably builds the MPA v2 private-data header for @ep and reports its
 * size via @mpa_data_size — body stripped.
 */
static void
qed_iwarp_mpa_v2_set_private(struct qed_hwfn *p_hwfn,
			     struct qed_iwarp_ep *ep, u8 *mpa_data_size)
{}

/* Presumably initiates an active-side iWARP connection — body stripped. */
int qed_iwarp_connect(void *rdma_cxt,
		      struct qed_iwarp_connect_in *iparams,
		      struct qed_iwarp_connect_out *oparams)
{}

/* Presumably pops an endpoint from the free pool — body stripped in this copy. */
static struct qed_iwarp_ep *qed_iwarp_get_free_ep(struct qed_hwfn *p_hwfn)
{}

/* NOTE(review): values stripped — restore from upstream driver. */
#define QED_IWARP_MAX_CID_CLEAN_TIME
#define QED_IWARP_MAX_NO_PROGRESS_CNT

/* This function waits for all the bits of a bmap to be cleared, as long as
 * there is progress ( i.e. the number of bits left to be cleared decreases )
 * the function continues.
 */
static int
qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap)
{}

/* Presumably waits for both cid maps to drain — body stripped. */
static int qed_iwarp_wait_for_all_cids(struct qed_hwfn *p_hwfn)
{}

/* Presumably frees the preallocated passive-tcp endpoints — body stripped. */
static void qed_iwarp_free_prealloc_ep(struct qed_hwfn *p_hwfn)
{}

/* Presumably (re)fills the preallocated-endpoint pool; @init likely marks
 * first-time allocation — body stripped.
 */
static int qed_iwarp_prealloc_ep(struct qed_hwfn *p_hwfn, bool init)
{}

/* Presumably allocates all iWARP per-function resources — body stripped. */
int qed_iwarp_alloc(struct qed_hwfn *p_hwfn)
{}

/* Presumably releases resources allocated by qed_iwarp_alloc(). */
void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn)
{}

/* Presumably accepts a pending passive connection — body stripped. */
int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams)
{}

/* Presumably rejects a pending passive connection — body stripped. */
int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams)
{}

/* Presumably a debug dump of connection-manager info — body stripped. */
static void
qed_iwarp_print_cm_info(struct qed_hwfn *p_hwfn,
			struct qed_iwarp_cm_info *cm_info)
{}

/* NOTE(review): bodies stripped ("{}" stubs) in this copy — restore from the
 * upstream driver.
 */

/* Presumably posts @buf to the ll2 rx chain of @handle — body stripped. */
static int
qed_iwarp_ll2_post_rx(struct qed_hwfn *p_hwfn,
		      struct qed_iwarp_ll2_buff *buf, u8 handle)
{}

/* Presumably checks for an existing endpoint matching @cm_info (duplicate
 * connection detection) — body stripped.
 */
static bool
qed_iwarp_ep_exists(struct qed_hwfn *p_hwfn, struct qed_iwarp_cm_info *cm_info)
{}

/* Presumably finds the listener matching @cm_info's address/port. */
static struct qed_iwarp_listener *
qed_iwarp_get_listener(struct qed_hwfn *p_hwfn,
		       struct qed_iwarp_cm_info *cm_info)
{}

/* Presumably parses eth/ip/tcp headers of a received packet into @cm_info,
 * mac addresses, payload length and tcp header offset — body stripped.
 */
static int
qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
		       struct qed_iwarp_cm_info *cm_info,
		       void *buf,
		       u8 *remote_mac_addr,
		       u8 *local_mac_addr,
		       int *payload_len, int *tcp_start_offset)
{}

/* Presumably looks up the in-progress FPDU tracking entry for @cid. */
static struct qed_iwarp_fpdu *qed_iwarp_get_curr_fpdu(struct qed_hwfn *p_hwfn,
						      u16 cid)
{}

/* Classification of a received MPA packet relative to FPDU alignment:
 * packed (fully aligned), partial (FPDU continues in a later segment),
 * or unaligned (starts mid-FPDU).
 * NOTE(review): the original enumerator list was stripped (an empty
 * enumerator list is a C constraint violation); restored here to match the
 * names in pkt_type_str[].
 */
enum qed_iwarp_mpa_pkt_type {
	QED_IWARP_MPA_PKT_PACKED,
	QED_IWARP_MPA_PKT_PARTIAL,
	QED_IWARP_MPA_PKT_UNALIGNED,
};

/* NOTE(review): macro values/bodies stripped in this copy — restore from the
 * upstream driver.
 */
#define QED_IWARP_INVALID_FPDU_LENGTH
#define QED_IWARP_MPA_FPDU_LENGTH_SIZE
#define QED_IWARP_MPA_CRC32_DIGEST_SIZE

/* Pad to multiple of 4 */
#define QED_IWARP_PDU_DATA_LEN_WITH_PAD(data_len)
#define QED_IWARP_FPDU_LEN_WITH_PAD(_mpa_len)

/* fpdu can be fragmented over maximum 3 bds: header, partial mpa, unaligned */
#define QED_IWARP_MAX_BDS_PER_FPDU

/* Debug-print names for enum qed_iwarp_mpa_pkt_type, in enumerator order.
 * NOTE(review): the original line read "=;" (a syntax error — the
 * initializer was stripped); restored here.
 */
static const char * const pkt_type_str[] = {
	"QED_IWARP_MPA_PKT_PACKED",
	"QED_IWARP_MPA_PKT_PARTIAL",
	"QED_IWARP_MPA_PKT_UNALIGNED"
};

/* NOTE(review): bodies stripped ("{}" stubs) in this copy — restore from the
 * upstream driver.
 */

/* Forward declaration — defined below after its users. */
static int
qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn,
		      struct qed_iwarp_fpdu *fpdu,
		      struct qed_iwarp_ll2_buff *buf);

/* Presumably classifies @mpa_data as packed/partial/unaligned given the
 * current @fpdu state and @tcp_payload_len — body stripped.
 */
static enum qed_iwarp_mpa_pkt_type
qed_iwarp_mpa_classify(struct qed_hwfn *p_hwfn,
		       struct qed_iwarp_fpdu *fpdu,
		       u16 tcp_payload_len, u8 *mpa_data)
{}

/* Presumably initializes @fpdu tracking state from a newly received buffer. */
static void
qed_iwarp_init_fpdu(struct qed_iwarp_ll2_buff *buf,
		    struct qed_iwarp_fpdu *fpdu,
		    struct unaligned_opaque_data *pkt_data,
		    u16 tcp_payload_size, u8 placement_offset)
{}

/* Presumably copies packet payload into the fpdu reassembly buffer. */
static int
qed_iwarp_cp_pkt(struct qed_hwfn *p_hwfn,
		 struct qed_iwarp_fpdu *fpdu,
		 struct unaligned_opaque_data *pkt_data,
		 struct qed_iwarp_ll2_buff *buf, u16 tcp_payload_size)
{}

/* Presumably derives the full FPDU length from the MPA length field. */
static void
qed_iwarp_update_fpdu_length(struct qed_hwfn *p_hwfn,
			     struct qed_iwarp_fpdu *fpdu, u8 *mpa_data)
{}

/* Presumably tests whether a packet ends at the TCP window right edge —
 * body stripped.
 */
#define QED_IWARP_IS_RIGHT_EDGE(_curr_pkt)

/* This function is used to recycle a buffer using the ll2 drop option. It
 * uses the mechanism to ensure that all buffers posted to tx before this one
 * were completed. The buffer sent here will be sent as a cookie in the tx
 * completion function and can then be reposted to rx chain when done. The flow
 * that requires this is the flow where a FPDU splits over more than 3 tcp
 * segments. In this case the driver needs to re-post a rx buffer instead of
 * the one received, but driver can't simply repost a buffer it copied from
 * as there is a case where the buffer was originally a packed FPDU, and is
 * partially posted to FW. Driver needs to ensure FW is done with it.
 */
static int
qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn,
		      struct qed_iwarp_fpdu *fpdu,
		      struct qed_iwarp_ll2_buff *buf)
{}

/* Presumably handles a packet at the window right edge — body stripped. */
static int
qed_iwarp_win_right_edge(struct qed_hwfn *p_hwfn, struct qed_iwarp_fpdu *fpdu)
{}

/* Presumably transmits a (re)aligned FPDU down to FW via ll2 tx. */
static int
qed_iwarp_send_fpdu(struct qed_hwfn *p_hwfn,
		    struct qed_iwarp_fpdu *fpdu,
		    struct unaligned_opaque_data *curr_pkt,
		    struct qed_iwarp_ll2_buff *buf,
		    u16 tcp_payload_size, enum qed_iwarp_mpa_pkt_type pkt_type)
{}

/* Presumably unpacks the two ll2 opaque data words into @curr_pkt. */
static void
qed_iwarp_mpa_get_data(struct qed_hwfn *p_hwfn,
		       struct unaligned_opaque_data *curr_pkt,
		       u32 opaque_data0, u32 opaque_data1)
{}

/* This function is called when an unaligned or incomplete MPA packet arrives
 * driver needs to align the packet, perhaps using previous data and send
 * it down to FW once it is aligned.
 * NOTE(review): bodies in this section are stripped ("{}" stubs) in this
 * copy — restore from the upstream driver.
 */
static int
qed_iwarp_process_mpa_pkt(struct qed_hwfn *p_hwfn,
			  struct qed_iwarp_ll2_mpa_buf *mpa_buf)
{}

/* Presumably drains the queue of pending mpa buffers — body stripped. */
static void qed_iwarp_process_pending_pkts(struct qed_hwfn *p_hwfn)
{}

/* ll2 rx completion for the MPA (unaligned-data) connection. */
static void
qed_iwarp_ll2_comp_mpa_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
{}

/* ll2 rx completion for the SYN connection (passive connect path). */
static void
qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
{}

/* ll2 rx buffer release callback — body stripped. */
static void qed_iwarp_ll2_rel_rx_pkt(void *cxt, u8 connection_handle,
				     void *cookie, dma_addr_t rx_buf_addr,
				     bool b_last_packet)
{}

/* ll2 tx completion callback — body stripped. */
static void qed_iwarp_ll2_comp_tx_pkt(void *cxt, u8 connection_handle,
				      void *cookie, dma_addr_t first_frag_addr,
				      bool b_last_fragment, bool b_last_packet)
{}

/* ll2 tx buffer release callback — body stripped. */
static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle,
				     void *cookie, dma_addr_t first_frag_addr,
				     bool b_last_fragment, bool b_last_packet)
{}

/* The only slowpath for iwarp ll2 is unalign flush. When this completion
 * is received, need to reset the FPDU.
 */
static void
qed_iwarp_ll2_slowpath(void *cxt,
		       u8 connection_handle,
		       u32 opaque_data_0, u32 opaque_data_1)
{}

/* Presumably terminates all iwarp ll2 connections — body stripped. */
static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn)
{}

/* Presumably allocates and posts @num_rx_bufs rx buffers of @buff_size. */
static int
qed_iwarp_ll2_alloc_buffers(struct qed_hwfn *p_hwfn,
			    int num_rx_bufs, int buff_size, u8 ll2_handle)
{}

/* Presumably computes the ll2 buffer size from @mtu — body stripped. */
#define QED_IWARP_MAX_BUF_SIZE(mtu)

/* Presumably establishes the iwarp ll2 connections (syn, ooo, mpa). */
static int
qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
		    struct qed_rdma_start_in_params *params,
		    u32 rcv_wnd_size)
{}

/* Per-chip default receive window sizes — presumably one entry per chip id
 * with 2-port and 4-port values (cf. QED_IWARP_RCV_WND_SIZE_DEF_* above).
 * NOTE(review): both the member list and the initializer were stripped in
 * this copy ("=;" is not valid C); restore from the upstream driver.
 */
static struct {} qed_iwarp_rcv_wnd_size[MAX_CHIP_IDS] =;

/* NOTE(review): bodies in this section are stripped ("{}" stubs) in this
 * copy — restore from the upstream driver.
 */

/* Presumably top-level iWARP start: ll2 setup plus parameter defaults. */
int qed_iwarp_setup(struct qed_hwfn *p_hwfn,
		    struct qed_rdma_start_in_params *params)
{}

/* Presumably top-level iWARP teardown — body stripped. */
int qed_iwarp_stop(struct qed_hwfn *p_hwfn)
{}

/* Presumably moves @ep's QP to error on a fatal FW event — body stripped. */
static void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn,
				  struct qed_iwarp_ep *ep,
				  u8 fw_return_code)
{}

/* Presumably dispatches FW exception codes for @ep — body stripped. */
static void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn,
					 struct qed_iwarp_ep *ep,
					 int fw_ret_code)
{}

/* Presumably cleans up after a failed active TCP connect — body stripped. */
static void
qed_iwarp_tcp_connect_unsuccessful(struct qed_hwfn *p_hwfn,
				   struct qed_iwarp_ep *ep, u8 fw_return_code)
{}

/* Presumably completes the active-side connect flow — body stripped. */
static void
qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn,
			   struct qed_iwarp_ep *ep, u8 fw_return_code)
{}

/* Presumably validates @ep (likely via QED_EP_SIG) — body stripped. */
static inline bool
qed_iwarp_check_ep_ok(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{}

/* Async event dispatcher (forward-declared at top of file) — body stripped. */
static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
				 __le16 echo, union event_ring_data *data,
				 u8 fw_return_code)
{}

/* Presumably registers a passive listener — body stripped. */
int
qed_iwarp_create_listen(void *rdma_cxt,
			struct qed_iwarp_listen_in *iparams,
			struct qed_iwarp_listen_out *oparams)
{}

/* Presumably unregisters a listener created above — body stripped. */
int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle)
{}

/* Presumably sends the MPA RTR (ready-to-receive) message — body stripped. */
int qed_iwarp_send_rtr(void *rdma_cxt, struct qed_iwarp_send_rtr_in *iparams)
{}

/* Presumably reports current QP attributes via @out_params — body stripped. */
void
qed_iwarp_query_qp(struct qed_rdma_qp *qp,
		   struct qed_rdma_query_qp_out_params *out_params)
{}