linux/drivers/infiniband/sw/rxe/rxe_comp.c

// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

/* states of the completion state machine driven by rxe_completer() */
enum comp_state { /* ... */ };

/* printable names for the completer states, indexed by enum comp_state */
static char *comp_state_name[] = { /* ... */ };

/* the 32 IBA RNR NAK timeout codes mapped to delays in microseconds */
static unsigned long rnrnak_usec[32] = { /* ... */ };

/* convert an RNR NAK timeout code into a timer interval in jiffies */
static inline unsigned long rnrnak_jiffies(u8 timeout)
{}
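
/*
 * Illustrative sketch (not the body of rnrnak_jiffies() above): one
 * plausible way to turn the 5-bit IBA RNR NAK timeout code into a
 * jiffies value, assuming rnrnak_usec[] maps each code to microseconds.
 * The masking and the one-jiffy minimum are this sketch's assumptions.
 */
static inline unsigned long example_rnrnak_jiffies(u8 timeout)
{
	/* only the low 5 bits of the timeout field are meaningful */
	unsigned long usec = rnrnak_usec[timeout & 0x1f];

	/* round up so a nonzero delay never becomes a zero-length timer */
	return max_t(unsigned long, usecs_to_jiffies(usec), 1);
}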

/* map a send work request opcode to the matching work completion opcode */
static enum ib_wc_opcode wr_to_wc_opcode(enum ib_wr_opcode opcode)
{}
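
/*
 * Illustrative sketch (not the body of wr_to_wc_opcode() above): a
 * plausible mapping from send work-request opcodes to the completion
 * opcodes reported to the user, using the standard enums from
 * <rdma/ib_verbs.h>. The exact set of opcodes handled, and the 0xff
 * "unknown" value, are assumptions of this sketch.
 */
static enum ib_wc_opcode example_wr_to_wc_opcode(enum ib_wr_opcode opcode)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return IB_WC_RDMA_WRITE;
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		return IB_WC_SEND;
	case IB_WR_RDMA_READ:
		return IB_WC_RDMA_READ;
	case IB_WR_ATOMIC_CMP_AND_SWP:
		return IB_WC_COMP_SWAP;
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		return IB_WC_FETCH_ADD;
	case IB_WR_LOCAL_INV:
		return IB_WC_LOCAL_INV;
	case IB_WR_REG_MR:
		return IB_WC_REG_MR;
	default:
		/* out-of-range value; a caller would treat this as an error */
		return 0xff;
	}
}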

/* retry timer callback: note that a retransmit timeout fired and kick the completer */
void retransmit_timer(struct timer_list *t)
{}

/* hand a received response packet to the completer and schedule it to run */
void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
{}
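
/*
 * Illustrative sketch (not the body of rxe_comp_queue_pkt() above): the
 * general shape of handing a response packet to the completer. The skb
 * list and the scheduling callback used here are placeholders supplied
 * by the caller, not the driver's real QP fields or task interface.
 */
static void example_comp_queue_pkt(struct sk_buff_head *example_resp_pkts,
				   struct sk_buff *skb,
				   void (*example_sched_completer)(void))
{
	/* stash the packet for the completer ... */
	skb_queue_tail(example_resp_pkts, skb);

	/* ... and make sure the completer runs soon to consume it */
	example_sched_completer();
}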

/* look up the send wqe that the incoming response packet should be matched against */
static inline enum comp_state get_wqe(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt,
				      struct rxe_send_wqe **wqe_p)
{}

/* restore the retry and RNR retry counters once forward progress is seen */
static inline void reset_retry_counters(struct rxe_qp *qp)
{}

/* classify the packet's PSN against the expected PSN and the wqe's PSN range
 * (24-bit wrap-around comparison; see the illustrative helper below)
 */
static inline enum comp_state check_psn(struct rxe_qp *qp,
					struct rxe_pkt_info *pkt,
					struct rxe_send_wqe *wqe)
{}
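
/*
 * Illustrative sketch: PSNs are 24-bit serial numbers, so "older/newer"
 * must be decided with wrap-around arithmetic. Shifting the 24-bit
 * difference into the top of a signed 32-bit value gives a sign that
 * follows the circular ordering. This helper is an illustration, not
 * the driver's own comparison routine.
 */
static inline int example_psn_compare(u32 psn_a, u32 psn_b)
{
	s32 diff = (psn_a - psn_b) << 8;

	return diff;	/* > 0: a after b, < 0: a before b, 0: equal */
}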

/* validate an ACK/NAK packet (opcode sequence and AETH syndrome) against the wqe */
static inline enum comp_state check_ack(struct rxe_qp *qp,
					struct rxe_pkt_info *pkt,
					struct rxe_send_wqe *wqe)
{}

/* copy the payload of an RDMA READ response into the buffers described by the wqe */
static inline enum comp_state do_read(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt,
				      struct rxe_send_wqe *wqe)
{}

/* copy the 64-bit result returned in an atomic ACK into the wqe's buffer */
static inline enum comp_state do_atomic(struct rxe_qp *qp,
					struct rxe_pkt_info *pkt,
					struct rxe_send_wqe *wqe)
{}

/* build the completion queue entry describing a finished send wqe */
static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			  struct rxe_cqe *cqe)
{}

/*
 * IBA Spec. Section 10.7.3.1 SIGNALED COMPLETIONS
 * ---------8<---------8<-------------
 * ...Note that if a completion error occurs, a Work Completion
 * will always be generated, even if the signaling
 * indicator requests an Unsignaled Completion.
 * ---------8<---------8<-------------
 */
static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{}
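
/*
 * Illustrative sketch of the rule quoted above: an unsignaled wqe
 * normally produces no work completion, but any error must generate
 * one regardless of the signaling indicator. The flag and status values
 * come from <rdma/ib_verbs.h>; a QP created with IB_SIGNAL_ALL_WR would
 * also complete every wqe, a case omitted here. The helper itself is
 * hypothetical, not the driver's decision function.
 */
static bool example_needs_completion(int send_flags, enum ib_wc_status status)
{
	/* errors always complete, per IBA 10.7.3.1 */
	if (status != IB_WC_SUCCESS)
		return true;

	/* otherwise only wqes posted with IB_SEND_SIGNALED complete */
	return !!(send_flags & IB_SEND_SIGNALED);
}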

/* detect completion of a send queue drain while the QP is in the SQD state */
static void comp_check_sq_drain_done(struct rxe_qp *qp)
{}

/* retire a send wqe that has been fully acknowledged */
static inline enum comp_state complete_ack(struct rxe_qp *qp,
					   struct rxe_pkt_info *pkt,
					   struct rxe_send_wqe *wqe)
{}

/* finish a send wqe once its last response has been processed and update PSN tracking */
static inline enum comp_state complete_wqe(struct rxe_qp *qp,
					   struct rxe_pkt_info *pkt,
					   struct rxe_send_wqe *wqe)
{}

/* drain incoming response packet queue */
static void drain_resp_pkts(struct rxe_qp *qp)
{}
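
/*
 * Illustrative sketch (not the body of drain_resp_pkts() above):
 * draining a response packet list simply dequeues and frees every
 * pending skb. The list passed in stands in for the QP's own
 * response-packet queue; any per-packet reference dropping is omitted.
 */
static void example_drain_resp_pkts(struct sk_buff_head *resp_pkts)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(resp_pkts)))
		kfree_skb(skb);
}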

/* complete send wqe with flush error */
static int flush_send_wqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{}

/* drain and optionally complete the send queue
 * if unable to complete a wqe, i.e. cq is full, stop
 * completing and flush the remaining wqes
 */
static void flush_send_queue(struct rxe_qp *qp, bool notify)
{}
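
/*
 * Illustrative sketch of the policy described in the comment above,
 * written against a toy ring of posted wqes rather than the driver's
 * real queue API. "notify" selects whether flush-error completions
 * (IB_WC_WR_FLUSH_ERR) are generated at all; a failed completion
 * (e.g. a full CQ, reported by the caller-supplied flush_one callback)
 * turns further completions off, but the queue is still drained.
 * All names here are the sketch's own.
 */
struct example_sq {
	struct rxe_send_wqe *wqe[64];	/* toy ring of posted wqes */
	unsigned int cons;		/* consumer index */
	unsigned int prod;		/* producer index */
};

static void example_flush_send_queue(struct example_sq *sq, bool notify,
				     int (*flush_one)(struct rxe_send_wqe *))
{
	while (sq->cons != sq->prod) {
		/* complete with a flush error unless the CQ already overflowed */
		if (notify && flush_one(sq->wqe[sq->cons % 64]))
			notify = false;

		/* always retire the wqe, completed or not */
		sq->cons++;
	}
}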

/* free a response packet's skb and drop the references taken when it was queued */
static void free_pkt(struct rxe_pkt_info *pkt)
{}

/* reset the retry timer if
 * - QP is type RC
 * - there is a packet sent by the requester that
 *   might be acked (we still might get spurious
 *   timeouts but try to keep them as few as possible)
 * - the timeout parameter is set
 * - the QP is alive
 */
static void reset_retry_timer(struct rxe_qp *qp)
{}
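
/*
 * Illustrative sketch of the conditions listed above, using a toy state
 * structure instead of struct rxe_qp: the timer is only re-armed for a
 * live RC QP with a nonzero timeout and at least one unacked PSN in
 * flight. Only mod_timer(), jiffies and example_psn_compare() (defined
 * earlier) are taken as given; every field name is the sketch's own.
 */
struct example_rc_state {
	struct timer_list retrans_timer;	/* retry timer */
	unsigned long timeout_jiffies;		/* 0 if timeouts are disabled */
	u32 req_psn;				/* next PSN to be sent */
	u32 comp_psn;				/* oldest unacknowledged PSN */
	bool is_rc;				/* QP is type RC */
	bool valid;				/* QP is alive */
};

static void example_reset_retry_timer(struct example_rc_state *s)
{
	if (!s->is_rc || !s->timeout_jiffies || !s->valid)
		return;

	/* only arm the timer if something unacked is actually in flight */
	if (example_psn_compare(s->req_psn, s->comp_psn) > 0)
		mod_timer(&s->retrans_timer, jiffies + s->timeout_jiffies);
}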

/* completer entry point: run the completion state machine for the QP */
int rxe_completer(struct rxe_qp *qp)
{}
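
/*
 * Illustrative sketch of the general shape of a completer state machine
 * like rxe_completer() above: pull a queued response packet, walk a few
 * states until the packet is consumed, and report whether the task
 * should run again. The state names, the per-run budget and the return
 * convention are all inventions of this sketch, not the driver's.
 */
enum example_comp_state {
	EXAMPLE_GET_PKT,
	EXAMPLE_CHECK_PKT,
	EXAMPLE_COMPLETE,
	EXAMPLE_DONE,
};

static int example_completer_loop(struct sk_buff_head *resp_pkts)
{
	enum example_comp_state state = EXAMPLE_GET_PKT;
	struct sk_buff *skb = NULL;
	int budget = 16;	/* arbitrary per-run packet budget */

	while (1) {
		switch (state) {
		case EXAMPLE_GET_PKT:
			if (!budget--)
				return 0;	/* more may remain: ask to run again */
			skb = skb_dequeue(resp_pkts);
			state = skb ? EXAMPLE_CHECK_PKT : EXAMPLE_DONE;
			break;
		case EXAMPLE_CHECK_PKT:
			/* a real completer validates PSN/opcode here and
			 * picks the next state accordingly
			 */
			state = EXAMPLE_COMPLETE;
			break;
		case EXAMPLE_COMPLETE:
			/* a real completer retires the wqe and posts a CQE */
			kfree_skb(skb);
			state = EXAMPLE_GET_PKT;
			break;
		case EXAMPLE_DONE:
			return -EAGAIN;		/* queue drained: nothing left */
		}
	}
}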