linux/drivers/infiniband/hw/cxgb4/qp.c

/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <rdma/uverbs_ioctl.h>

#include "iw_cxgb4.h"

static int db_delay_usecs = 1;
module_param(db_delay_usecs, int, 0644);
MODULE_PARM_DESC(db_delay_usecs, "Usecs to delay awaiting db fifo to drain");

static int ocqp_support = 1;
module_param(ocqp_support, int, 0644);
MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");

int db_fc_threshold = 1000;
module_param(db_fc_threshold, int, 0644);
MODULE_PARM_DESC(db_fc_threshold,
		 "QP count/threshold that triggers automatic db flow control mode (default = 1000)");

int db_coalescing_threshold;
module_param(db_coalescing_threshold, int, 0644);
MODULE_PARM_DESC(db_coalescing_threshold,
		 "QP count/threshold that triggers disabling db coalescing (default = 0)");

static int max_fr_immd = T4_MAX_FR_IMMD;
module_param(max_fr_immd, int, 0644);
MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate");
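
/*
 * Usage note (illustrative): these knobs can be set at module load time,
 * e.g.
 *
 *	modprobe iw_cxgb4 db_fc_threshold=2000 db_delay_usecs=2
 *
 * and, because they are registered with 0644 permissions, they are also
 * writable at runtime via /sys/module/iw_cxgb4/parameters/.
 */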

static int alloc_ird(struct c4iw_dev *dev, u32 ird)
{}

static void free_ird(struct c4iw_dev *dev, int ird)
{}

static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
{}
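
/*
 * Hedged sketch of set_state(): the helper only serializes the QP
 * software state update under qhp->lock. The "_sketch" name is ours to
 * avoid clashing with the stub above; field names follow the c4iw_qp
 * definition in iw_cxgb4.h.
 */
static void set_state_sketch(struct c4iw_qp *qhp, enum c4iw_qp_state state)
{
	unsigned long flag;

	spin_lock_irqsave(&qhp->lock, flag);
	qhp->attr.state = state;
	spin_unlock_irqrestore(&qhp->lock, flag);
}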

static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{}

static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{}

static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{}
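
/*
 * Hedged sketch: dealloc_sq() dispatches on whether the SQ memory lives
 * on-chip (OCQP) or in host memory, using the t4_sq_onchip() test from
 * t4.h. Sketch only; name suffixed to avoid clashing with the stub above.
 */
static void dealloc_sq_sketch(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	if (t4_sq_onchip(sq))
		dealloc_oc_sq(rdev, sq);	/* release on-chip queue memory */
	else
		dealloc_host_sq(rdev, sq);	/* free DMA-coherent host memory */
}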

static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{}

static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{}

static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
{}
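
/*
 * Hedged sketch of the alloc_sq() policy: user QPs may try the on-chip
 * allocator first and fall back to host memory on failure; kernel QPs
 * always use host memory.
 */
static int alloc_sq_sketch(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
{
	int ret = -ENOSYS;

	if (user)
		ret = alloc_oc_sq(rdev, sq);	/* on-chip first for user QPs */
	if (ret)
		ret = alloc_host_sq(rdev, sq);	/* fall back to host memory */
	return ret;
}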

static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		      struct c4iw_dev_ucontext *uctx, int has_rq)
{}

/*
 * Determine the BAR2 virtual address and qid. If pbar2_pa is not NULL,
 * then this is a user mapping so compute the page-aligned physical address
 * for mapping.
 */
void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
			      enum cxgb4_bar2_qtype qtype,
			      unsigned int *pbar2_qid, u64 *pbar2_pa)
{}
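
/*
 * Hedged sketch of the c4iw_bar2_addrs() flow: ask the cxgb4 LLD for the
 * queue's BAR2 offset and qid, optionally report the page-aligned
 * physical address for a user mmap, and return the kernel virtual
 * address (NULL on T4, which has no BAR2 kernel mapping). This is a
 * sketch under the cxgb4_bar2_sge_qregs() LLD API as we understand it.
 */
static void __iomem *bar2_addrs_sketch(struct c4iw_rdev *rdev,
				       unsigned int qid,
				       enum cxgb4_bar2_qtype qtype,
				       unsigned int *pbar2_qid, u64 *pbar2_pa)
{
	u64 bar2_qoffset;
	int ret;

	ret = cxgb4_bar2_sge_qregs(rdev->lldi.ports[0], qid, qtype,
				   pbar2_pa ? 1 : 0, &bar2_qoffset, pbar2_qid);
	if (ret)
		return NULL;

	if (pbar2_pa)
		*pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK;

	if (is_t4(rdev->lldi.adapter_type))
		return NULL;		/* T4: user mapping only */

	return rdev->bar2_kva + bar2_qoffset;
}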

static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		     struct t4_cq *rcq, struct t4_cq *scq,
		     struct c4iw_dev_ucontext *uctx,
		     struct c4iw_wr_wait *wr_waitp,
		     int need_rq)
{}

static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
		      const struct ib_send_wr *wr, int max, u32 *plenp)
{}

static int build_isgl(__be64 *queue_start, __be64 *queue_end,
		      struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
		      int num_sge, u32 *plenp)
{}

static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
			   const struct ib_send_wr *wr, u8 *len16)
{}

static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
			    const struct ib_send_wr *wr, u8 *len16)
{}

static void build_immd_cmpl(struct t4_sq *sq, struct fw_ri_immd_cmpl *immdp,
			    struct ib_send_wr *wr)
{}

static void build_rdma_write_cmpl(struct t4_sq *sq,
				  struct fw_ri_rdma_write_cmpl_wr *wcwr,
				  const struct ib_send_wr *wr, u8 *len16)
{}

static int build_rdma_read(union t4_wr *wqe, const struct ib_send_wr *wr,
			   u8 *len16)
{}

static void post_write_cmpl(struct c4iw_qp *qhp, const struct ib_send_wr *wr)
{}

static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
			   const struct ib_recv_wr *wr, u8 *len16)
{}

static int build_srq_recv(union t4_recv_wr *wqe, const struct ib_recv_wr *wr,
			  u8 *len16)
{}

static void build_tpte_memreg(struct fw_ri_fr_nsmr_tpte_wr *fr,
			      const struct ib_reg_wr *wr, struct c4iw_mr *mhp,
			      u8 *len16)
{}

static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
			const struct ib_reg_wr *wr, struct c4iw_mr *mhp,
			u8 *len16, bool dsgl_supported)
{}

static int build_inv_stag(union t4_wr *wqe, const struct ib_send_wr *wr,
			  u8 *len16)
{}
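
/*
 * Hedged sketch: a LOCAL_INV work request only carries the stag to
 * invalidate, so build_inv_stag() reduces to filling the invalidate
 * payload of the WQE and reporting its length in 16-byte units.
 */
static int build_inv_stag_sketch(union t4_wr *wqe, const struct ib_send_wr *wr,
				 u8 *len16)
{
	wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
	wqe->inv.r2 = 0;
	*len16 = DIV_ROUND_UP(sizeof(wqe->inv), 16);
	return 0;
}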

void c4iw_qp_add_ref(struct ib_qp *qp)
{}

void c4iw_qp_rem_ref(struct ib_qp *qp)
{}
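
/*
 * Hedged sketch of the QP reference scheme shared by the two helpers
 * above: the final c4iw_qp_rem_ref() completes qp_rel_comp so that
 * c4iw_destroy_qp() can wait for all users to drop their references
 * before freeing. Field names assume the refcount_t variant of the
 * driver (older trees used a kref instead).
 */
static void qp_ref_sketch(struct ib_qp *qp, bool get)
{
	struct c4iw_qp *qhp = to_c4iw_qp(qp);

	if (get)
		refcount_inc(&qhp->qp_refcnt);
	else if (refcount_dec_and_test(&qhp->qp_refcnt))
		complete(&qhp->qp_rel_comp);	/* wakes the destroyer */
}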

static void add_to_fc_list(struct list_head *head, struct list_head *entry)
{}
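
/*
 * Hedged sketch: add_to_fc_list() must tolerate a QP that is already on
 * the flow-control list, hence the list_empty() guard on the entry
 * itself before the list_add_tail().
 */
static void add_to_fc_list_sketch(struct list_head *head,
				  struct list_head *entry)
{
	if (list_empty(entry))		/* not already queued */
		list_add_tail(entry, head);
}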

static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)
{}

static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
{}

static int ib_to_fw_opcode(int ib_opcode)
{}
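
/*
 * Hedged sketch of the opcode translation: a plain switch from
 * ib_wr_opcode values to the FW_RI_* opcodes in t4fw_ri_api.h, returning
 * -EINVAL for anything the firmware cannot express. Abbreviated to the
 * common cases.
 */
static int ib_to_fw_opcode_sketch(int ib_opcode)
{
	switch (ib_opcode) {
	case IB_WR_SEND:
		return FW_RI_SEND;
	case IB_WR_SEND_WITH_INV:
		return FW_RI_SEND_WITH_INV;
	case IB_WR_RDMA_WRITE:
		return FW_RI_RDMA_WRITE;
	case IB_WR_RDMA_READ:
	case IB_WR_RDMA_READ_WITH_INV:
		return FW_RI_READ_REQ;
	case IB_WR_REG_MR:
		return FW_RI_FAST_REGISTER;
	case IB_WR_LOCAL_INV:
		return FW_RI_LOCAL_INV;
	default:
		return -EINVAL;
	}
}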

static int complete_sq_drain_wr(struct c4iw_qp *qhp,
				const struct ib_send_wr *wr)
{}

static int complete_sq_drain_wrs(struct c4iw_qp *qhp,
				 const struct ib_send_wr *wr,
				 const struct ib_send_wr **bad_wr)
{}

static void complete_rq_drain_wr(struct c4iw_qp *qhp,
				 const struct ib_recv_wr *wr)
{}

static void complete_rq_drain_wrs(struct c4iw_qp *qhp,
				  const struct ib_recv_wr *wr)
{}

int c4iw_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		   const struct ib_send_wr **bad_wr)
{}

int c4iw_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr)
{}

static void defer_srq_wr(struct t4_srq *srq, union t4_recv_wr *wqe,
			 u64 wr_id, u8 len16)
{}

int c4iw_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
		       const struct ib_recv_wr **bad_wr)
{}

static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
				    u8 *ecode)
{}

static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
			   gfp_t gfp)
{}

/*
 * Assumes qhp lock is held.
 */
static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
		       struct c4iw_cq *schp)
{}

static void flush_qp(struct c4iw_qp *qhp)
{}

static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
		     struct c4iw_ep *ep)
{}

static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
{}

static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
{}

int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
		   enum c4iw_qp_attr_mask mask,
		   struct c4iw_qp_attributes *attrs,
		   int internal)
{}

int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
{}

int c4iw_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs,
		   struct ib_udata *udata)
{}

int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{}

struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
{}
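
/*
 * Hedged sketch: c4iw_get_qp() is a lookup in the device's QPID table;
 * get_qhp() is the iw_cxgb4.h accessor it is expected to wrap.
 */
static struct ib_qp *get_qp_sketch(struct ib_device *dev, int qpn)
{
	return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
}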

void c4iw_dispatch_srq_limit_reached_event(struct c4iw_srq *srq)
{}
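
/*
 * Hedged sketch: dispatching the SRQ limit event is plain ib_core
 * plumbing; fill an ib_event and hand it to ib_dispatch_event().
 */
static void srq_limit_event_sketch(struct c4iw_srq *srq)
{
	struct ib_event event = {
		.device		= &srq->rhp->ibdev,
		.event		= IB_EVENT_SRQ_LIMIT_REACHED,
		.element.srq	= &srq->ibsrq,
	};

	ib_dispatch_event(&event);
}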

int c4iw_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr,
		    enum ib_srq_attr_mask srq_attr_mask,
		    struct ib_udata *udata)
{}

int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_qp_init_attr *init_attr)
{}

static void free_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
			   struct c4iw_wr_wait *wr_waitp)
{}

static int alloc_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
			   struct c4iw_wr_wait *wr_waitp)
{}

void c4iw_copy_wr_to_srq(struct t4_srq *srq, union t4_recv_wr *wqe, u8 len16)
{}
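
/*
 * Hedged sketch of c4iw_copy_wr_to_srq(): copy len16 16-byte chunks of
 * the pending WR into the SRQ ring at the current producer index,
 * wrapping at the end of the queue. Layout assumptions (srq->queue,
 * srq->wq_pidx, T4_EQ_ENTRY_SIZE) follow t4.h.
 */
static void copy_wr_to_srq_sketch(struct t4_srq *srq, union t4_recv_wr *wqe,
				  u8 len16)
{
	u64 *src = (u64 *)wqe;
	u64 *dst = (u64 *)((u8 *)srq->queue +
			   srq->wq_pidx * T4_EQ_ENTRY_SIZE);

	while (len16--) {
		*dst++ = *src++;
		if (dst >= (u64 *)&srq->queue[srq->size])
			dst = (u64 *)srq->queue;	/* wrap */
		*dst++ = *src++;
		if (dst >= (u64 *)&srq->queue[srq->size])
			dst = (u64 *)srq->queue;	/* wrap */
	}
}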

int c4iw_create_srq(struct ib_srq *ib_srq, struct ib_srq_init_attr *attrs,
		    struct ib_udata *udata)
{}

int c4iw_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{}