linux/drivers/infiniband/hw/mlx4/qp.c

/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/log2.h>
#include <linux/etherdevice.h>
#include <net/ip.h>
#include <linux/slab.h>
#include <linux/netdevice.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/uverbs_ioctl.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>

static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq,
			     struct mlx4_ib_cq *recv_cq);
static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq,
			       struct mlx4_ib_cq *recv_cq);
static int _mlx4_ib_modify_wq(struct ib_wq *ibwq, enum ib_wq_state new_state,
			      struct ib_udata *udata);

enum {};

enum {};

enum {};

enum {};

#ifndef ETH_ALEN
#define ETH_ALEN 6	/* standard Ethernet MAC address length */
#endif

static const __be32 mlx4_ib_opcode[] = {};

enum mlx4_ib_source_type {};

struct mlx4_ib_qp_event_work {};

static struct workqueue_struct *mlx4_ib_qp_event_wq;

static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{}

static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{}

/* used for INIT/CLOSE port logic */
static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{}

static void *get_wqe(struct mlx4_ib_qp *qp, int offset)
{}

static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n)
{}

static void *get_send_wqe(struct mlx4_ib_qp *qp, int n)
{}

/*
 * Stamp an SQ WQE so that it is invalid if prefetched: mark the
 * first four bytes of every 64-byte chunk with 0xffffffff, except
 * for the very first chunk of the WQE.
 */
static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n)
{}
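
/*
 * A minimal sketch of the stamping described above, assuming the WQE
 * starts at 'wqe' and spans 'size' bytes: write 0xffffffff into the
 * first dword of every 64-byte chunk, skipping the first chunk so the
 * control segment stays intact. The real stamp_send_wqe() derives the
 * WQE size from its control segment; that lookup is omitted here.
 */
static void stamp_send_wqe_sketch(void *wqe, int size)
{
	int i;

	for (i = 64; i < size; i += 64)
		*(__be32 *)(wqe + i) = cpu_to_be32(0xffffffff);
}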

static void mlx4_ib_handle_qp_event(struct work_struct *_work)
{}

static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
{}

static void mlx4_ib_wq_event(struct mlx4_qp *qp, enum mlx4_event type)
{}

static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
{}

static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
		       bool is_user, bool has_rq, struct mlx4_ib_qp *qp,
		       u32 inl_recv_sz)
{}

static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
			      enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp)
{}

static int set_user_sq_size(struct mlx4_ib_dev *dev,
			    struct mlx4_ib_qp *qp,
			    struct mlx4_ib_create_qp *ucmd)
{}

static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
{}

static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
{}

static bool qp_has_rq(struct ib_qp_init_attr *attr)
{}

static int qp0_enabled_vf(struct mlx4_dev *dev, int qpn)
{}

static void mlx4_ib_free_qp_counter(struct mlx4_ib_dev *dev,
				    struct mlx4_ib_qp *qp)
{}

static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
		      struct ib_qp_init_attr *init_attr,
		      struct mlx4_ib_create_qp_rss *ucmd)
{}

static int create_qp_rss(struct mlx4_ib_dev *dev,
			 struct ib_qp_init_attr *init_attr,
			 struct mlx4_ib_create_qp_rss *ucmd,
			 struct mlx4_ib_qp *qp)
{}

static int _mlx4_ib_create_qp_rss(struct ib_pd *pd, struct mlx4_ib_qp *qp,
				  struct ib_qp_init_attr *init_attr,
				  struct ib_udata *udata)
{}

/*
 * This function allocates a WQN from a range that is consecutive and
 * aligned to its size. If the current range is full, it creates a new
 * range and allocates the WQN from it; the new range is then used for
 * subsequent allocations.
 */
static int mlx4_ib_alloc_wqn(struct mlx4_ib_ucontext *context,
			     struct mlx4_ib_qp *qp, int range_size, int *wqn)
{}
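
/*
 * Hedged sketch of the range-based scheme described above; the
 * wqn_range_sketch bookkeeping is a stand-in for the driver's real
 * per-context state, not its actual layout. WQNs are handed out
 * consecutively from the current range; once it is exhausted, a new
 * size-aligned range is reserved via mlx4_qp_reserve_range() and
 * becomes the current range.
 */
struct wqn_range_sketch {
	int base;	/* first WQN in the range (aligned to size) */
	int size;	/* number of WQNs in the range */
	int used;	/* WQNs already handed out */
};

static int alloc_wqn_sketch(struct mlx4_dev *dev,
			    struct wqn_range_sketch *cur,
			    int range_size, int *wqn)
{
	int err;

	if (cur->used == cur->size) {
		/* Current range is full: reserve a fresh aligned range. */
		err = mlx4_qp_reserve_range(dev, range_size, range_size,
					    &cur->base, 0,
					    MLX4_RES_USAGE_USER_VERBS);
		if (err)
			return err;
		cur->size = range_size;
		cur->used = 0;
	}

	*wqn = cur->base + cur->used++;
	return 0;
}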

static void mlx4_ib_release_wqn(struct mlx4_ib_ucontext *context,
				struct mlx4_ib_qp *qp, bool dirty_release)
{}

static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
		     struct ib_udata *udata, struct mlx4_ib_qp *qp)
{}

static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata, int sqpn,
			    struct mlx4_ib_qp *qp)
{}

static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state)
{}

static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{}

static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
	__releases(&send_cq->lock) __releases(&recv_cq->lock)
{}

static void del_gid_entries(struct mlx4_ib_qp *qp)
{}

static struct mlx4_ib_pd *get_pd(struct mlx4_ib_qp *qp)
{}

static void get_cqs(struct mlx4_ib_qp *qp, enum mlx4_ib_source_type src,
		    struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq)
{}

static void destroy_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{}

static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
			      enum mlx4_ib_source_type src,
			      struct ib_udata *udata)
{}

static u32 get_sqp_num(struct mlx4_ib_dev *dev, struct ib_qp_init_attr *attr)
{}

static int _mlx4_ib_create_qp(struct ib_pd *pd, struct mlx4_ib_qp *qp,
			      struct ib_qp_init_attr *init_attr,
			      struct ib_udata *udata)
{}

int mlx4_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
		      struct ib_udata *udata)
{}

static int _mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
{}

int mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
{}

static int to_mlx4_st(struct mlx4_ib_dev *dev, enum mlx4_ib_qp_type type)
{}

static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr,
				   int attr_mask)
{}

static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, const struct ib_qp_attr *attr,
			    int attr_mask)
{}

static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
{}

static int _mlx4_set_path(struct mlx4_ib_dev *dev,
			  const struct rdma_ah_attr *ah,
			  u64 smac, u16 vlan_tag, struct mlx4_qp_path *path,
			  struct mlx4_roce_smac_vlan_info *smac_info, u8 port)
{}

static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp,
			 enum ib_qp_attr_mask qp_attr_mask,
			 struct mlx4_ib_qp *mqp,
			 struct mlx4_qp_path *path, u8 port,
			 u16 vlan_id, u8 *smac)
{}

static int mlx4_set_alt_path(struct mlx4_ib_dev *dev,
			     const struct ib_qp_attr *qp,
			     enum ib_qp_attr_mask qp_attr_mask,
			     struct mlx4_ib_qp *mqp,
			     struct mlx4_qp_path *path, u8 port)
{}

static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{}

static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev,
				    struct mlx4_ib_qp *qp,
				    struct mlx4_qp_context *context)
{}

static int create_qp_lb_counter(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{}

enum {};

static u8 gid_type_to_qpc(enum ib_gid_type gid_type)
{}

/*
 * Walk all of the RSS QP's children (WQs) and apply each WQ's HW state
 * according to its logical state, if this RSS QP is the first RSS QP
 * associated with the WQ.
 */
static int bringup_rss_rwqs(struct ib_rwq_ind_table *ind_tbl, u8 port_num,
			    struct ib_udata *udata)
{}
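
/*
 * Hedged sketch of the walk described above. The indirection table's
 * ind_tbl[] array holds the child WQs; each one is moved to RDY via
 * the WQ modify path. The "first associated RSS QP" refcounting and
 * the unwind-on-error the driver performs are omitted here.
 */
static int bringup_rss_rwqs_sketch(struct ib_rwq_ind_table *ind_tbl,
				   struct ib_udata *udata)
{
	int i, err;

	for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
		/* Transition this child WQ's HW state to ready. */
		err = _mlx4_ib_modify_wq(ind_tbl->ind_tbl[i], IB_WQS_RDY,
					 udata);
		if (err)
			return err;
	}

	return 0;
}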

static void bring_down_rss_rwqs(struct ib_rwq_ind_table *ind_tbl,
				struct ib_udata *udata)
{}

static void fill_qp_rss_context(struct mlx4_qp_context *context,
				struct mlx4_ib_qp *qp)
{}

static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
			       const struct ib_qp_attr *attr, int attr_mask,
			       enum ib_qp_state cur_state,
			       enum ib_qp_state new_state,
			       struct ib_udata *udata)
{}

enum {};

static int _mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			      int attr_mask, struct ib_udata *udata)
{}

int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{}

static int vf_get_qp0_qkey(struct mlx4_dev *dev, int qpn, u32 *qkey)
{}

static int build_sriov_qp0_header(struct mlx4_ib_qp *qp,
				  const struct ib_ud_wr *wr,
				  void *wqe, unsigned *mlx_seg_len)
{}

static u8 sl_to_vl(struct mlx4_ib_dev *dev, u8 sl, int port_num)
{}

static int fill_gid_by_hw_index(struct mlx4_ib_dev *ibdev, u8 port_num,
				int index, union ib_gid *gid,
				enum ib_gid_type *gid_type)
{}

#define MLX4_ROCEV2_QP1_SPORT 0xC000	/* UDP source port for RoCEv2 QP1 traffic */
static int build_mlx_header(struct mlx4_ib_qp *qp, const struct ib_ud_wr *wr,
			    void *wqe, unsigned *mlx_seg_len)
{}

static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
{}

static __be32 convert_access(int acc)
{}

static void set_reg_seg(struct mlx4_wqe_fmr_seg *fseg,
			const struct ib_reg_wr *wr)
{}

static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey)
{}

static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{}

static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg,
			   const struct ib_atomic_wr *wr)
{}

static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
				  const struct ib_atomic_wr *wr)
{}

static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
			     const struct ib_ud_wr *wr)
{}

static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
				    struct mlx4_wqe_datagram_seg *dseg,
				    const struct ib_ud_wr *wr,
				    enum mlx4_ib_qp_type qpt)
{}

static void build_tunnel_header(const struct ib_ud_wr *wr, void *wqe,
				unsigned *mlx_seg_len)
{}

static void set_mlx_icrc_seg(void *dseg)
{}

static void set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
{}

static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
{}

static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe,
			 const struct ib_ud_wr *wr, struct mlx4_ib_qp *qp,
			 unsigned *lso_seg_len, __be32 *lso_hdr_sz, __be32 *blh)
{}

static __be32 send_ieth(const struct ib_send_wr *wr)
{}

static void add_zero_len_inline(void *wqe)
{}

static int _mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			      const struct ib_send_wr **bad_wr, bool drain)
{}

int mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		      const struct ib_send_wr **bad_wr)
{}

static int _mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
			      const struct ib_recv_wr **bad_wr, bool drain)
{}

int mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr)
{}

static inline enum ib_qp_state to_ib_qp_state(enum mlx4_qp_state mlx4_state)
{}

static inline enum ib_mig_state to_ib_mig_state(int mlx4_mig_state)
{}

static int to_ib_qp_access_flags(int mlx4_flags)
{}

static void to_rdma_ah_attr(struct mlx4_ib_dev *ibdev,
			    struct rdma_ah_attr *ah_attr,
			    struct mlx4_qp_path *path)
{}

int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr)
{}

struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata)
{}

static int ib_wq2qp_state(enum ib_wq_state state)
{}

static int _mlx4_ib_modify_wq(struct ib_wq *ibwq, enum ib_wq_state new_state,
			      struct ib_udata *udata)
{}

int mlx4_ib_modify_wq(struct ib_wq *ibwq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata)
{}

int mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
{}

int mlx4_ib_create_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table,
				 struct ib_rwq_ind_table_init_attr *init_attr,
				 struct ib_udata *udata)
{}

struct mlx4_ib_drain_cqe {};

static void mlx4_ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
{}

/* This function returns only once the drained WR has completed */
static void handle_drain_completion(struct ib_cq *cq,
				    struct mlx4_ib_drain_cqe *sdrain,
				    struct mlx4_ib_dev *dev)
{}
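
/*
 * Hedged sketch of the drain pattern the comments above describe: the
 * done handler is expected to complete() a completion embedded next to
 * the CQE, which handle_drain_completion() then waits on. The field
 * names of drain_cqe_sketch are assumptions, as is the layout of
 * mlx4_ib_drain_cqe itself. A caller would post a WR whose wr_cqe
 * points at &sketch->cqe and then wait_for_completion(&sketch->done).
 */
struct drain_cqe_sketch {
	struct ib_cqe cqe;	/* ->done is invoked on completion */
	struct completion done;	/* signalled by the handler below */
};

static void drain_qp_done_sketch(struct ib_cq *cq, struct ib_wc *wc)
{
	struct drain_cqe_sketch *cqe =
		container_of(wc->wr_cqe, struct drain_cqe_sketch, cqe);

	complete(&cqe->done);
}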

void mlx4_ib_drain_sq(struct ib_qp *qp)
{}

void mlx4_ib_drain_rq(struct ib_qp *qp)
{}

int mlx4_ib_qp_event_init(void)
{}

void mlx4_ib_qp_event_cleanup(void)
{}