linux/drivers/infiniband/hw/cxgb4/t4.h

/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __T4_H__
#define __T4_H__

#include "t4_hw.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4_tcb.h"
#include "t4fw_ri_api.h"

#define T4_MAX_NUM_PD
#define T4_MAX_MR_SIZE
#define T4_PAGESIZE_MASK
#define T4_STAG_UNSET
#define T4_FW_MAJ
#define PCIE_MA_SYNC_A

struct t4_status_page {};
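
/*
 * The field layout above is elided.  As a hedged sketch of the idea (field
 * names here are assumptions inferred from how the helpers below behave,
 * not necessarily the exact in-tree layout): the status page trails each
 * queue, with the first 64-bit flit hardware-owned and the rest
 * software-owned, roughly
 *
 *	struct t4_status_page {
 *		__be32 rsvd1;		(flit 0, hardware-owned)
 *		__be16 rsvd2;
 *		__be16 qid;
 *		__be16 cidx;
 *		__be16 pidx;
 *		u8 qp_err;		(flit 1 on, software-owned)
 *		u8 db_off;
 *		u8 pad[2];
 *		u16 host_wq_pidx;
 *		u16 host_cidx;
 *		u16 host_pidx;
 *	};
 *
 * t4_set_wq_in_error() and t4_disable_wq_db() below would then set qp_err
 * and db_off respectively.
 */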

#define T4_RQT_ENTRY_SHIFT
#define T4_RQT_ENTRY_SIZE
#define T4_EQ_ENTRY_SIZE

#define T4_SQ_NUM_SLOTS
#define T4_SQ_NUM_BYTES
#define T4_MAX_SEND_SGE
#define T4_MAX_SEND_INLINE
#define T4_MAX_WRITE_INLINE
#define T4_MAX_WRITE_SGE
#define T4_MAX_FR_IMMD
#define T4_MAX_FR_IMMD_DEPTH
#define T4_MAX_FR_DSGL
#define T4_MAX_FR_DSGL_DEPTH

static inline int t4_max_fr_depth(int use_dsgl)
{}
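
/*
 * The values and body above are elided.  The intent, as a hedged sketch:
 * each T4_MAX_* limit is whatever fits in the fixed per-WR byte budget
 * (T4_SQ_NUM_BYTES) after the firmware WR header, and t4_max_fr_depth()
 * simply selects between the two fast-register page-list limits:
 *
 *	static inline int t4_max_fr_depth(int use_dsgl)
 *	{
 *		return use_dsgl ? T4_MAX_FR_DSGL_DEPTH : T4_MAX_FR_IMMD_DEPTH;
 *	}
 */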

#define T4_RQ_NUM_SLOTS
#define T4_RQ_NUM_BYTES
#define T4_MAX_RECV_SGE

#define T4_WRITE_CMPL_MAX_SGL
#define T4_WRITE_CMPL_MAX_CQE

union t4_wr {};

union t4_recv_wr {};
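
/*
 * The union members are elided above.  The idea, as a hedged sketch: each
 * union overlays the firmware work-request formats from t4fw_ri_api.h on a
 * raw flit array sized to the queue slot, e.g.
 *
 *	union t4_wr {
 *		struct fw_ri_send_wr send;
 *		struct fw_ri_rdma_write_wr write;
 *		struct fw_ri_rdma_read_wr read;
 *		...
 *		__be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) *
 *			     T4_SQ_NUM_SLOTS];
 *	};
 *
 * so one WR slot can be viewed either as a typed firmware WR or as raw
 * 64-bit flits for copying (see pio_copy() below).
 */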

static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid,
			       enum fw_wr_opcodes opcode, u8 flags, u8 len16)
{}
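
/*
 * Body elided above.  A hedged sketch of what it must do, given the union
 * layout: stamp the common firmware header fields through one member view
 * and zero the reserved bytes, e.g.
 *
 *	static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid,
 *				       enum fw_wr_opcodes opcode, u8 flags,
 *				       u8 len16)
 *	{
 *		wqe->send.opcode = (u8)opcode;
 *		wqe->send.flags = flags;
 *		wqe->send.wrid = wrid;
 *		wqe->send.len16 = len16;
 *	}
 *
 * len16 is the total WR length in 16-byte units, the granularity in which
 * the SGE measures work requests.
 */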

/* CQE/AE status codes */
#define T4_ERR_SUCCESS
#define T4_ERR_STAG				/* STAG invalid: either the */
						/* STAG is out of range, is 0, */
						/* or the STAG key mismatches */
#define T4_ERR_PDID				/* PDID mismatch */
#define T4_ERR_QPID				/* QPID mismatch */
#define T4_ERR_ACCESS				/* invalid access right */
#define T4_ERR_WRAP				/* wrap error */
#define T4_ERR_BOUND				/* base and bounds violation */
#define T4_ERR_INVALIDATE_SHARED_MR		/* attempt to invalidate a */
						/* shared memory region */
#define T4_ERR_INVALIDATE_MR_WITH_MW_BOUND	/* attempt to invalidate an */
						/* MR with an MW bound to it */
#define T4_ERR_ECC				/* ECC error detected */
#define T4_ERR_ECC_PSTAG			/* ECC error detected when */
						/* reading PSTAG for a MW  */
						/* Invalidate */
#define T4_ERR_PBL_ADDR_BOUND			/* PBL address out of bounds: */
						/* software error */
#define T4_ERR_SWFLUSH				/* software flushed completion */
#define T4_ERR_CRC				/* CRC error */
#define T4_ERR_MARKER				/* marker error */
#define T4_ERR_PDU_LEN_ERR			/* invalid PDU length */
#define T4_ERR_OUT_OF_RQE			/* out of RQEs */
#define T4_ERR_DDP_VERSION			/* wrong DDP version */
#define T4_ERR_RDMA_VERSION			/* wrong RDMA version */
#define T4_ERR_OPCODE				/* invalid RDMA opcode */
#define T4_ERR_DDP_QUEUE_NUM			/* invalid DDP queue number */
#define T4_ERR_MSN				/* MSN error */
#define T4_ERR_TBIT				/* tag bit not set correctly */
#define T4_ERR_MO				/* MO not zero for TERMINATE */
						/* or READ_REQ */
#define T4_ERR_MSN_GAP
#define T4_ERR_MSN_RANGE
#define T4_ERR_IRD_OVERFLOW
#define T4_ERR_RQE_ADDR_BOUND			/* RQE address out of bounds: */
						/* software error */
#define T4_ERR_INTERNAL_ERR			/* internal error (opcode */
						/* mismatch) */
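
/*
 * The numeric values above are elided.  For context, a hedged sketch of how
 * a CQ poll routine typically maps these onto verbs completion statuses
 * (the exact mapping lives in the driver's cq.c, not here; the grouping
 * below is illustrative):
 *
 *	switch (CQE_STATUS(cqe)) {
 *	case T4_ERR_SUCCESS:
 *		wc->status = IB_WC_SUCCESS;
 *		break;
 *	case T4_ERR_STAG:
 *	case T4_ERR_ACCESS:
 *		wc->status = IB_WC_LOC_ACCESS_ERR;
 *		break;
 *	case T4_ERR_SWFLUSH:
 *		wc->status = IB_WC_WR_FLUSH_ERR;
 *		break;
 *	default:
 *		wc->status = IB_WC_GENERAL_ERR;
 *		break;
 *	}
 */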
/*
 * CQE defs
 */
struct t4_cqe {};

/* macros for flit 0 of the cqe */

#define CQE_QPID_S
#define CQE_QPID_M
#define CQE_QPID_G(x)
#define CQE_QPID_V(x)

#define CQE_SWCQE_S
#define CQE_SWCQE_M
#define CQE_SWCQE_G(x)
#define CQE_SWCQE_V(x)

#define CQE_DRAIN_S
#define CQE_DRAIN_M
#define CQE_DRAIN_G(x)
#define CQE_DRAIN_V(x)

#define CQE_STATUS_S
#define CQE_STATUS_M
#define CQE_STATUS_G(x)
#define CQE_STATUS_V(x)

#define CQE_TYPE_S
#define CQE_TYPE_M
#define CQE_TYPE_G(x)
#define CQE_TYPE_V(x)

#define CQE_OPCODE_S
#define CQE_OPCODE_M
#define CQE_OPCODE_G(x)
#define CQE_OPCODE_V(x)
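
/*
 * The shift/mask values above are elided, but the naming follows the usual
 * Chelsio accessor pattern: for each FIELD, _S is the bit offset within the
 * flit, _M the mask after shifting, _G(x) extracts the field and _V(x)
 * positions a value for writing.  Schematically (the numbers here are
 * placeholders, not the real values):
 *
 *	#define CQE_STATUS_S	5
 *	#define CQE_STATUS_M	0x1F
 *	#define CQE_STATUS_G(x)	(((x) >> CQE_STATUS_S) & CQE_STATUS_M)
 *	#define CQE_STATUS_V(x)	((x) << CQE_STATUS_S)
 *
 * The CQE_*(x) convenience macros below would then apply a _G accessor to
 * the big-endian header flit, e.g. CQE_STATUS_G(be32_to_cpu((x)->header)).
 */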

#define SW_CQE(x)
#define DRAIN_CQE(x)
#define CQE_QPID(x)
#define CQE_TYPE(x)
#define SQ_TYPE(x)
#define RQ_TYPE(x)
#define CQE_STATUS(x)
#define CQE_OPCODE(x)

#define CQE_SEND_OPCODE(x)

#define CQE_LEN(x)

/* used for RQ completion processing */
#define CQE_WRID_STAG(x)
#define CQE_WRID_MSN(x)
#define CQE_ABS_RQE_IDX(x)
#define CQE_IMM_DATA(x)

/* used for SQ completion processing */
#define CQE_WRID_SQ_IDX(x)
#define CQE_WRID_FR_STAG(x)

/* generic accessor macros */
#define CQE_WRID_HI(x)
#define CQE_WRID_LOW(x)
#define CQE_DRAIN_COOKIE(x)

/* macros for flit 3 of the cqe */
#define CQE_GENBIT_S
#define CQE_GENBIT_M
#define CQE_GENBIT_G(x)
#define CQE_GENBIT_V(x)

#define CQE_OVFBIT_S
#define CQE_OVFBIT_M
#define CQE_OVFBIT_G(x)

#define CQE_IQTYPE_S
#define CQE_IQTYPE_M
#define CQE_IQTYPE_G(x)

#define CQE_TS_M
#define CQE_TS_G(x)

#define CQE_OVFBIT(x)
#define CQE_GENBIT(x)
#define CQE_TS(x)

struct t4_swsqe {};

static inline pgprot_t t4_pgprot_wc(pgprot_t prot)
{}
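
/*
 * Body elided above.  A hedged sketch: return a write-combining protection
 * for mapping user doorbells where the architecture supports it, otherwise
 * fall back to an uncached mapping, roughly
 *
 *	static inline pgprot_t t4_pgprot_wc(pgprot_t prot)
 *	{
 *	#if defined(__i386__) || defined(__x86_64__)
 *		return pgprot_writecombine(prot);
 *	#else
 *		return pgprot_noncached(prot);
 *	#endif
 *	}
 */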

enum { T4_SQ_ONCHIP = (1 << 0) };	/* SQ allocated in on-chip memory */

struct t4_sq {};

struct t4_swrqe {};

struct t4_rq {};

struct t4_wq {};

struct t4_srq_pending_wr {};

struct t4_srq {};
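
/*
 * The queue structures above are elided.  A hedged sketch of the fields the
 * inline helpers below appear to rely on (names are assumptions): each
 * queue keeps both a software view (indexed in WRs) and a hardware view
 * (indexed in 64-byte slots) of the same ring, e.g. for the SQ roughly
 *
 *	struct t4_sq {
 *		union t4_wr *queue;
 *		dma_addr_t dma_addr;
 *		void __iomem *bar2_va;
 *		u16 size;
 *		u16 cidx;
 *		u16 pidx;
 *		u16 wq_pidx;
 *		u16 in_use;
 *		u16 flags;
 *	};
 */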

static inline u32 t4_srq_avail(struct t4_srq *srq)
{}

static inline void t4_srq_produce(struct t4_srq *srq, u8 len16)
{}

static inline void t4_srq_produce_pending_wr(struct t4_srq *srq)
{}

static inline void t4_srq_consume_pending_wr(struct t4_srq *srq)
{}

static inline void t4_srq_produce_ooo(struct t4_srq *srq)
{}

static inline void t4_srq_consume_ooo(struct t4_srq *srq)
{}

static inline void t4_srq_consume(struct t4_srq *srq)
{}
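
/*
 * The SRQ produce/consume bodies are elided above.  Their common shape, as
 * a hedged sketch (pidx/cidx/in_use/wq_pidx per the t4_sq sketch earlier):
 * producing advances the producer index with wrap and converts the WR's
 * 16-byte length units into 64-byte hardware slots, e.g.
 *
 *	static inline void t4_srq_produce(struct t4_srq *srq, u8 len16)
 *	{
 *		srq->in_use++;
 *		if (++srq->pidx == srq->size)
 *			srq->pidx = 0;
 *		srq->wq_pidx += DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
 *		if (srq->wq_pidx >= srq->size * T4_RQ_NUM_SLOTS)
 *			srq->wq_pidx %= srq->size * T4_RQ_NUM_SLOTS;
 *	}
 *
 * consume does the mirror-image bookkeeping on cidx.  The pending_wr and
 * ooo variants track WRs that completed out of order so they can be
 * replayed to the queue in order.
 */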

static inline int t4_rqes_posted(struct t4_wq *wq)
{}

static inline int t4_rq_empty(struct t4_wq *wq)
{}

static inline u32 t4_rq_avail(struct t4_wq *wq)
{}

static inline void t4_rq_produce(struct t4_wq *wq, u8 len16)
{}

static inline void t4_rq_consume(struct t4_wq *wq)
{}

static inline u16 t4_rq_host_wq_pidx(struct t4_wq *wq)
{}

static inline u16 t4_rq_wq_size(struct t4_wq *wq)
{}

static inline int t4_sq_onchip(struct t4_sq *sq)
{}

static inline int t4_sq_empty(struct t4_wq *wq)
{}

static inline u32 t4_sq_avail(struct t4_wq *wq)
{}

static inline void t4_sq_produce(struct t4_wq *wq, u8 len16)
{}

static inline void t4_sq_consume(struct t4_wq *wq)
{}

static inline u16 t4_sq_host_wq_pidx(struct t4_wq *wq)
{}

static inline u16 t4_sq_wq_size(struct t4_wq *wq)
{}
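
/*
 * The SQ accounting bodies are elided above.  A hedged sketch of the
 * invariant they maintain: in_use counts posted-but-uncompleted WRs, and
 * one slot is kept free so a full ring is distinguishable from an empty
 * one, e.g.
 *
 *	static inline int t4_sq_empty(struct t4_wq *wq)
 *	{
 *		return wq->sq.in_use == 0;
 *	}
 *
 *	static inline u32 t4_sq_avail(struct t4_wq *wq)
 *	{
 *		return wq->sq.size - 1 - wq->sq.in_use;
 *	}
 *
 * t4_sq_host_wq_pidx() and t4_sq_wq_size() would expose the hardware-slot
 * view of the same ring, used by the doorbell recovery path.
 */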

/* This function copies a 64-byte coalesced work request to memory-mapped
 * BAR2 space. For coalesced WRs, the SGE fetches the WR data from its FIFO
 * instead of from host memory.
 */
static inline void pio_copy(u64 __iomem *dst, u64 *src)
{}
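
/*
 * Body elided above.  A hedged sketch matching the comment: copy the
 * 64-byte WR as eight 64-bit stores so the CPU can merge them into a
 * single burst toward the BAR2 write-combining window, e.g.
 *
 *	static inline void pio_copy(u64 __iomem *dst, u64 *src)
 *	{
 *		int count = 8;
 *
 *		while (count--)
 *			writeq(*src++, dst++);
 *	}
 */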

static inline void t4_ring_srq_db(struct t4_srq *srq, u16 inc, u8 len16,
				  union t4_recv_wr *wqe)
{}

static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, union t4_wr *wqe)
{}

static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc,
				 union t4_recv_wr *wqe)
{}
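
/*
 * The doorbell bodies are elided above.  A hedged sketch of the SQ case
 * (the SGE_UDB_* offsets, the PIDX/QID field macros, and the bar2_qid
 * field are assumptions from the cxgb4 register definitions): order the
 * queue memory writes first, then either push the whole WR through the
 * BAR2 write-combining doorbell when exactly one WR was posted, or just
 * bump the producer index:
 *
 *	static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc,
 *					 union t4_wr *wqe)
 *	{
 *		wmb();
 *		if (wq->sq.bar2_va) {
 *			if (inc == 1 && wqe)
 *				pio_copy((u64 __iomem *)
 *					 (wq->sq.bar2_va + SGE_UDB_WCDOORBELL),
 *					 (u64 *)wqe);
 *			else
 *				writel(PIDX_T5_V(inc) | QID_V(wq->sq.bar2_qid),
 *				       wq->sq.bar2_va + SGE_UDB_KDOORBELL);
 *			wmb();
 *			return;
 *		}
 *		writel(QID_V(wq->sq.qid) | PIDX_V(inc), wq->db);
 *	}
 */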

static inline int t4_wq_in_error(struct t4_wq *wq)
{}

static inline void t4_set_wq_in_error(struct t4_wq *wq, u32 srqidx)
{}

static inline void t4_disable_wq_db(struct t4_wq *wq)
{}

static inline void t4_enable_wq_db(struct t4_wq *wq)
{}

enum t4_cq_flags { CQ_ARMED = 1 };	/* value assumed; used as a flag bit */

struct t4_cq {};

static inline void write_gts(struct t4_cq *cq, u32 val)
{}

static inline int t4_clear_cq_armed(struct t4_cq *cq)
{}

static inline int t4_arm_cq(struct t4_cq *cq, int se)
{}
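
/*
 * Bodies elided above.  A hedged sketch of arming (SEINTARM/CIDXINC/
 * INGRESSQID are SGE GTS doorbell fields; their exact use here is an
 * assumption): report the accumulated consumer-index credit and the arm
 * bit through the GTS register, e.g.
 *
 *	static inline int t4_arm_cq(struct t4_cq *cq, int se)
 *	{
 *		u32 val;
 *
 *		set_bit(CQ_ARMED, &cq->flags);
 *		val = SEINTARM_V(se) | CIDXINC_V(cq->cidx_inc) |
 *		      INGRESSQID_V(cq->cqid & cq->qid_mask);
 *		writel(val, cq->gts);
 *		cq->cidx_inc = 0;
 *		return 0;
 *	}
 *
 * se requests notification for solicited events only.
 */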

static inline void t4_swcq_produce(struct t4_cq *cq)
{}

static inline void t4_swcq_consume(struct t4_cq *cq)
{}

static inline void t4_hwcq_consume(struct t4_cq *cq)
{}

static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
{}

static inline int t4_cq_notempty(struct t4_cq *cq)
{}

static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{}
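
/*
 * Bodies elided above.  A hedged sketch of the validity protocol: a CQE
 * belongs to the current pass around the ring when its generation bit
 * (CQE_GENBIT()) matches cq->gen, and the CQE body may only be read after
 * a read barrier, e.g.
 *
 *	static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
 *	{
 *		if (!t4_valid_cqe(cq, &cq->queue[cq->cidx]))
 *			return -ENODATA;
 *		rmb();
 *		*cqe = &cq->queue[cq->cidx];
 *		return 0;
 *	}
 *
 * The in-tree version additionally detects CQ overflow by checking whether
 * the previous slot's timestamp field still matches.
 */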

static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{}

static inline void t4_set_cq_in_error(struct t4_cq *cq)
{}
struct t4_dev_status_page {};

#endif