/* linux/drivers/infiniband/hw/hfi1/hfi.h */

/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright(c) 2020-2023 Cornelis Networks, Inc.
 * Copyright(c) 2015-2020 Intel Corporation.
 */

#ifndef _HFI1_KERNEL_H
#define _HFI1_KERNEL_H

#include <linux/refcount.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/completion.h>
#include <linux/kref.h>
#include <linux/sched.h>
#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/xarray.h>
#include <rdma/ib_hdrs.h>
#include <rdma/opa_addr.h>
#include <linux/rhashtable.h>
#include <rdma/rdma_vt.h>

#include "chip_registers.h"
#include "common.h"
#include "opfn.h"
#include "verbs.h"
#include "pio.h"
#include "chip.h"
#include "mad.h"
#include "qsfp.h"
#include "platform.h"
#include "affinity.h"
#include "msix.h"

/* bumped 1 from s/w major version of TrueScale */
#define HFI1_CHIP_VERS_MAJ

/* don't care about this except printing */
#define HFI1_CHIP_VERS_MIN

/* The Organization Unique Identifier (Mfg code), and its position in GUID */
#define HFI1_OUI
#define HFI1_OUI_LSB

#define DROP_PACKET_OFF
#define DROP_PACKET_ON

#define NEIGHBOR_TYPE_HFI
#define NEIGHBOR_TYPE_SWITCH

#define HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES

extern unsigned long hfi1_cap_mask;
#define HFI1_CAP_KGET_MASK(mask, cap)
#define HFI1_CAP_UGET_MASK(mask, cap)
#define HFI1_CAP_KGET(cap)
#define HFI1_CAP_UGET(cap)
#define HFI1_CAP_IS_KSET(cap)
#define HFI1_CAP_IS_USET(cap)
#define HFI1_MISC_GET()
/* Offline Disabled Reason is 4-bits */
#define HFI1_ODR_MASK(rsn)

/*
 * Control context is always 0 and handles the error packets.
 * It also handles the VL15 and multicast packets.
 */
#define HFI1_CTRL_CTXT

/*
 * Driver context will store software counters for each of the events
 * associated with these status registers
 */
#define NUM_CCE_ERR_STATUS_COUNTERS
#define NUM_RCV_ERR_STATUS_COUNTERS
#define NUM_MISC_ERR_STATUS_COUNTERS
#define NUM_SEND_PIO_ERR_STATUS_COUNTERS
#define NUM_SEND_DMA_ERR_STATUS_COUNTERS
#define NUM_SEND_EGRESS_ERR_STATUS_COUNTERS
#define NUM_SEND_ERR_STATUS_COUNTERS
#define NUM_SEND_CTXT_ERR_STATUS_COUNTERS
#define NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS

/*
 * per driver stats, either not device nor port-specific, or
 * summed over all of the devices and ports.
 * They are described by name via ipathfs filesystem, so layout
 * and number of elements can change without breaking compatibility.
 * If members are added or deleted hfi1_statnames[] in debugfs.c must
 * change to match.
 */
struct hfi1_ib_stats {}; /* NOTE(review): members stripped in this copy; layout must match hfi1_statnames[] in debugfs.c */

extern struct hfi1_ib_stats hfi1_stats;
extern const struct pci_error_handlers hfi1_pci_err_handler;

extern int num_driver_cntrs;

/*
 * First-cut criterion for "device is active" is
 * two thousand dwords combined Tx, Rx traffic per
 * 5-second interval. SMA packets are 64 dwords,
 * and occur "a few per second", presumably each way.
 */
#define HFI1_TRAFFIC_ACTIVE_THRESHOLD

/*
 * Below contains all data related to a single context (formerly called port).
 */

struct hfi1_opcode_stats_perctx;

struct ctxt_eager_bufs {};

struct exp_tid_set {};

struct hfi1_ctxtdata;
intr_handler;
rhf_rcv_function_ptr;

struct tid_queue {};

struct hfi1_ctxtdata {};

/**
 * rcvhdrq_size - return total size in bytes for header queue
 * @rcd: the receive context
 *
 * rcvhdrqentsize is in DWs, so we have to convert to bytes
 *
 * NOTE(review): body stripped in this copy — restore from upstream.
 */
static inline u32 rcvhdrq_size(struct hfi1_ctxtdata *rcd)
{}

/*
 * Represents a single packet at a high level. Put commonly computed things in
 * here so we do not have to keep doing them over and over. The rule of thumb is
 * if something is used one time to derive some value, store that something in
 * here. If it is used multiple times, then store the result of that derivation
 * in here.
 */
/* NOTE(review): members stripped in this copy of the header. */
struct hfi1_packet {};

/* Packet types */
#define HFI1_PKT_TYPE_9B
#define HFI1_PKT_TYPE_16B

/*
 * OPA 16B Header
 */
#define OPA_16B_L4_MASK
#define OPA_16B_SC_MASK
#define OPA_16B_SC_SHIFT
#define OPA_16B_LID_MASK
#define OPA_16B_DLID_MASK
#define OPA_16B_DLID_SHIFT
#define OPA_16B_DLID_HIGH_SHIFT
#define OPA_16B_SLID_MASK
#define OPA_16B_SLID_SHIFT
#define OPA_16B_SLID_HIGH_SHIFT
#define OPA_16B_BECN_MASK
#define OPA_16B_BECN_SHIFT
#define OPA_16B_FECN_MASK
#define OPA_16B_FECN_SHIFT
#define OPA_16B_L2_MASK
#define OPA_16B_L2_SHIFT
#define OPA_16B_PKEY_MASK
#define OPA_16B_PKEY_SHIFT
#define OPA_16B_LEN_MASK
#define OPA_16B_LEN_SHIFT
#define OPA_16B_RC_MASK
#define OPA_16B_RC_SHIFT
#define OPA_16B_AGE_MASK
#define OPA_16B_AGE_SHIFT
#define OPA_16B_ENTROPY_MASK

/*
 * OPA 16B L2/L4 Encodings
 */
#define OPA_16B_L4_9B
#define OPA_16B_L2_TYPE
#define OPA_16B_L4_FM
#define OPA_16B_L4_IB_LOCAL
#define OPA_16B_L4_IB_GLOBAL
#define OPA_16B_L4_ETHR

/*
 * OPA 16B Management
 */
#define OPA_16B_L4_FM_PAD
#define OPA_16B_L4_FM_HLEN

/*
 * OPA 16B header field accessors (L4, SC, DLID, SLID, BECN, FECN, L2,
 * PKEY, RC, AGE, LEN, ENTROPY).
 * NOTE(review): all bodies are stripped in this copy; presumably each
 * extracts the matching OPA_16B_* field from @hdr — confirm upstream.
 */
static inline u8 hfi1_16B_get_l4(struct hfi1_16b_header *hdr)
{}

static inline u8 hfi1_16B_get_sc(struct hfi1_16b_header *hdr)
{}

static inline u32 hfi1_16B_get_dlid(struct hfi1_16b_header *hdr)
{}

static inline u32 hfi1_16B_get_slid(struct hfi1_16b_header *hdr)
{}

static inline u8 hfi1_16B_get_becn(struct hfi1_16b_header *hdr)
{}

static inline u8 hfi1_16B_get_fecn(struct hfi1_16b_header *hdr)
{}

static inline u8 hfi1_16B_get_l2(struct hfi1_16b_header *hdr)
{}

static inline u16 hfi1_16B_get_pkey(struct hfi1_16b_header *hdr)
{}

static inline u8 hfi1_16B_get_rc(struct hfi1_16b_header *hdr)
{}

static inline u8 hfi1_16B_get_age(struct hfi1_16b_header *hdr)
{}

static inline u16 hfi1_16B_get_len(struct hfi1_16b_header *hdr)
{}

static inline u16 hfi1_16B_get_entropy(struct hfi1_16b_header *hdr)
{}

/* NOTE(review): macro values and function bodies below are stripped. */
#define OPA_16B_MAKE_QW(low_dw, high_dw)

/*
 * BTH
 */
#define OPA_16B_BTH_PAD_MASK
static inline u8 hfi1_16B_bth_get_pad(struct ib_other_headers *ohdr)
{}

/*
 * 16B Management
 */
#define OPA_16B_MGMT_QPN_MASK
static inline u32 hfi1_16B_get_dest_qpn(struct opa_16b_mgmt *mgmt)
{}

static inline u32 hfi1_16B_get_src_qpn(struct opa_16b_mgmt *mgmt)
{}

static inline void hfi1_16B_set_qpn(struct opa_16b_mgmt *mgmt,
				    u32 dest_qp, u32 src_qp)
{}

/**
 * hfi1_get_rc_ohdr - get extended header
 * @opah: the opaheader
 *
 * NOTE(review): body stripped in this copy.
 */
static inline struct ib_other_headers *
hfi1_get_rc_ohdr(struct hfi1_opa_header *opah)
{}

struct rvt_sge_state;

/*
 * Get/Set IB link-level config parameters for f_get/set_ib_cfg()
 * Mostly for MADs that set or query link parameters, also ipath
 * config interfaces
 */
#define HFI1_IB_CFG_LIDLMC
#define HFI1_IB_CFG_LWID_DG_ENB
#define HFI1_IB_CFG_LWID_ENB
#define HFI1_IB_CFG_LWID
#define HFI1_IB_CFG_SPD_ENB
#define HFI1_IB_CFG_SPD
#define HFI1_IB_CFG_RXPOL_ENB
#define HFI1_IB_CFG_LREV_ENB
#define HFI1_IB_CFG_LINKLATENCY
#define HFI1_IB_CFG_HRTBT
#define HFI1_IB_CFG_OP_VLS
#define HFI1_IB_CFG_VL_HIGH_CAP
#define HFI1_IB_CFG_VL_LOW_CAP
#define HFI1_IB_CFG_OVERRUN_THRESH
#define HFI1_IB_CFG_PHYERR_THRESH
#define HFI1_IB_CFG_LINKDEFAULT
#define HFI1_IB_CFG_PKEYS
#define HFI1_IB_CFG_MTU
#define HFI1_IB_CFG_VL_HIGH_LIMIT
#define HFI1_IB_CFG_PMA_TICKS
#define HFI1_IB_CFG_PORT

/*
 * HFI or Host Link States
 *
 * These describe the states the driver thinks the logical and physical
 * states are in.  Used as an argument to set_link_state().  Implemented
 * as bits for easy multi-state checking.  The actual state can only be
 * one.
 */
#define __HLS_UP_INIT_BP
#define __HLS_UP_ARMED_BP
#define __HLS_UP_ACTIVE_BP
#define __HLS_DN_DOWNDEF_BP
#define __HLS_DN_POLL_BP
#define __HLS_DN_DISABLE_BP
#define __HLS_DN_OFFLINE_BP
#define __HLS_VERIFY_CAP_BP
#define __HLS_GOING_UP_BP
#define __HLS_GOING_OFFLINE_BP
#define __HLS_LINK_COOLDOWN_BP

#define HLS_UP_INIT
#define HLS_UP_ARMED
#define HLS_UP_ACTIVE
#define HLS_DN_DOWNDEF
#define HLS_DN_POLL
#define HLS_DN_DISABLE
#define HLS_DN_OFFLINE
#define HLS_VERIFY_CAP
#define HLS_GOING_UP
#define HLS_GOING_OFFLINE
#define HLS_LINK_COOLDOWN

#define HLS_UP
#define HLS_DOWN

#define HLS_DEFAULT

/* use this MTU size if none other is given */
#define HFI1_DEFAULT_ACTIVE_MTU
/* use this MTU size as the default maximum */
#define HFI1_DEFAULT_MAX_MTU
/* default partition key */
#define DEFAULT_PKEY

/*
 * Possible fabric manager config parameters for fm_{get,set}_table()
 */
#define FM_TBL_VL_HIGH_ARB
#define FM_TBL_VL_LOW_ARB
#define FM_TBL_BUFFER_CONTROL
#define FM_TBL_SC2VLNT
#define FM_TBL_VL_PREEMPT_ELEMS
#define FM_TBL_VL_PREEMPT_MATRIX

/*
 * Possible "operations" for f_rcvctrl(ppd, op, ctxt)
 * these are bits so they can be combined, e.g.
 * HFI1_RCVCTRL_INTRAVAIL_ENB | HFI1_RCVCTRL_CTXT_ENB
 */
#define HFI1_RCVCTRL_TAILUPD_ENB
#define HFI1_RCVCTRL_TAILUPD_DIS
#define HFI1_RCVCTRL_CTXT_ENB
#define HFI1_RCVCTRL_CTXT_DIS
#define HFI1_RCVCTRL_INTRAVAIL_ENB
#define HFI1_RCVCTRL_INTRAVAIL_DIS
#define HFI1_RCVCTRL_PKEY_ENB
#define HFI1_RCVCTRL_PKEY_DIS
#define HFI1_RCVCTRL_TIDFLOW_ENB
#define HFI1_RCVCTRL_TIDFLOW_DIS
#define HFI1_RCVCTRL_ONE_PKT_EGR_ENB
#define HFI1_RCVCTRL_ONE_PKT_EGR_DIS
#define HFI1_RCVCTRL_NO_RHQ_DROP_ENB
#define HFI1_RCVCTRL_NO_RHQ_DROP_DIS
#define HFI1_RCVCTRL_NO_EGR_DROP_ENB
#define HFI1_RCVCTRL_NO_EGR_DROP_DIS
#define HFI1_RCVCTRL_URGENT_ENB
#define HFI1_RCVCTRL_URGENT_DIS

/* partition enforcement flags */
#define HFI1_PART_ENFORCE_IN
#define HFI1_PART_ENFORCE_OUT

/* how often we check for synthetic counter wrap around */
#define SYNTH_CNT_TIME

/* Counter flags */
#define CNTR_NORMAL
#define CNTR_SYNTH
#define CNTR_DISABLED
#define CNTR_32BIT
#define CNTR_VL
#define CNTR_SDMA
#define CNTR_INVALID_VL
#define CNTR_MODE_W
#define CNTR_MODE_R

/* VLs Supported/Operational */
#define HFI1_MIN_VLS_SUPPORTED
#define HFI1_MAX_VLS_SUPPORTED

#define HFI1_GUIDS_PER_PORT
#define HFI1_PORT_GUID_INDEX

/*
 * Increment a 64-bit software counter.
 * NOTE(review): body stripped — presumably a saturating (no-wrap)
 * increment of *cntr; confirm against the upstream driver.
 */
static inline void incr_cntr64(u64 *cntr)
{}

/* NOTE(review): struct bodies and macro values below are stripped.
 * The bare "enum {};" is not valid C as written — enumerators were lost.
 */
#define MAX_NAME_SIZE
struct hfi1_msix_entry {};

struct hfi1_msix_info {};

/* per-SL CCA information */
struct cca_timer {};

struct link_down_reason {};

enum {};

struct vl_arb_cache {};

/*
 * The structure below encapsulates data relevant to a physical IB Port.
 * Current chips support only one such port, but the separation
 * clarifies things a bit. Note that to conform to IB conventions,
 * port-numbers are one-based. The first or only port is port1.
 */
struct hfi1_pportdata {};

/*
 * NOTE(review): "opcode_handler" and "hfi1_make_req" look like stripped
 * function-pointer typedefs; as written they are not valid C.  Restore
 * from upstream before building.
 */
opcode_handler;
hfi1_make_req;
extern const rhf_rcv_function_ptr normal_rhf_rcv_functions[];
extern const rhf_rcv_function_ptr netdev_rhf_rcv_functions[];

/* return values for the RHF receive functions */
#define RHF_RCV_CONTINUE
#define RHF_RCV_DONE
#define RHF_RCV_REPROCESS

/* NOTE(review): struct bodies below are stripped in this copy. */
struct rcv_array_data {};

struct per_vl_data {};

/* 16 to directly index */
#define PER_VL_SEND_CONTEXTS

struct err_info_rcvport {};

struct err_info_constraint {};

struct hfi1_temp {};

struct hfi1_i2c_bus {};

/* common data between shared ASIC HFIs */
struct hfi1_asic_data {};

/* sizes for both the QP and RSM map tables */
#define NUM_MAP_ENTRIES
#define NUM_MAP_REGS

/* Virtual NIC information */
struct hfi1_vnic_data {}; /* NOTE(review): members stripped in this copy */

struct hfi1_vnic_vport_info;

/* device data struct now contains only "general per-device" info.
 * fields related to a physical IB port are in a hfi1_pportdata struct.
 */
struct sdma_engine;
struct sdma_vl_map;

#define BOARD_VERS_MAX
#define SERIAL_MAX

/*
 * NOTE(review): "send_routine" looks like a stripped function-pointer
 * typedef; as written it is not valid C.  hfi1_devdata's members are
 * also stripped — this is the per-device ("general per-device") struct
 * referenced by the comment above.
 */
send_routine;
struct hfi1_netdev_rx;
struct hfi1_devdata {};

/* 8051 firmware version helper */
#define dc8051_ver(a, b, c)
#define dc8051_ver_maj(a)
#define dc8051_ver_min(a)
#define dc8051_ver_patch(a)

/* f_put_tid types */
#define PT_EXPECTED
#define PT_EAGER
#define PT_INVALID_FLUSH
#define PT_INVALID

struct tid_rb_node;

/* Private data for file operations */
struct hfi1_filedata {}; /* NOTE(review): members stripped in this copy */

extern struct xarray hfi1_dev_table;
struct hfi1_devdata *hfi1_lookup(int unit);

/*
 * Return the mmap/offset cookie for a user context.
 * NOTE(review): body stripped — confirm semantics upstream.
 */
static inline unsigned long uctxt_offset(struct hfi1_ctxtdata *uctxt)
{}

int hfi1_init(struct hfi1_devdata *dd, int reinit);
int hfi1_count_active_units(void);

int hfi1_diag_add(struct hfi1_devdata *dd);
void hfi1_diag_remove(struct hfi1_devdata *dd);
void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup);

void handle_user_interrupt(struct hfi1_ctxtdata *rcd);

int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd);
int hfi1_create_kctxts(struct hfi1_devdata *dd);
int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
			 struct hfi1_ctxtdata **rcd);
void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd);
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
			 struct hfi1_devdata *dd, u8 hw_pidx, u32 port);
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
int hfi1_rcd_put(struct hfi1_ctxtdata *rcd);
int hfi1_rcd_get(struct hfi1_ctxtdata *rcd);
struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
						 u16 ctxt);
struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt);
int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread);
int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread);
int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread);
int handle_receive_interrupt_napi_fp(struct hfi1_ctxtdata *rcd, int budget);
int handle_receive_interrupt_napi_sp(struct hfi1_ctxtdata *rcd, int budget);
void set_all_slowpath(struct hfi1_devdata *dd);

extern const struct pci_device_id hfi1_pci_tbl[];
void hfi1_make_ud_req_9B(struct rvt_qp *qp,
			 struct hfi1_pkt_state *ps,
			 struct rvt_swqe *wqe);

void hfi1_make_ud_req_16B(struct rvt_qp *qp,
			  struct hfi1_pkt_state *ps,
			  struct rvt_swqe *wqe);

/* receive packet handler dispositions */
#define RCV_PKT_OK
#define RCV_PKT_LIMIT
#define RCV_PKT_DONE

/* NOTE(review): all accessor bodies below are stripped in this copy. */
/**
 * hfi1_rcd_head - add accessor for rcd head
 * @rcd: the context
 */
static inline u32 hfi1_rcd_head(struct hfi1_ctxtdata *rcd)
{}

/**
 * hfi1_set_rcd_head - add accessor for rcd head
 * @rcd: the context
 * @head: the new head
 */
static inline void hfi1_set_rcd_head(struct hfi1_ctxtdata *rcd, u32 head)
{}

/* calculate the current RHF address */
static inline __le32 *get_rhf_addr(struct hfi1_ctxtdata *rcd)
{}

/* return DMA_RTAIL configuration */
static inline bool get_dma_rtail_setting(struct hfi1_ctxtdata *rcd)
{}

/* NOTE(review): all function bodies below are stripped in this copy. */
/**
 * hfi1_seq_incr_wrap - wrapping increment for sequence
 * @seq: the current sequence number
 *
 * Returns: the incremented seq
 */
static inline u8 hfi1_seq_incr_wrap(u8 seq)
{}

/**
 * hfi1_seq_cnt - return seq_cnt member
 * @rcd: the receive context
 *
 * Return seq_cnt member
 */
static inline u8 hfi1_seq_cnt(struct hfi1_ctxtdata *rcd)
{}

/**
 * hfi1_set_seq_cnt - set seq_cnt member
 * @rcd: the receive context
 * @cnt: the new count
 *
 * Set the seq_cnt member
 */
static inline void hfi1_set_seq_cnt(struct hfi1_ctxtdata *rcd, u8 cnt)
{}

/**
 * last_rcv_seq - is last
 * @rcd: the receive context
 * @seq: sequence
 *
 * return true if last packet
 */
static inline bool last_rcv_seq(struct hfi1_ctxtdata *rcd, u32 seq)
{}

/**
 * hfi1_seq_incr - increment context sequence number
 * @rcd: the receive context
 * @seq: the current sequence number
 *
 * Returns: true if this was the last packet
 */
static inline bool hfi1_seq_incr(struct hfi1_ctxtdata *rcd, u32 seq)
{}

/**
 * get_hdrqentsize - return hdrq entry size
 * @rcd: the receive context
 */
static inline u8 get_hdrqentsize(struct hfi1_ctxtdata *rcd)
{}

/**
 * get_hdrq_cnt - return hdrq count
 * @rcd: the receive context
 */
static inline u16 get_hdrq_cnt(struct hfi1_ctxtdata *rcd)
{}

/**
 * hfi1_is_slowpath - check if this context is slow path
 * @rcd: the receive context
 */
static inline bool hfi1_is_slowpath(struct hfi1_ctxtdata *rcd)
{}

/**
 * hfi1_is_fastpath - check if this context is fast path
 * @rcd: the receive context
 */
static inline bool hfi1_is_fastpath(struct hfi1_ctxtdata *rcd)
{}

/**
 * hfi1_set_fast - change to the fast handler
 * @rcd: the receive context
 */
static inline void hfi1_set_fast(struct hfi1_ctxtdata *rcd)
{}

int hfi1_reset_device(int);

void receive_interrupt_work(struct work_struct *work);

/* extract service channel from header and rhf */
/* NOTE(review): body stripped in this copy. */
static inline int hfi1_9B_get_sc5(struct ib_header *hdr, u64 rhf)
{}

#define HFI1_JKEY_WIDTH
#define HFI1_JKEY_MASK
#define HFI1_ADMIN_JKEY_RANGE

/*
 * J_KEYs are split and allocated in the following groups:
 *   0 - 31    - users with administrator privileges
 *  32 - 63    - kernel protocols using KDETH packets
 *  64 - 65535 - all other users using KDETH packets
 */
/* NOTE(review): body stripped — presumably derives a J_KEY from @uid
 * per the grouping above; confirm upstream. */
static inline u16 generate_jkey(kuid_t uid)
{}

/* NOTE(review): bodies below are stripped in this copy. */
/*
 * active_egress_rate
 *
 * returns the active egress rate in units of [10^6 bits/sec]
 */
static inline u32 active_egress_rate(struct hfi1_pportdata *ppd)
{}

/*
 * egress_cycles
 *
 * Returns the number of 'fabric clock cycles' to egress a packet
 * of length 'len' bytes, at 'rate' Mbit/s. Since the fabric clock
 * rate is (approximately) 805 MHz, the units of the returned value
 * are (1/805 MHz).
 */
static inline u32 egress_cycles(u32 len, u32 rate)
{}

void set_link_ipg(struct hfi1_pportdata *ppd);
void process_becn(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, u32 lqpn,
		  u32 rqpn, u8 svc_type);
void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
		u16 pkey, u32 slid, u32 dlid, u8 sc5,
		const struct ib_grh *old_grh);
void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
		    u32 remote_qpn, u16 pkey, u32 slid, u32 dlid,
		    u8 sc5, const struct ib_grh *old_grh);
/* NOTE(review): looks like a stripped function-pointer typedef (the CNP
 * handler type matching return_cnp()/return_cnp_16B() above); as written
 * this is not valid C — restore from upstream. */
hfi1_handle_cnp;

#define PKEY_CHECK_INVALID
int egress_pkey_check(struct hfi1_pportdata *ppd, u32 slid, u16 pkey,
		      u8 sc5, int8_t s_pkey_index);

#define PACKET_EGRESS_TIMEOUT
/* NOTE(review): body stripped — presumably a bounded delay to let PIO
 * credits return before egress; confirm upstream. */
static inline void pause_for_credit_return(struct hfi1_devdata *dd)
{}

/**
 * sc_to_vlt() - reverse lookup sc to vl
 * @dd - devdata
 * @sc5 - 5 bit sc
 *
 * NOTE(review): body stripped in this copy.
 */
static inline u8 sc_to_vlt(struct hfi1_devdata *dd, u8 sc5)
{}

/* NOTE(review): macro values and all pkey-check bodies below are stripped
 * in this copy.  The two plain "static" (non-inline) functions would draw
 * unused-function warnings in a header — verify against upstream. */
#define PKEY_MEMBER_MASK
#define PKEY_LOW_15_MASK

/*
 * ingress_pkey_matches_entry - return 1 if the pkey matches ent (ent
 * being an entry from the ingress partition key table), return 0
 * otherwise. Use the matching criteria for ingress partition keys
 * specified in the OPAv1 spec., section 9.10.14.
 */
static inline int ingress_pkey_matches_entry(u16 pkey, u16 ent)
{}

/*
 * ingress_pkey_table_search - search the entire pkey table for
 * an entry which matches 'pkey'. return 0 if a match is found,
 * and 1 otherwise.
 */
static int ingress_pkey_table_search(struct hfi1_pportdata *ppd, u16 pkey)
{}

/*
 * ingress_pkey_table_fail - record a failure of ingress pkey validation,
 * i.e., increment port_rcv_constraint_errors for the port, and record
 * the 'error info' for this failure.
 */
static void ingress_pkey_table_fail(struct hfi1_pportdata *ppd, u16 pkey,
				    u32 slid)
{}

/*
 * ingress_pkey_check - Return 0 if the ingress pkey is valid, return 1
 * otherwise. Use the criteria in the OPAv1 spec, section 9.10.14. idx
 * is a hint as to the best place in the partition key table to begin
 * searching. This function should not be called on the data path because
 * of performance reasons. On datapath pkey check is expected to be done
 * by HW and rcv_pkey_check function should be called instead.
 */
static inline int ingress_pkey_check(struct hfi1_pportdata *ppd, u16 pkey,
				     u8 sc5, u8 idx, u32 slid, bool force)
{}

/*
 * rcv_pkey_check - Return 0 if the ingress pkey is valid, return 1
 * otherwise. It only ensures pkey is valid for QP0. This function
 * should be called on the data path instead of ingress_pkey_check
 * as on data path, pkey check is done by HW (except for QP0).
 */
static inline int rcv_pkey_check(struct hfi1_pportdata *ppd, u16 pkey,
				 u8 sc5, u16 slid)
{}

/* MTU handling */

/* MTU enumeration, 256-4k match IB */
#define OPA_MTU_0
#define OPA_MTU_256
#define OPA_MTU_512
#define OPA_MTU_1024
#define OPA_MTU_2048
#define OPA_MTU_4096

u32 lrh_max_header_bytes(struct hfi1_devdata *dd);
int mtu_to_enum(u32 mtu, int default_if_bad);
u16 enum_to_mtu(int mtu);
/* NOTE(review): bodies stripped — presumably range checks against the
 * OPA_MTU_* enumeration above; confirm upstream. */
static inline int valid_ib_mtu(unsigned int mtu)
{}

static inline int valid_opa_max_mtu(unsigned int mtu)
{}

int set_mtu(struct hfi1_pportdata *ppd);

int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc);
void hfi1_disable_after_error(struct hfi1_devdata *dd);
int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit);
int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encode);

int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t);
int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t);

void set_up_vau(struct hfi1_devdata *dd, u8 vau);
void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf);
void reset_link_credits(struct hfi1_devdata *dd);
void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu);

int set_buffer_control(struct hfi1_pportdata *ppd, struct buffer_control *bc);

/*
 * Structure-navigation accessors between devdata, pportdata, ibdev and
 * ibport.  NOTE(review): bodies stripped — presumably container_of()
 * style conversions; confirm upstream.
 */
static inline struct hfi1_devdata *dd_from_ppd(struct hfi1_pportdata *ppd)
{}

static inline struct hfi1_devdata *dd_from_dev(struct hfi1_ibdev *dev)
{}

static inline struct hfi1_devdata *dd_from_ibdev(struct ib_device *ibdev)
{}

static inline struct hfi1_pportdata *ppd_from_ibp(struct hfi1_ibport *ibp)
{}

static inline struct hfi1_ibdev *dev_from_rdi(struct rvt_dev_info *rdi)
{}

static inline struct hfi1_ibport *to_iport(struct ib_device *ibdev, u32 port)
{}

static inline struct hfi1_ibport *rcd_to_iport(struct hfi1_ctxtdata *rcd)
{}

/**
 * hfi1_may_ecn - Check whether FECN or BECN processing should be done
 * @pkt: the packet to be evaluated
 *
 * Check whether the FECN or BECN bits in the packet's header are
 * enabled, depending on packet type.
 *
 * This function only checks for FECN and BECN bits. Additional checks
 * are done in the slowpath (hfi1_process_ecn_slowpath()) in order to
 * ensure correct handling.
 *
 * NOTE(review): body stripped in this copy.
 */
static inline bool hfi1_may_ecn(struct hfi1_packet *pkt)
{}

bool hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
			       bool prescan);
/* NOTE(review): body stripped — presumably gates the slowpath call on
 * hfi1_may_ecn(); confirm upstream. */
static inline bool process_ecn(struct rvt_qp *qp, struct hfi1_packet *pkt)
{}

/* NOTE(review): bodies below are stripped in this copy. */
/*
 * Return the indexed PKEY from the port PKEY table.
 */
static inline u16 hfi1_get_pkey(struct hfi1_ibport *ibp, unsigned index)
{}

/*
 * Return the indexed GUID from the port GUIDs table.
 */
static inline __be64 get_sguid(struct hfi1_ibport *ibp, unsigned int index)
{}

/*
 * Called by readers of cc_state only, must call under rcu_read_lock().
 */
static inline struct cc_state *get_cc_state(struct hfi1_pportdata *ppd)
{}

/*
 * Called by writers of cc_state only,  must call under cc_state_lock.
 */
static inline
struct cc_state *get_cc_state_protected(struct hfi1_pportdata *ppd)
{}

/*
 * values for dd->flags (_device_ related flags)
 */
#define HFI1_INITTED
#define HFI1_PRESENT
#define HFI1_FROZEN
#define HFI1_HAS_SDMA_TIMEOUT
#define HFI1_HAS_SEND_DMA
#define HFI1_FORCED_FREEZE
#define HFI1_SHUTDOWN

/* IB dword length mask in PBC (lower 11 bits); same for all chips */
#define HFI1_PBC_LENGTH_MASK

/* ctxt_flag bit offsets */
		/* base context has not finished initializing */
#define HFI1_CTXT_BASE_UNINIT
		/* base context initialization failed */
#define HFI1_CTXT_BASE_FAILED
		/* waiting for a packet to arrive */
#define HFI1_CTXT_WAITING_RCV
		/* waiting for an urgent packet to arrive */
#define HFI1_CTXT_WAITING_URG

/* free up any allocated data at closes */
int hfi1_init_dd(struct hfi1_devdata *dd);
void hfi1_free_devdata(struct hfi1_devdata *dd);

/* LED beaconing functions */
void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
			     unsigned int timeoff);
void shutdown_led_override(struct hfi1_pportdata *ppd);

#define HFI1_CREDIT_RETURN_RATE

/*
 * The number of words for the KDETH protocol field.  If this is
 * larger then the actual field used, then part of the payload
 * will be in the header.
 *
 * Optimally, we want this sized so that a typical case will
 * use full cache lines.  The typical local KDETH header would
 * be:
 *
 *	Bytes	Field
 *	  8	LRH
 *	 12	BTH
 *	 ??	KDETH
 *	  8	RHF
 *	---
 *	 28 + KDETH
 *
 * For a 64-byte cache line, KDETH would need to be 36 bytes or 9 DWORDS
 */
#define DEFAULT_RCVHDRSIZE

/*
 * Maximal header byte count:
 *
 *	Bytes	Field
 *	  8	LRH
 *	 40	GRH (optional)
 *	 12	BTH
 *	 ??	KDETH
 *	  8	RHF
 *	---
 *	 68 + KDETH
 *
 * We also want to maintain a cache line alignment to assist DMA'ing
 * of the header bytes.  Round up to a good size.
 */
#define DEFAULT_RCVHDR_ENTSIZE

bool hfi1_can_pin_pages(struct hfi1_devdata *dd, struct mm_struct *mm,
			u32 nlocked, u32 npages);
int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr,
			    size_t npages, bool writable, struct page **pages);
void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
			     size_t npages, bool dirty);

/* NOTE(review): bodies below are stripped in this copy. */
/**
 * hfi1_rcvhdrtail_kvaddr - return tail kvaddr
 * @rcd - the receive context
 */
static inline __le64 *hfi1_rcvhdrtail_kvaddr(const struct hfi1_ctxtdata *rcd)
{}

static inline void clear_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
{}

static inline u32 get_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
{}

static inline bool hfi1_packet_present(struct hfi1_ctxtdata *rcd)
{}

/*
 * sysfs interface.
 */

extern const char ib_hfi1_version[];
extern const struct attribute_group ib_hfi1_attr_group;
extern const struct attribute_group *hfi1_attr_port_groups[];

int hfi1_device_create(struct hfi1_devdata *dd);
void hfi1_device_remove(struct hfi1_devdata *dd);

int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd);
void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd);
/* Hook for sysfs read of QSFP */
int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len);

int hfi1_pcie_init(struct hfi1_devdata *dd);
void hfi1_pcie_cleanup(struct pci_dev *pdev);
int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev);
void hfi1_pcie_ddcleanup(struct hfi1_devdata *);
int pcie_speeds(struct hfi1_devdata *dd);
int restore_pci_variables(struct hfi1_devdata *dd);
int save_pci_variables(struct hfi1_devdata *dd);
int do_pcie_gen3_transition(struct hfi1_devdata *dd);
void tune_pcie_caps(struct hfi1_devdata *dd);
int parse_platform_config(struct hfi1_devdata *dd);
int get_platform_config_field(struct hfi1_devdata *dd,
			      enum platform_config_table_type_encoding
			      table_type, int table_index, int field_index,
			      u32 *data, u32 len);

struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi);

/*
 * Flush write combining store buffers (if present) and perform a write
 * barrier.
 *
 * NOTE(review): body stripped in this copy.
 */
static inline void flush_wc(void)
{}

void handle_eflags(struct hfi1_packet *packet);
void seqfile_dump_rcd(struct seq_file *s, struct hfi1_ctxtdata *rcd);

/* global module parameter variables */
extern unsigned int hfi1_max_mtu;
extern unsigned int hfi1_cu;
extern unsigned int user_credit_return_threshold;
extern int num_user_contexts;
extern unsigned long n_krcvqs;
extern uint krcvqs[];
extern int krcvqsset;
extern uint loopback;
extern uint quick_linkup;
extern uint rcv_intr_timeout;
extern uint rcv_intr_count;
extern uint rcv_intr_dynamic;
extern ushort link_crc_mask;

extern struct mutex hfi1_mutex;

/* Number of seconds before our card status check...  */
#define STATUS_TIMEOUT

#define DRIVER_NAME
#define HFI1_USER_MINOR_BASE
#define HFI1_TRACE_MINOR
#define HFI1_NMINORS

#define PCI_VENDOR_ID_INTEL
#define PCI_DEVICE_ID_INTEL0
#define PCI_DEVICE_ID_INTEL1

#define HFI1_PKT_USER_SC_INTEGRITY

#define HFI1_PKT_KERNEL_SC_INTEGRITY

/* NOTE(review): bodies stripped — presumably build send-context
 * integrity masks from the HFI1_PKT_*_SC_INTEGRITY macros above;
 * confirm upstream. */
static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd,
						  u16 ctxt_type)
{}

static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
{}

#define dd_dev_emerg(dd, fmt, ...)

#define dd_dev_err(dd, fmt, ...)

#define dd_dev_err_ratelimited(dd, fmt, ...)

#define dd_dev_warn(dd, fmt, ...)

#define dd_dev_warn_ratelimited(dd, fmt, ...)

#define dd_dev_info(dd, fmt, ...)

#define dd_dev_info_ratelimited(dd, fmt, ...)

#define dd_dev_dbg(dd, fmt, ...)

#define hfi1_dev_porterr(dd, port, fmt, ...)

/*
 * this is used for formatting hw error messages...
 */
struct hfi1_hwerror_msgs {}; /* NOTE(review): members stripped in this copy */

/* in intr.c... */
void hfi1_format_hwerrors(u64 hwerrs,
			  const struct hfi1_hwerror_msgs *hwerrmsgs,
			  size_t nhwerrmsgs, char *msg, size_t lmsg);

#define USER_OPCODE_CHECK_VAL
#define USER_OPCODE_CHECK_MASK
#define OPCODE_CHECK_VAL_DISABLED
#define OPCODE_CHECK_MASK_DISABLED

/* NOTE(review): all helper bodies below are stripped in this copy. */
static inline void hfi1_reset_cpu_counters(struct hfi1_devdata *dd)
{}

/* Control LED state */
static inline void setextled(struct hfi1_devdata *dd, u32 on)
{}

/* return the i2c resource given the target */
static inline u32 i2c_target(u32 target)
{}

/* return the i2c chain chip resource that this HFI uses for QSFP */
static inline u32 qsfp_resource(struct hfi1_devdata *dd)
{}

/* Is this device integrated or discrete? */
static inline bool is_integrated(struct hfi1_devdata *dd)
{}

/**
 * hfi1_need_drop - detect need for drop
 * @dd: - the device
 *
 * In some cases, the first packet needs to be dropped.
 *
 * Return true if the current packet needs to be dropped and false otherwise.
 *
 * NOTE(review): body stripped in this copy.
 */
static inline bool hfi1_need_drop(struct hfi1_devdata *dd)
{}

int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp);

#define DD_DEV_ENTRY(dd)
#define DD_DEV_ASSIGN(dd)

/* NOTE(review): all bodies in this LID / header-construction group are
 * stripped in this copy of the header. */
static inline void hfi1_update_ah_attr(struct ib_device *ibdev,
				       struct rdma_ah_attr *attr)
{}

/*
 * hfi1_check_mcast- Check if the given lid is
 * in the OPA multicast range.
 *
 * The LID might either reside in ah.dlid or might be
 * in the GRH of the address handle as DGID if extended
 * addresses are in use.
 */
static inline bool hfi1_check_mcast(u32 lid)
{}

#define opa_get_lid(lid, format)

/* Convert a lid to a specific lid space */
static inline u32 __opa_get_lid(u32 lid, u8 format)
{}

/* Return true if the given lid is the OPA 16B multicast range */
static inline bool hfi1_is_16B_mcast(u32 lid)
{}

static inline void hfi1_make_opa_lid(struct rdma_ah_attr *attr)
{}

static inline u8 hfi1_get_packet_type(u32 lid)
{}

static inline bool hfi1_get_hdr_type(u32 lid, struct rdma_ah_attr *attr)
{}

static inline void hfi1_make_ext_grh(struct hfi1_packet *packet,
				     struct ib_grh *grh, u32 slid,
				     u32 dlid)
{}

/* Return the pad bytes needed to bring hdr_size + payload to a 16B flit
 * boundary — NOTE(review): inferred from the name; body stripped. */
static inline int hfi1_get_16b_padding(u32 hdr_size, u32 payload)
{}

static inline void hfi1_make_ib_hdr(struct ib_header *hdr,
				    u16 lrh0, u16 len,
				    u16 dlid, u16 slid)
{}

static inline void hfi1_make_16b_hdr(struct hfi1_16b_header *hdr,
				     u32 slid, u32 dlid,
				     u16 len, u16 pkey,
				     bool becn, bool fecn, u8 l4,
				     u8 sc)
{}
#endif                          /* _HFI1_KERNEL_H */