/* linux/drivers/infiniband/hw/hfi1/mad.c */

// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright(c) 2015-2018 Intel Corporation.
 */

#include <linux/net.h>
#include <rdma/opa_addr.h>
#define OPA_NUM_PKEY_BLOCKS_PER_SMP

#include "hfi.h"
#include "mad.h"
#include "trace.h"
#include "qp.h"
#include "vnic.h"

/* the reset value from the FM is supposed to be 0xffff, handle both */
#define OPA_LINK_WIDTH_RESET_OLD
#define OPA_LINK_WIDTH_RESET

/* Per-trap bookkeeping node used by the trap send/repress machinery
 * (members elided in this view).
 */
struct trap_node {};

/* Presumably validates the wire request length against the attribute's
 * expected data size.  TODO(review): confirm against the full body.
 */
static int smp_length_check(u32 data_size, u32 request_len)
{}

/* Presumably converts the MAD header into a response to be returned to
 * the requester.
 */
static int reply(struct ib_mad_hdr *smp)
{}

/* Presumably zeroes the SMP payload area before a reply is built. */
static inline void clear_opa_smp_data(struct opa_smp *smp)
{}

/* Presumably returns the P_Key value stored at @pkey_idx for this port. */
static u16 hfi1_lookup_pkey_value(struct hfi1_ibport *ibp, int pkey_idx)
{}

/* Presumably raises a P_Key-change event for @port on device @dd. */
void hfi1_event_pkey_change(struct hfi1_devdata *dd, u32 port)
{}

/*
 * If the port is down, clean up all pending traps.  We need to be careful
 * with the given trap, because it may be queued.
 */
static void cleanup_traps(struct hfi1_ibport *ibp, struct trap_node *trap)
{}

/* Presumably de-duplicates @trap against traps already queued and returns
 * the node (if any) that should actually be sent.  TODO(review): confirm.
 */
static struct trap_node *check_and_add_trap(struct hfi1_ibport *ibp,
					    struct trap_node *trap)
{}

/* Presumably handles an inbound TrapRepress SMP by retiring the matching
 * queued trap.
 */
static void subn_handle_opa_trap_repress(struct hfi1_ibport *ibp,
					 struct opa_smp *smp)
{}

/* Presumably fills in @attr so it addresses the subnet manager at @dlid. */
static void hfi1_update_sm_ah_attr(struct hfi1_ibport *ibp,
				   struct rdma_ah_attr *attr, u32 dlid)
{}

/* Presumably redirects the cached QP0 address handle @ah to @dlid. */
static int hfi1_modify_qp0_ah(struct hfi1_ibport *ibp,
			      struct rvt_ah *ah, u32 dlid)
{}

/* Presumably creates an address handle for QP0 traffic targeting @dlid. */
static struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u32 dlid)
{}

/* Presumably transmits @trap to the subnet manager via QP0. */
static void send_trap(struct hfi1_ibport *ibp, struct trap_node *trap)
{}

/* Timer callback: presumably retransmits traps not yet repressed. */
void hfi1_handle_trap_timer(struct timer_list *t)
{}

/* Presumably allocates and initializes a trap_node for the given trap
 * @type and @trap_num, originating from source @lid.
 */
static struct trap_node *create_trap_node(u8 type, __be16 trap_num, u32 lid)
{}

/* NOTE(review): the "ch." references below appear to cite the InfiniBand
 * Architecture Specification, Vol. 1 — confirm edition.
 */
/*
 * Send a bad P_Key trap (ch. 14.3.8).
 */
void hfi1_bad_pkey(struct hfi1_ibport *ibp, u32 key, u32 sl,
		   u32 qp1, u32 qp2, u32 lid1, u32 lid2)
{}

/*
 * Send a bad M_Key trap (ch. 14.3.9).
 */
static void bad_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
		     __be64 mkey, __be32 dr_slid, u8 return_path[], u8 hop_cnt)
{}

/*
 * Send a Port Capability Mask Changed trap (ch. 14.3.11).
 */
void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u32 port_num)
{}

/*
 * Send a System Image GUID Changed trap (ch. 14.3.12).
 */
void hfi1_sys_guid_chg(struct hfi1_ibport *ibp)
{}

/*
 * Send a Node Description Changed trap (ch. 14.3.13).
 */
void hfi1_node_desc_chg(struct hfi1_ibport *ibp)
{}

/* Get(NodeDescription) handler. */
static int __subn_get_opa_nodedesc(struct opa_smp *smp, u32 am,
				   u8 *data, struct ib_device *ibdev,
				   u32 port, u32 *resp_len, u32 max_len)
{}

/* Get(NodeInfo) handler (OPA-format SMP). */
static int __subn_get_opa_nodeinfo(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u32 port,
				   u32 *resp_len, u32 max_len)
{}

/* Legacy IB Get(NodeInfo) handler. */
static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u32 port)
{}

/* Presumably records the enabled link-width mask @w for the port. */
static void set_link_width_enabled(struct hfi1_pportdata *ppd, u32 w)
{}

/* Presumably records the enabled link-width-downgrade mask @w. */
static void set_link_width_downgrade_enabled(struct hfi1_pportdata *ppd, u32 w)
{}

/* Presumably records the enabled link-speed mask @s for the port. */
static void set_link_speed_enabled(struct hfi1_pportdata *ppd, u32 s)
{}

/* Presumably validates the M_Key of an inbound SMP; on violation a bad
 * M_Key trap is likely raised via bad_mkey().  TODO(review): confirm.
 */
static int check_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
		      int mad_flags, __be64 mkey, __be32 dr_slid,
		      u8 return_path[], u8 hop_cnt)
{}

/*
 * The SMA caches reads from LCB registers in case the LCB is unavailable.
 * (The LCB is unavailable in certain link states, for example.)
 */
struct lcb_datum {};

/* Cached LCB register values; initializer elided in this view. */
static struct lcb_datum lcb_cache[] =;

/* Presumably updates the cache entry for LCB register offset @off. */
static int write_lcb_cache(u32 off, u64 val)
{}

/* Presumably fetches the cached value for LCB register @off into *val. */
static int read_lcb_cache(u32 off, u64 *val)
{}

/* Presumably samples the LTP round-trip time and stores it in the cache. */
void read_ltp_rtt(struct hfi1_devdata *dd)
{}

/* Get(PortInfo) handler. */
static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u32 port,
				   u32 *resp_len, u32 max_len)
{}

/**
 * get_pkeys - return the PKEY table
 * @dd: the hfi1_ib device
 * @port: the IB port number
 * @pkeys: the pkey table is placed here
 */
static int get_pkeys(struct hfi1_devdata *dd, u32 port, u16 *pkeys)
{}

/* Get(P_KeyTable) handler: presumably returns the pkey block(s) selected
 * by the attribute modifier @am.
 */
static int __subn_get_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
				    struct ib_device *ibdev, u32 port,
				    u32 *resp_len, u32 max_len)
{}

/* Enumerator contents elided in this view. */
enum {};

/*
 * Use shortened names to improve readability of
 * {logical,physical}_state_transitions
 */
enum {};

/*
 * IB_PORTPHYSSTATE_POLLING (2) through OPA_PORTPHYSSTATE_MAX (11) are
 * represented in physical_state_transitions.
 */
#define __N_PHYSTATES

/*
 * Within physical_state_transitions, rows represent "old" states,
 * columns "new" states, and physical_state_transitions.allowed[old][new]
 * indicates if the transition from old state to new state is legal (see
 * OPAg1v1, Table 6-4).
 */
static const struct {} physical_state_transitions =;

/*
 * IB_PORT_DOWN (1) through IB_PORT_ACTIVE_DEFER (5) are represented
 * logical_state_transitions
 */

#define __N_LOGICAL_STATES

/*
 * Within logical_state_transitions rows represent "old" states,
 * columns "new" states, and logical_state_transitions.allowed[old][new]
 * indicates if the transition from old state to new state is legal (see
 * OPAg1v1, Table 9-12).
 */
static const struct {} logical_state_transitions =;

/* Presumably returns whether the old->new logical transition is legal
 * per logical_state_transitions (OPAg1v1 Table 9-12).
 */
static int logical_transition_allowed(int old, int new)
{}

/* Presumably returns whether the old->new physical transition is legal
 * per physical_state_transitions (OPAg1v1 Table 6-4).
 */
static int physical_transition_allowed(int old, int new)
{}

/* Presumably validates both the requested logical and physical state
 * transitions for @ppd against the tables above.
 */
static int port_states_transition_allowed(struct hfi1_pportdata *ppd,
					  u32 logical_new, u32 physical_new)
{}

/* Presumably drives the port toward the requested logical/physical state. */
static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp,
			   u32 logical_state, u32 phys_state, int local_mad)
{}

/*
 * subn_set_opa_portinfo - set port information
 * @smp: the incoming SM packet
 * @ibdev: the infiniband device
 * @port: the port on the device
 *
 */
static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u32 port,
				   u32 *resp_len, u32 max_len, int local_mad)
{}

/**
 * set_pkeys - set the PKEY table for ctxt 0
 * @dd: the hfi1_ib device
 * @port: the IB port number
 * @pkeys: the PKEY table
 */
static int set_pkeys(struct hfi1_devdata *dd, u32 port, u16 *pkeys)
{}

/* Set(P_KeyTable) handler: presumably validates the AM block selection
 * and applies the new table via set_pkeys().
 */
static int __subn_set_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
				    struct ib_device *ibdev, u32 port,
				    u32 *resp_len, u32 max_len)
{}

/* Value elided in this view; see filter_sc2vlt() comment below. */
#define ILLEGAL_VL
/*
 * filter_sc2vlt changes mappings to VL15 to ILLEGAL_VL (except
 * for SC15, which must map to VL15). If we don't remap things this
 * way it is possible for VL15 counters to increment when we try to
 * send on a SC which is mapped to an invalid VL.
 * When getting the table convert ILLEGAL_VL back to VL15.
 */
static void filter_sc2vlt(void *data, bool set)
{}

/* Presumably writes the SC-to-VLt mapping in @data to the hardware. */
static int set_sc2vlt_tables(struct hfi1_devdata *dd, void *data)
{}

/* Presumably reads the hardware SC-to-VLt mapping into @data. */
static int get_sc2vlt_tables(struct hfi1_devdata *dd, void *data)
{}

/*
 * Get/Set handlers for the remaining per-attribute OPA subnet management
 * queries: SL<->SC and SC<->VL mapping tables, port state info (psi),
 * cable info, buffer control (bct), and VL arbitration.  Each presumably
 * validates @am/@max_len and copies the table to/from @data; bodies are
 * elided in this view.
 */
static int __subn_get_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u32 port,
				   u32 *resp_len, u32 max_len)
{}

static int __subn_set_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u32 port,
				   u32 *resp_len, u32 max_len)
{}

static int __subn_get_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u32 port,
				   u32 *resp_len, u32 max_len)
{}

static int __subn_set_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u32 port,
				   u32 *resp_len, u32 max_len)
{}

static int __subn_get_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
				    struct ib_device *ibdev, u32 port,
				    u32 *resp_len, u32 max_len)
{}

static int __subn_set_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
				    struct ib_device *ibdev, u32 port,
				    u32 *resp_len, u32 max_len)
{}

static int __subn_get_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
				     struct ib_device *ibdev, u32 port,
				     u32 *resp_len, u32 max_len)
{}

static int __subn_set_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
				     struct ib_device *ibdev, u32 port,
				     u32 *resp_len, u32 max_len)
{}

static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
			      struct ib_device *ibdev, u32 port,
			      u32 *resp_len, u32 max_len)
{}

/* Note: takes @local_mad, so the set path presumably distinguishes
 * locally-originated SMPs (see is_local_mad()).
 */
static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
			      struct ib_device *ibdev, u32 port,
			      u32 *resp_len, u32 max_len, int local_mad)
{}

static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data,
				     struct ib_device *ibdev, u32 port,
				     u32 *resp_len, u32 max_len)
{}

static int __subn_get_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
			      struct ib_device *ibdev, u32 port, u32 *resp_len,
			      u32 max_len)
{}

static int __subn_set_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
			      struct ib_device *ibdev, u32 port, u32 *resp_len,
			      u32 max_len)
{}

static int __subn_get_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
				 struct ib_device *ibdev, u32 port,
				 u32 *resp_len, u32 max_len)
{}

static int __subn_set_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
				 struct ib_device *ibdev, u32 port,
				 u32 *resp_len, u32 max_len)
{}

/* OPA PMA MAD wire layout (members elided in this view). */
struct opa_pma_mad {} __packed;

/* PortStatus request attribute (members elided). */
struct opa_port_status_req {};

/* Value elided in this view. */
#define VL_MASK_ALL

/* PortStatus response attribute (members elided). */
struct opa_port_status_rsp {};

/* CounterSelect bit definitions for ClearPortStatus (values elided). */
enum counter_selects {};

/* ClearPortStatus attribute (members elided). */
struct opa_clear_port_status {};

/* One member attribute of an aggregate SMP (members elided). */
struct opa_aggregate {};

/* Presumably mask/shift/add values for the local-link-integrity (LLI)
 * and link-error-recovery (LER) resolution fields; values elided.
 */
#define MSK_LLI
#define MSK_LLI_SFT
#define MSK_LER
#define MSK_LER_SFT
#define ADD_LLI
#define ADD_LER

/* Request contains first three fields, response contains those plus the rest */
struct opa_port_data_counters_msg {};

struct opa_port_error_counters64_msg {};

struct opa_port_error_info_msg {};

/* opa_port_error_info_msg error_info_select_mask bit definitions */
enum error_info_selects {};

/* PMA Get(ClassPortInfo) handler. */
static int pma_get_opa_classportinfo(struct opa_pma_mad *pmp,
				     struct ib_device *ibdev, u32 *resp_len)
{}

/* Presumably applies A0 hardware-erratum adjustments to the PortStatus
 * response.  TODO(review): confirm against the full body.
 */
static void a0_portstatus(struct hfi1_pportdata *ppd,
			  struct opa_port_status_rsp *rsp)
{}

/**
 * tx_link_width - convert link width bitmask to integer
 * value representing actual link width.
 * @link_width: width of active link
 * @return: return index of the bit set in link_width var
 *
 * The function convert and return the index of bit set
 * that indicate the current link width.
 */
u16 tx_link_width(u16 link_width)
{}

/**
 * get_xmit_wait_counters - Convert HFI 's SendWaitCnt/SendWaitVlCnt
 * counter in unit of TXE cycle times to flit times.
 * @ppd: info of physical Hfi port
 * @link_width: width of active link
 * @link_speed: speed of active link
 * @vl: represent VL0-VL7, VL15 for PortVLXmitWait counters request
 * and if vl value is C_VL_COUNT, it represent SendWaitCnt
 * counter request
 * @return: return SendWaitCnt/SendWaitVlCnt counter value per vl.
 *
 * Convert SendWaitCnt/SendWaitVlCnt counter from TXE cycle times to
 * flit times. Call this function to samples these counters. This
 * function will calculate for previous state transition and update
 * current state at end of function using ppd->prev_link_width and
 * ppd->port_vl_xmit_wait_last to port_vl_xmit_wait_curr and link_width.
 */
u64 get_xmit_wait_counters(struct hfi1_pportdata *ppd,
			   u16 link_width, u16 link_speed, int vl)
{}

/* PMA Get(PortStatus) handler. */
static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
				  struct ib_device *ibdev,
				  u32 port, u32 *resp_len)
{}

/* Presumably sums the port's error counters, applying the LLI/LER
 * resolutions @res_lli/@res_ler to those contributions.
 */
static u64 get_error_counter_summary(struct ib_device *ibdev, u32 port,
				     u8 res_lli, u8 res_ler)
{}

/* Presumably applies A0 hardware-erratum adjustments to data counters. */
static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp)
{}

/* Presumably fills the common per-port data-counter response fields. */
static void pma_get_opa_port_dctrs(struct ib_device *ibdev,
				   struct _port_dctrs *rsp)
{}

/* PMA Get(DataPortCounters) handler. */
static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
				    struct ib_device *ibdev,
				    u32 port, u32 *resp_len)
{}

/* Legacy IB PMA Get(PortCountersExt) handler. */
static int pma_get_ib_portcounters_ext(struct ib_pma_mad *pmp,
				       struct ib_device *ibdev, u32 port)
{}

/* Presumably fills the common per-port error-counter response fields. */
static void pma_get_opa_port_ectrs(struct ib_device *ibdev,
				   struct _port_ectrs *rsp, u32 port)
{}

/* PMA Get(PortErrorCounters) handler. */
static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
				  struct ib_device *ibdev,
				  u32 port, u32 *resp_len)
{}

/* Legacy IB PMA Get(PortCounters) handler. */
static int pma_get_ib_portcounters(struct ib_pma_mad *pmp,
				   struct ib_device *ibdev, u32 port)
{}

/* PMA Get(PortErrorInfo) handler. */
static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
				 struct ib_device *ibdev,
				 u32 port, u32 *resp_len)
{}

/* PMA Set(ClearPortStatus) handler: presumably clears the counters
 * selected in the request.
 */
static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
				  struct ib_device *ibdev,
				  u32 port, u32 *resp_len)
{}

/* PMA Set(PortErrorInfo) handler: presumably clears selected error-info
 * status.
 */
static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp,
				 struct ib_device *ibdev,
				 u32 port, u32 *resp_len)
{}

/* CongestionInfo attribute wire layout (members elided). */
struct opa_congestion_info_attr {} __packed;

/* Get(CongestionInfo) handler. */
static int __subn_get_opa_cong_info(struct opa_smp *smp, u32 am, u8 *data,
				    struct ib_device *ibdev, u32 port,
				    u32 *resp_len, u32 max_len)
{}

/* Get(CongestionSetting) handler. */
static int __subn_get_opa_cong_setting(struct opa_smp *smp, u32 am,
				       u8 *data, struct ib_device *ibdev,
				       u32 port, u32 *resp_len, u32 max_len)
{}

/*
 * Apply congestion control information stored in the ppd to the
 * active structure.
 */
static void apply_cc_state(struct hfi1_pportdata *ppd)
{}

/* Set(CongestionSetting) handler. */
static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
				       struct ib_device *ibdev, u32 port,
				       u32 *resp_len, u32 max_len)
{}

/* Get(HFI CongestionLog) handler. */
static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am,
					u8 *data, struct ib_device *ibdev,
					u32 port, u32 *resp_len, u32 max_len)
{}

/* Get(CongestionControlTable) handler. */
static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u32 port,
				   u32 *resp_len, u32 max_len)
{}

/* Set(CongestionControlTable) handler. */
static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u32 port,
				   u32 *resp_len, u32 max_len)
{}

/* LEDInfo attribute layout (members elided). */
struct opa_led_info {};

/* Values elided in this view. */
#define OPA_LED_SHIFT
#define OPA_LED_MASK

/* Get(LEDInfo) handler. */
static int __subn_get_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u32 port,
				   u32 *resp_len, u32 max_len)
{}

/* Set(LEDInfo) handler: presumably controls the port beaconing LED. */
static int __subn_set_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u32 port,
				   u32 *resp_len, u32 max_len)
{}

/* Presumably dispatches a Get() SMP to the per-attribute handler
 * selected by @attr_id.
 */
static int subn_get_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
			    u8 *data, struct ib_device *ibdev, u32 port,
			    u32 *resp_len, u32 max_len)
{}

/* Presumably dispatches a Set() SMP to the per-attribute handler
 * selected by @attr_id.
 */
static int subn_set_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
			    u8 *data, struct ib_device *ibdev, u32 port,
			    u32 *resp_len, u32 max_len, int local_mad)
{}

/* Presumably flags the aggregate member @ag as failed in the response. */
static inline void set_aggr_error(struct opa_aggregate *ag)
{}

/* Presumably walks the aggregate SMP's member attributes, invoking the
 * Get handler for each one.
 */
static int subn_get_opa_aggregate(struct opa_smp *smp,
				  struct ib_device *ibdev, u32 port,
				  u32 *resp_len)
{}

/* Set() counterpart of subn_get_opa_aggregate(). */
static int subn_set_opa_aggregate(struct opa_smp *smp,
				  struct ib_device *ibdev, u32 port,
				  u32 *resp_len, int local_mad)
{}

/*
 * OPAv1 specifies that, on the transition to link up, these counters
 * are cleared:
 *   PortRcvErrors [*]
 *   LinkErrorRecovery
 *   LocalLinkIntegrityErrors
 *   ExcessiveBufferOverruns [*]
 *
 * [*] Error info associated with these counters is retained, but the
 * error info status is reset to 0.
 */
void clear_linkup_counters(struct hfi1_devdata *dd)
{}

/* Presumably returns nonzero if FULL_MGMT_P_KEY is present in the
 * port's pkey table.
 */
static int is_full_mgmt_pkey_in_table(struct hfi1_ibport *ibp)
{}

/*
 * is_local_mad() returns 1 if 'mad' is sent from, and destined to the
 * local node, 0 otherwise.
 */
static int is_local_mad(struct hfi1_ibport *ibp, const struct opa_mad *mad,
			const struct ib_wc *in_wc)
{}

/*
 * opa_local_smp_check() should only be called on MADs for which
 * is_local_mad() returns true. It applies the SMP checks that are
 * specific to SMPs which are sent from, and destined to this node.
 * opa_local_smp_check() returns 0 if the SMP passes its checks, 1
 * otherwise.
 *
 * SMPs which arrive from other nodes are instead checked by
 * opa_smp_check().
 */
static int opa_local_smp_check(struct hfi1_ibport *ibp,
			       const struct ib_wc *in_wc)
{}

/**
 * hfi1_pkey_validation_pma - It validates PKEYs for incoming PMA MAD packets.
 * @ibp: IB port data
 * @in_mad: MAD packet with header and data
 * @in_wc: Work completion data such as source LID, port number, etc.
 *
 * These are all the possible logic rules for validating a pkey:
 *
 * a) If pkey neither FULL_MGMT_P_KEY nor LIM_MGMT_P_KEY,
 *    and NOT self-originated packet:
 *     Drop MAD packet as it should always be part of the
 *     management partition unless it's a self-originated packet.
 *
 * b) If pkey_index -> FULL_MGMT_P_KEY, and LIM_MGMT_P_KEY in pkey table:
 *     The packet is coming from a management node and the receiving node
 *     is also a management node, so it is safe for the packet to go through.
 *
 * c) If pkey_index -> FULL_MGMT_P_KEY, and LIM_MGMT_P_KEY is NOT in pkey table:
 *     Drop the packet as LIM_MGMT_P_KEY should always be in the pkey table.
 *     It could be an FM misconfiguration.
 *
 * d) If pkey_index -> LIM_MGMT_P_KEY and FULL_MGMT_P_KEY is NOT in pkey table:
 *     It is safe for the packet to go through since a non-management node is
 *     talking to another non-management node.
 *
 * e) If pkey_index -> LIM_MGMT_P_KEY and FULL_MGMT_P_KEY in pkey table:
 *     Drop the packet because a non-management node is talking to a
 *     management node, and it could be an attack.
 *
 * For the implementation, these rules can be simplied to only checking
 * for (a) and (e). There's no need to check for rule (b) as
 * the packet doesn't need to be dropped. Rule (c) is not possible in
 * the driver as LIM_MGMT_P_KEY is always in the pkey table.
 *
 * Return:
 * 0 - pkey is okay, -EINVAL it's a bad pkey
 */
static int hfi1_pkey_validation_pma(struct hfi1_ibport *ibp,
				    const struct opa_mad *in_mad,
				    const struct ib_wc *in_wc)
{}

/* OPA subnet-management class processor: presumably routes the SMP's
 * method (Get/Set/TrapRepress) to the attribute dispatchers.
 */
static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
			    u32 port, const struct opa_mad *in_mad,
			    struct opa_mad *out_mad,
			    u32 *resp_len, int local_mad)
{}

/* Legacy IB (non-OPA) subnet-management class processor. */
static int process_subn(struct ib_device *ibdev, int mad_flags,
			u32 port, const struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{}

/* Legacy IB performance-management class processor. */
static int process_perf(struct ib_device *ibdev, u32 port,
			const struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{}

/* OPA performance-management class processor. */
static int process_perf_opa(struct ib_device *ibdev, u32 port,
			    const struct opa_mad *in_mad,
			    struct opa_mad *out_mad, u32 *resp_len)
{}

/* Presumably validates and dispatches an OPA-sized MAD by management
 * class, filling out_mad/out_mad_size/out_mad_pkey_index.
 */
static int hfi1_process_opa_mad(struct ib_device *ibdev, int mad_flags,
				u32 port, const struct ib_wc *in_wc,
				const struct ib_grh *in_grh,
				const struct opa_mad *in_mad,
				struct opa_mad *out_mad, size_t *out_mad_size,
				u16 *out_mad_pkey_index)
{}

/* Presumably dispatches a standard-sized IB MAD by management class. */
static int hfi1_process_ib_mad(struct ib_device *ibdev, int mad_flags, u32 port,
			       const struct ib_wc *in_wc,
			       const struct ib_grh *in_grh,
			       const struct ib_mad *in_mad,
			       struct ib_mad *out_mad)
{}

/**
 * hfi1_process_mad - process an incoming MAD packet
 * @ibdev: the infiniband device this packet came in on
 * @mad_flags: MAD flags
 * @port: the port number this packet came in on
 * @in_wc: the work completion entry for this packet
 * @in_grh: the global route header for this packet
 * @in_mad: the incoming MAD
 * @out_mad: any outgoing MAD reply
 * @out_mad_size: size of the outgoing MAD reply
 * @out_mad_pkey_index: used to apss back the packet key index
 *
 * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
 * interested in processing.
 *
 * Note that the verbs framework has already done the MAD sanity checks,
 * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
 * MADs.
 *
 * This is called by the ib_mad module.
 */
int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u32 port,
		     const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		     const struct ib_mad *in_mad, struct ib_mad *out_mad,
		     size_t *out_mad_size, u16 *out_mad_pkey_index)
{}