/* linux/drivers/infiniband/hw/hfi1/driver.c */

// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright(c) 2015-2020 Intel Corporation.
 * Copyright(c) 2021 Cornelis Networks.
 */

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/prefetch.h>
#include <rdma/ib_verbs.h>
#include <linux/etherdevice.h>

#include "hfi.h"
#include "trace.h"
#include "qp.h"
#include "sdma.h"
#include "debugfs.h"
#include "vnic.h"
#include "fault.h"

#include "ipoib.h"
#include "netdev.h"

#undef pr_fmt
#define pr_fmt(fmt)

DEFINE_MUTEX();	/* general driver use */

unsigned int hfi1_max_mtu =;
module_param_named(max_mtu, hfi1_max_mtu, uint, S_IRUGO);
MODULE_PARM_DESC();

unsigned int hfi1_cu =;
module_param_named(cu, hfi1_cu, uint, S_IRUGO);
MODULE_PARM_DESC();

unsigned long hfi1_cap_mask =;
static int hfi1_caps_set(const char *val, const struct kernel_param *kp);
static int hfi1_caps_get(char *buffer, const struct kernel_param *kp);
static const struct kernel_param_ops cap_ops =;
module_param_cb();
MODULE_PARM_DESC();

MODULE_LICENSE();
MODULE_DESCRIPTION();

/*
 * MAX_PKT_RCV is the max # of packets processed per receive interrupt.
 * NOTE(review): the values below were missing from this extract;
 * restored from the upstream driver — verify before merging.
 */
#define MAX_PKT_RECV 64
/*
 * MAX_PKT_THREAD_RCV is the max # of packets processed before
 * the qp_wait_list queue is flushed.
 */
#define MAX_PKT_RECV_THREAD (MAX_PKT_RECV * 4)
#define EGR_HEAD_UPDATE_THRESHOLD 16

/* Driver-wide statistics counters (presumably read elsewhere — see hfi.h). */
struct hfi1_ib_stats hfi1_stats;

/*
 * NOTE(review): every function body in this extract has been elided to
 * "{}". The comments below are inferred from names/signatures only and
 * must be confirmed against the full source.
 */
/* Setter callback for the "cap_mask" module parameter. */
static int hfi1_caps_set(const char *val, const struct kernel_param *kp)
{}

/* Getter callback for the "cap_mask" module parameter. */
static int hfi1_caps_get(char *buffer, const struct kernel_param *kp)
{}

/* Presumably maps an rvt_dev_info back to its struct pci_dev — confirm. */
struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi)
{}

/*
 * Return count of units with at least one port ACTIVE.
 */
int hfi1_count_active_units(void)
{}

/*
 * Get address of eager buffer from its index (allocated in chunks, not
 * contiguous).
 */
static inline void *get_egrbuf(const struct hfi1_ctxtdata *rcd, u64 rhf,
			       u8 *update)
{}

/* Presumably returns the packet header located via @rhf_addr — confirm. */
static inline void *hfi1_get_header(struct hfi1_ctxtdata *rcd,
				    __le32 *rhf_addr)
{}

/* 9B (struct ib_header) header accessor; body elided in this extract. */
static inline struct ib_header *hfi1_get_msgheader(struct hfi1_ctxtdata *rcd,
						   __le32 *rhf_addr)
{}

/* 16B (struct hfi1_16b_header) header accessor; body elided in this extract. */
static inline struct hfi1_16b_header
		*hfi1_get_16B_header(struct hfi1_ctxtdata *rcd,
				     __le32 *rhf_addr)
{}

/*
 * Validate and encode a given RcvArray Buffer size.
 * The function will check whether the given size falls within
 * allowed size ranges for the respective type and, optionally,
 * return the proper encoding.
 * NOTE(review): body elided in this extract.
 */
int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encoded)
{}

/* Presumably handles receive-header errors for @packet; body elided. */
static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
		       struct hfi1_packet *packet)
{}

/* Presumably initializes @packet state from @rcd; body elided. */
static inline void init_packet(struct hfi1_ctxtdata *rcd,
			       struct hfi1_packet *packet)
{}

/* We support only two types - 9B and 16B for now */
static const hfi1_handle_cnp hfi1_handle_cnp_tbl[2] =;

/**
 * hfi1_process_ecn_slowpath - Process FECN or BECN bits
 * @qp: The packet's destination QP
 * @pkt: The packet itself.
 * @prescan: Is the caller the RXQ prescan
 *
 * Process the packet's FECN or BECN bits. By now, the packet
 * has already been evaluated whether processing of those bits should
 * be done.
 * The significance of the @prescan argument is that if the caller
 * is the RXQ prescan, a CNP will be sent out instead of waiting for the
 * normal packet processing to send an ACK with BECN set (or a CNP).
 *
 * NOTE(review): body elided in this extract.
 */
bool hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
			       bool prescan)
{}

struct ps_mdata {};

/* Presumably seeds @mdata from @packet/@rcd state; body elided. */
static inline void init_ps_mdata(struct ps_mdata *mdata,
				 struct hfi1_packet *packet)
{}

/* Prescan-walk termination test; body elided in this extract. */
static inline int ps_done(struct ps_mdata *mdata, u64 rhf,
			  struct hfi1_ctxtdata *rcd)
{}

/* Prescan-walk skip test; body elided in this extract. */
static inline int ps_skip(struct ps_mdata *mdata, u64 rhf,
			  struct hfi1_ctxtdata *rcd)
{}

/* Advance the prescan iterator; body elided in this extract. */
static inline void update_ps_mdata(struct ps_mdata *mdata,
				   struct hfi1_ctxtdata *rcd)
{}

/*
 * prescan_rxq - search through the receive queue looking for packets
 * containing Explicit Congestion Notifications (FECNs, or BECNs).
 * When an ECN is found, process the Congestion Notification, and toggle
 * it off.
 * This is declared as a macro to allow quick checking of the port to avoid
 * the overhead of a function call if not enabled.
 * NOTE(review): the macro body was missing from this extract (it expanded
 * to nothing, silently disabling prescan at every call site); restored
 * from the upstream definition — verify before merging.
 */
#define prescan_rxq(rcd, packet) \
	do { \
		if (rcd->ppd->cc_prescan) \
			__prescan_rxq(packet); \
	} while (0)
/* Worker for prescan_rxq(); body elided in this extract. */
static void __prescan_rxq(struct hfi1_packet *packet)
{}

/* Presumably flushes queued QP work for the context; body elided. */
static void process_rcv_qp_work(struct hfi1_packet *packet)
{}

/* Slow-path bail when the per-interrupt packet budget is exceeded. */
static noinline int max_packet_exceeded(struct hfi1_packet *packet, int thread)
{}

/* Fast budget check, presumably wrapping max_packet_exceeded(); body elided. */
static inline int check_max_packet(struct hfi1_packet *packet, int thread)
{}

/* Skip the current packet without full processing; body elided. */
static noinline int skip_rcv_packet(struct hfi1_packet *packet, int thread)
{}

/* Per-packet receive work for the NAPI path; body elided. */
static void process_rcv_packet_napi(struct hfi1_packet *packet)
{}

/* Per-packet receive work for the interrupt/thread path; body elided. */
static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
{}

/* Periodic head-pointer/bookkeeping update during receive; body elided. */
static inline void process_rcv_update(int last, struct hfi1_packet *packet)
{}

/* Final per-packet cleanup; body elided in this extract. */
static inline void finish_packet(struct hfi1_packet *packet)
{}

/*
 * handle_receive_interrupt_napi_fp - receive a packet
 * @rcd: the context
 * @budget: polling budget
 *
 * Called from interrupt handler for receive interrupt.
 * This is the fast path interrupt handler
 * when executing napi soft irq environment.
 *
 * NOTE(review): body elided in this extract (as are all bodies below).
 */
int handle_receive_interrupt_napi_fp(struct hfi1_ctxtdata *rcd, int budget)
{}

/*
 * Handle receive interrupts when using the no dma rtail option.
 */
int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread)
{}

/* DMA-rtail variant of the receive interrupt handler; body elided. */
int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread)
{}

/* Presumably switches contexts to the fast-path handler; body elided. */
static void set_all_fastpath(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{}

/* Presumably switches all contexts to the slow-path handler; body elided. */
void set_all_slowpath(struct hfi1_devdata *dd)
{}

/* Slow-path helper for set_armed_to_active(); body elided. */
static bool __set_armed_to_active(struct hfi1_packet *packet)
{}

/**
 * set_armed_to_active  - the fast path for armed to active
 * @packet: the packet structure
 *
 * Return true if packet processing needs to bail.
 */
static bool set_armed_to_active(struct hfi1_packet *packet)
{}

/*
 * handle_receive_interrupt - receive a packet
 * @rcd: the context
 *
 * Called from interrupt handler for errors or receive interrupt.
 * This is the slow path interrupt handler.
 */
int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
{}

/*
 * handle_receive_interrupt_napi_sp - receive a packet
 * @rcd: the context
 * @budget: polling budget
 *
 * Called from interrupt handler for errors or receive interrupt.
 * This is the slow path interrupt handler
 * when executing napi soft irq environment.
 */
int handle_receive_interrupt_napi_sp(struct hfi1_ctxtdata *rcd, int budget)
{}

/*
 * We may discover in the interrupt that the hardware link state has
 * changed from ARMED to ACTIVE (due to the arrival of a non-SC15 packet),
 * and we need to update the driver's notion of the link state.  We cannot
 * run set_link_state from interrupt context, so we queue this function on
 * a workqueue.
 *
 * We delay the regular interrupt processing until after the state changes
 * so that the link will be in the correct state by the time any application
 * we wake up attempts to send a reply to any message it received.
 * (Subsequent receive interrupts may possibly force the wakeup before we
 * update the link state.)
 *
 * The rcd is freed in hfi1_free_ctxtdata after hfi1_postinit_cleanup invokes
 * dd->f_cleanup(dd) to disable the interrupt handler and flush workqueues,
 * so we're safe from use-after-free of the rcd.
 *
 * NOTE(review): body elided in this extract (as are all bodies below).
 */
void receive_interrupt_work(struct work_struct *work)
{}

/*
 * Convert a given MTU size to the on-wire MAD packet enumeration.
 * Return -1 if the size is invalid.
 */
int mtu_to_enum(u32 mtu, int default_if_bad)
{}

/* Inverse of mtu_to_enum(): MAD enumeration to MTU bytes; body elided. */
u16 enum_to_mtu(int mtu)
{}

/*
 * set_mtu - set the MTU
 * @ppd: the per port data
 *
 * We can handle "any" incoming size, the issue here is whether we
 * need to restrict our outgoing size.  We do not deal with what happens
 * to programs that are already running when the size changes.
 */
int set_mtu(struct hfi1_pportdata *ppd)
{}

/* Presumably programs the port LID/LMC; body elided — confirm. */
int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc)
{}

/* Cancel custom LED blinking (see hfi1_start_led_override); body elided. */
void shutdown_led_override(struct hfi1_pportdata *ppd)
{}

/* Timer callback driving the LED override blink pattern; body elided. */
static void run_led_override(struct timer_list *t)
{}

/*
 * To have the LED blink in a particular pattern, provide timeon and timeoff
 * in milliseconds.
 * To turn off custom blinking and return to normal operation, use
 * shutdown_led_override()
 *
 * NOTE(review): body elided in this extract (as are all bodies below).
 */
void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
			     unsigned int timeoff)
{}

/**
 * hfi1_reset_device - reset the chip if possible
 * @unit: the device to reset
 *
 * Whether or not reset is successful, we attempt to re-initialize the chip
 * (that is, much like a driver unload/reload).  We clear the INITTED flag
 * so that the various entry points will fail until we reinitialize.  For
 * now, we only allow this if no user contexts are open that use chip resources
 */
int hfi1_reset_device(int unit)
{}

/* Presumably parses the 9B IB header into @packet fields; body elided. */
static inline void hfi1_setup_ib_header(struct hfi1_packet *packet)
{}

/* Ingress sanity checks for bypass (16B) packets; body elided. */
static int hfi1_bypass_ingress_pkt_check(struct hfi1_packet *packet)
{}

/* Per-packet setup for the 9B packet format; body elided. */
static int hfi1_setup_9B_packet(struct hfi1_packet *packet)
{}

/* Per-packet setup for the bypass (16B) packet format; body elided. */
static int hfi1_setup_bypass_packet(struct hfi1_packet *packet)
{}

/* Presumably logs/reports RHF error flags; body elided. */
static void show_eflags_errs(struct hfi1_packet *packet)
{}

/* Handle RHF error flags for a received packet; body elided. */
void handle_eflags(struct hfi1_packet *packet)
{}

/* IPoIB receive entry used by the netdev dispatch table; body elided. */
static void hfi1_ipoib_ib_rcv(struct hfi1_packet *packet)
{}

/*
 * The following functions are called by the interrupt handler. They are type
 * specific handlers for each packet type.
 *
 * NOTE(review): bodies elided in this extract.
 */
static void process_receive_ib(struct hfi1_packet *packet)
{}

/* Handler for bypass (16B) packets; body elided. */
static void process_receive_bypass(struct hfi1_packet *packet)
{}

/* Handler for packets flagged with receive errors; body elided. */
static void process_receive_error(struct hfi1_packet *packet)
{}

/* Handler for KDETH expected-receive packets; body elided. */
static void kdeth_process_expected(struct hfi1_packet *packet)
{}

/* Handler for KDETH eager packets; body elided. */
static void kdeth_process_eager(struct hfi1_packet *packet)
{}

/* Handler for invalid RHF receive types; body elided. */
static void process_receive_invalid(struct hfi1_packet *packet)
{}

/*
 * Max number of receive-header entries dumped by seqfile_dump_rcd().
 * NOTE(review): the value was missing from this extract; restored from
 * the upstream driver — verify before merging.
 */
#define HFI1_RCVHDR_DUMP_MAX 5

/* Dump receive context state to a seq_file; body elided in this extract. */
void seqfile_dump_rcd(struct seq_file *s, struct hfi1_ctxtdata *rcd)
{}

const rhf_rcv_function_ptr normal_rhf_rcv_functions[] =;

const rhf_rcv_function_ptr netdev_rhf_rcv_functions[] =;