linux/drivers/infiniband/hw/hfi1/pio.c

// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright(c) 2015-2018 Intel Corporation.
 */

#include <linux/delay.h>
#include "hfi.h"
#include "qp.h"
#include "trace.h"

#define SC(name)
/*
 * Send Context functions
 */
static void sc_wait_for_packet_egress(struct send_context *sc, int pause);

/*
 * Set the CM reset bit and wait for it to clear.  Use the provided
 * sendctrl register.  This routine has no locking.
 */
void __cm_reset(struct hfi1_devdata *dd, u64 sendctrl)
{}

/* global control of PIO send */
void pio_send_control(struct hfi1_devdata *dd, int op)
{}

/* number of send context memory pools */
#define NUM_SC_POOLS

/* Send Context Size (SCS) wildcards */
#define SCS_POOL_0
#define SCS_POOL_1

/* Send Context Count (SCC) wildcards */
#define SCC_PER_VL
#define SCC_PER_CPU
#define SCC_PER_KRCVQ

/* Send Context Size (SCS) constants */
#define SCS_ACK_CREDITS
#define SCS_VL15_CREDITS

#define PIO_THRESHOLD_CEILING

#define PIO_WAIT_BATCH_SIZE

/* default send context sizes */
static struct sc_config_sizes sc_config_sizes[SC_MAX] =;

/* send context memory pool configuration */
struct mem_pool_config {};

/* default memory pool configuration: 100% in pool 0 */
static struct mem_pool_config sc_mem_pool_config[NUM_SC_POOLS] =;

/* memory pool information, used when calculating final sizes */
struct mem_pool_info {};

/*
 * Convert a pool wildcard to a valid pool index.  The wildcards
 * start at -1 and grow more negative.  Map them as:
 *	-1 => 0
 *	-2 => 1
 *	etc.
 *
 * Return -1 on non-wildcard input, otherwise convert to a pool number.
 */
static int wildcard_to_pool(int wc)
{}
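
/*
 * A minimal sketch of the wildcard mapping described above, assuming the
 * only contract is -1 => 0, -2 => 1, etc., with non-negative input treated
 * as non-wildcard.  Illustrative only; the name is hypothetical.
 */
static inline int example_wildcard_to_pool(int wc)
{
        if (wc >= 0)
                return -1;      /* not a wildcard */
        return -wc - 1;         /* -1 => 0, -2 => 1, ... */
}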

static const char *sc_type_names[SC_MAX] =;

static const char *sc_type_name(int index)
{}

/*
 * Read the send context memory pool configuration and send context
 * size configuration.  Replace any wildcards and come up with final
 * counts and sizes for the send context types.
 */
int init_sc_pools_and_sizes(struct hfi1_devdata *dd)
{}

int init_send_contexts(struct hfi1_devdata *dd)
{}

/*
 * Allocate a software index and hardware context of the given type.
 *
 * Must be called with dd->sc_lock held.
 */
static int sc_hw_alloc(struct hfi1_devdata *dd, int type, u32 *sw_index,
		       u32 *hw_context)
{}

/*
 * Free the send context given by its software index.
 *
 * Must be called with dd->sc_lock held.
 */
static void sc_hw_free(struct hfi1_devdata *dd, u32 sw_index, u32 hw_context)
{}

/* return the base context of a context in a group */
static inline u32 group_context(u32 context, u32 group)
{}

/* return the size of a group */
static inline u32 group_size(u32 group)
{}
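
/*
 * Illustrative sketch of the group arithmetic, assuming groups are
 * power-of-2 sized and aligned (group N contains 1 << N contexts).  The
 * names are hypothetical and not part of the driver API.
 */
static inline u32 example_group_size(u32 group)
{
        return 1U << group;     /* group 0 = 1 context, group 1 = 2, ... */
}

static inline u32 example_group_context(u32 context, u32 group)
{
        /* clear the low bits to get the first (base) context in the group */
        return (context >> group) << group;
}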

/*
 * Obtain the credit return addresses, kernel virtual and bus, for the
 * given sc.
 *
 * To understand this routine:
 * o va and dma are arrays of struct credit_return.  One for each physical
 *   send context, per NUMA.
 * o Each send context always looks in its relative location in a struct
 *   credit_return for its credit return.
 * o Each send context in a group must have its return address CSR programmed
 *   with the same value.  Use the address of the first send context in the
 *   group.
 */
static void cr_group_addresses(struct send_context *sc, dma_addr_t *dma)
{}

/*
 * Work queue function triggered in error interrupt routine for
 * kernel contexts.
 */
static void sc_halted(struct work_struct *work)
{}

/*
 * Calculate the PIO block threshold for this send context using the given MTU.
 * Trigger a credit return when one MTU plus an optional header worth of
 * credits remains unreturned.
 *
 * Parameter mtu is in bytes.
 * Parameter hdrqentsize is in DWORDs.
 *
 * Return value is what to write into the CSR: trigger return when
 * unreturned credits pass this count.
 */
u32 sc_mtu_to_threshold(struct send_context *sc, u32 mtu, u32 hdrqentsize)
{}
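
/*
 * Sketch of the MTU-based threshold calculation described above, under the
 * assumption that a PIO block is 64 bytes and that the context has 'credits'
 * blocks allocated.  Hypothetical helper, shown only to make the units
 * (bytes vs. DWORDs vs. blocks) concrete.
 */
static inline u32 example_mtu_to_threshold(u32 credits, u32 mtu,
                                           u32 hdrqentsize)
{
        const u32 block_bytes = 64;     /* assumed PIO block size */
        u32 release;

        /* bytes for one MTU plus the optional header (DWORDs => bytes) */
        mtu += hdrqentsize << 2;
        release = DIV_ROUND_UP(mtu, block_bytes);

        /* trigger a return when only 'release' credits remain unreturned */
        return (credits <= release) ? 1 : credits - release;
}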

/*
 * Calculate credit threshold in terms of percent of the allocated credits.
 * Trigger when unreturned credits equal or exceed the percentage of the whole.
 *
 * Return value is what to write into the CSR: trigger return when
 * unreturned credits pass this count.
 */
u32 sc_percent_to_threshold(struct send_context *sc, u32 percent)
{}
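
/*
 * Sketch of the percentage-based threshold, assuming it is simply the given
 * percent of the context's allocated credits (hypothetical helper).
 */
static inline u32 example_percent_to_threshold(u32 credits, u32 percent)
{
        return (credits * percent) / 100;
}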

/*
 * Set the credit return threshold.
 */
void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold)
{}

/*
 * set_pio_integrity
 *
 * Set the CHECK_ENABLE register for the send context 'sc'.
 */
void set_pio_integrity(struct send_context *sc)
{}

static u32 get_buffers_allocated(struct send_context *sc)
{}

static void reset_buffers_allocated(struct send_context *sc)
{}

/*
 * Allocate a NUMA relative send context structure of the given type along
 * with a HW context.
 */
struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
			      uint hdrqentsize, int numa)
{}

/* free a per-NUMA send context structure */
void sc_free(struct send_context *sc)
{}

/* disable the context */
void sc_disable(struct send_context *sc)
{}

/* return SendEgressCtxtStatus.PacketOccupancy */
static u64 packet_occupancy(u64 reg)
{}
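
/*
 * PacketOccupancy is a bit field within the SendEgressCtxtStatus CSR value.
 * A sketch of the extraction; the mask and shift are passed in rather than
 * naming the real register defines, which are not shown here.
 */
static inline u64 example_packet_occupancy(u64 reg, u64 smask, u32 shift)
{
        return (reg & smask) >> shift;  /* isolate the occupancy field */
}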

/* is egress halted on the context? */
static bool egress_halted(u64 reg)
{}

/* is the send context halted? */
static bool is_sc_halted(struct hfi1_devdata *dd, u32 hw_context)
{}

/**
 * sc_wait_for_packet_egress - wait for packet
 * @sc: valid send context
 * @pause: wait for credit return
 *
 * Wait for packet egress, optionally pause for credit return
 *
 * Egress halt and Context halt are not necessarily the same thing, so
 * check for both.
 *
 * NOTE: The context halt bit may not be set immediately.  Because of this,
 * it is necessary to check both the SW SCF_HALTED bit (set in the IRQ) and
 * the HW context halted bit to determine if the context is halted.
 */
static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
{}

void sc_wait(struct hfi1_devdata *dd)
{}

/*
 * Restart a context after it has been halted due to error.
 *
 * If the first step (waiting for the halt to be asserted) fails, return
 * early.  For the remaining steps, complain about timeouts but keep going.
 *
 * It is expected that allocations (enabled flag bit) have been shut off
 * already (only applies to kernel contexts).
 */
int sc_restart(struct send_context *sc)
{}

/*
 * PIO freeze processing.  To be called after the TXE block is fully frozen.
 * Go through all frozen send contexts and disable them.  The contexts are
 * already stopped by the freeze.
 */
void pio_freeze(struct hfi1_devdata *dd)
{}

/*
 * Unfreeze PIO for kernel send contexts.  The precondition for calling this
 * is that all PIO send contexts have been disabled and the SPC freeze has
 * been cleared.  Now perform the last step and re-enable each kernel context.
 * User (PSM) processing will occur when PSM calls into the kernel to
 * acknowledge the freeze.
 */
void pio_kernel_unfreeze(struct hfi1_devdata *dd)
{}

/**
 * pio_kernel_linkup() - Re-enable send contexts after linkup event
 * @dd: valid device data
 *
 * When the link goes down, the freeze path is taken.  However, a link down
 * event is different from a freeze because, if the send context is re-enabled,
 * whoever is sending data will start sending again, which will hang any QP
 * that is in the middle of sending data.
 *
 * The freeze path now looks at the type of event that occurs and takes this
 * path for a link down event.
 */
void pio_kernel_linkup(struct hfi1_devdata *dd)
{}

/*
 * Wait for the SendPioInitCtxt.PioInitInProgress bit to clear.
 * Returns:
 *	-ETIMEDOUT - if we wait too long
 *	-EIO	   - if there was an error
 */
static int pio_init_wait_progress(struct hfi1_devdata *dd)
{}
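
/*
 * Sketch of the wait loop implied by the comment above: poll a CSR until an
 * "in progress" bit clears, giving up after a bounded number of tries.  The
 * read_reg callback and the bit masks are hypothetical stand-ins for the
 * real SendPioInitCtxt register accessors.
 */
static inline int example_wait_bit_clear(u64 (*read_reg)(void *ctx), void *ctx,
                                         u64 busy_bit, u64 error_bit,
                                         int max_tries)
{
        int try;

        for (try = 0; try < max_tries; try++) {
                u64 reg = read_reg(ctx);

                if (reg & error_bit)
                        return -EIO;            /* hardware reported an error */
                if (!(reg & busy_bit))
                        return 0;               /* init finished */
                udelay(5);                      /* brief pause between polls */
        }
        return -ETIMEDOUT;
}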

/*
 * Reset all of the send contexts to their power-on state.  Used
 * only during manual init - no lock against sc_enable needed.
 */
void pio_reset_all(struct hfi1_devdata *dd)
{}

/* enable the context */
int sc_enable(struct send_context *sc)
{}

/* force a credit return on the context */
void sc_return_credits(struct send_context *sc)
{}

/* allow all in-flight packets to drain on the context */
void sc_flush(struct send_context *sc)
{}

/* drop all packets on the context, no waiting until they are sent */
void sc_drop(struct send_context *sc)
{}

/*
 * Start the software reaction to a context halt or SPC freeze:
 *	- mark the context as halted or frozen
 *	- stop buffer allocations
 *
 * Called from the error interrupt.  Other work is deferred until
 * out of the interrupt.
 */
void sc_stop(struct send_context *sc, int flag)
{}

#define BLOCK_DWORDS
#define dwords_to_blocks(x)

/*
 * The send context buffer "allocator".
 *
 * @sc: the PIO send context we are allocating from
 * @dw_len: length of the whole packet, including the PBC, in dwords
 * @cb: optional callback to call when the buffer is finished sending
 * @arg: argument for cb
 *
 * Return a pointer to a PIO buffer, NULL if not enough room, -ECOMM
 * when link is down.
 */
struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
				pio_release_cb cb, void *arg)
{}
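
/*
 * Sketch of how a caller might distinguish the three outcomes documented
 * above, assuming the "-ECOMM when link is down" case is reported as an
 * error pointer.  Hypothetical helper; the NULL cb/arg are placeholders.
 */
static inline int example_try_pio_send(struct send_context *sc, u32 dw_len)
{
        struct pio_buf *pbuf;

        pbuf = sc_buffer_alloc(sc, dw_len, NULL, NULL);
        if (IS_ERR(pbuf))
                return PTR_ERR(pbuf);   /* e.g. -ECOMM: link is down */
        if (!pbuf)
                return -EBUSY;          /* no credits free right now */

        /* ... fill and commit the buffer here ... */
        return 0;
}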

/*
 * There are at least two entities that can turn on credit return
 * interrupts and they can overlap.  Avoid problems by implementing
 * a count scheme that is enforced by a lock.  The lock is needed because
 * the count and CSR write must be paired.
 */

/*
 * Start credit return interrupts.  This is managed by a count.  If already
 * on, just increment the count.
 */
void sc_add_credit_return_intr(struct send_context *sc)
{}

/*
 * Stop credit return interrupts.  This is managed by a count.  Decrement the
 * count, if the last user, then turn the credit interrupts off.
 */
void sc_del_credit_return_intr(struct send_context *sc)
{}
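
/*
 * Sketch of the count scheme described above: a reference count protected by
 * a lock, with the hardware enable/disable done only on the 0 <-> 1
 * transitions so the count and the CSR write stay paired.  The structure and
 * the enable/disable hooks are hypothetical, not the driver's.
 */
struct example_intr_refcount {
        spinlock_t lock;
        u32 count;
};

static inline void example_intr_get(struct example_intr_refcount *r,
                                    void (*hw_enable)(void *ctx), void *ctx)
{
        unsigned long flags;

        spin_lock_irqsave(&r->lock, flags);
        if (r->count++ == 0)
                hw_enable(ctx);         /* first user turns the interrupt on */
        spin_unlock_irqrestore(&r->lock, flags);
}

static inline void example_intr_put(struct example_intr_refcount *r,
                                    void (*hw_disable)(void *ctx), void *ctx)
{
        unsigned long flags;

        spin_lock_irqsave(&r->lock, flags);
        if (--r->count == 0)
                hw_disable(ctx);        /* last user turns the interrupt off */
        spin_unlock_irqrestore(&r->lock, flags);
}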

/*
 * The caller must be careful when calling this.  All needint calls
 * must be paired with !needint.
 */
void hfi1_sc_wantpiobuf_intr(struct send_context *sc, u32 needint)
{}

/**
 * sc_piobufavail - callback when a PIO buffer is available
 * @sc: the send context
 *
 * This is called from the interrupt handler when a PIO buffer is
 * available after hfi1_verbs_send() returned an error that no buffers were
 * available. Disable the interrupt if there are no more QPs waiting.
 */
static void sc_piobufavail(struct send_context *sc)
{}

/* translate a send credit update to a bit code of reasons */
static inline int fill_code(u64 hw_free)
{}

/* use the jiffies compare to get the wrap right */
#define sent_before(a, b)
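
/*
 * The wrap-safe ordering test referenced above works like the kernel's
 * time_before(): a signed comparison of the difference handles counter
 * wrap-around correctly.  Illustrative helper with a hypothetical name and
 * counter width.
 */
static inline bool example_sent_before(u32 a, u32 b)
{
        return (s32)(a - b) < 0;        /* true if 'a' was issued before 'b' */
}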

/*
 * The send context buffer "releaser".
 */
void sc_release_update(struct send_context *sc)
{}

/*
 * Send context group releaser.  Argument is the send context that caused
 * the interrupt.  Called from the send context interrupt handler.
 *
 * Call release on all contexts in the group.
 *
 * This routine takes the sc_lock without an irqsave because it is only
 * called from an interrupt handler.  Adjust if that changes.
 */
void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context)
{}

/*
 * pio_select_send_context_vl() - select send context
 * @dd: devdata
 * @selector: a spreading factor
 * @vl: this vl
 *
 * This function returns a send context based on the selector and a vl.
 * The mapping fields are protected by RCU
 */
struct send_context *pio_select_send_context_vl(struct hfi1_devdata *dd,
						u32 selector, u8 vl)
{}
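
/*
 * Sketch of an RCU-protected table lookup of the kind described above:
 * readers dereference the current map under rcu_read_lock() and pick an
 * entry by reducing the selector modulo the (power-of-2 rounded) entry
 * count.  The structure and field names are hypothetical, not the driver's.
 */
struct example_sc_map {
        u32 mask;                       /* entries - 1, entries a power of 2 */
        struct send_context *scs[];     /* flexible array of contexts */
};

static inline struct send_context *
example_select_sc(struct example_sc_map __rcu **mapp, u32 selector)
{
        struct example_sc_map *map;
        struct send_context *sc = NULL;

        rcu_read_lock();
        map = rcu_dereference(*mapp);
        if (map)
                sc = map->scs[selector & map->mask];
        rcu_read_unlock();
        return sc;
}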

/*
 * pio_select_send_context_sc() - select send context
 * @dd: devdata
 * @selector: a spreading factor
 * @sc5: the 5 bit sc
 *
 * This function returns a send context based on the selector and an sc.
struct send_context *pio_select_send_context_sc(struct hfi1_devdata *dd,
						u32 selector, u8 sc5)
{}

/*
 * Free the indicated map struct
 */
static void pio_map_free(struct pio_vl_map *m)
{}

/*
 * Handle RCU callback
 */
static void pio_map_rcu_callback(struct rcu_head *list)
{}

/*
 * Set credit return threshold for the kernel send context
 */
static void set_threshold(struct hfi1_devdata *dd, int scontext, int i)
{}

/*
 * pio_map_init - called when #vls change
 * @dd: hfi1_devdata
 * @port: port number
 * @num_vls: number of vls
 * @vl_scontexts: per vl send context mapping (optional)
 *
 * This routine changes the mapping based on the number of vls.
 *
 * vl_scontexts is used to specify a non-uniform vl/send context
 * loading. NULL implies auto computing the loading and giving each
 * VL a uniform distribution of send contexts per VL.
 *
 * The auto algorithm computes sc_per_vl and the number of extra
 * send contexts. Any extra send contexts are added from the last VL
 * on down (see the sketch after this function).
 *
 * rcu locking is used here to control access to the mapping fields.
 *
 * If either num_vls or num_send_contexts is not a power of 2, the
 * array sizes in struct pio_vl_map and struct pio_map_elem are
 * rounded up to the next highest power of 2 and the first entry is
 * reused in a round robin fashion.
 *
 * If an error occurs, the map change is not done and the existing
 * mapping is left unchanged.
 *
 */
int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts)
{}

void free_pio_map(struct hfi1_devdata *dd)
{}

int init_pervl_scs(struct hfi1_devdata *dd)
{}

int init_credit_return(struct hfi1_devdata *dd)
{}

void free_credit_return(struct hfi1_devdata *dd)
{}

void seqfile_dump_sci(struct seq_file *s, u32 i,
		      struct send_context_info *sci)
{}