linux/drivers/infiniband/hw/qib/qib_sdma.c

/*
 * Copyright (c) 2012 Intel Corporation. All rights reserved.
 * Copyright (c) 2007 - 2012 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include <linux/moduleparam.h>

#include "qib.h"
#include "qib_common.h"

/* default pio off, sdma on */
static ushort sdma_descq_cnt = 256;
module_param_named(sdma_descq_cnt, sdma_descq_cnt, ushort, S_IRUGO);
MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");
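
/*
 * The descriptor count can be raised at module load for workloads that
 * keep many sends in flight; with S_IRUGO it is read-only afterwards.
 * Illustrative usage (the qib driver module is named ib_qib):
 *
 *	modprobe ib_qib sdma_descq_cnt=1024
 */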

/*
 * Bits defined in the send DMA descriptor.
 */
#define SDMA_DESC_LAST          (1ULL << 11)
#define SDMA_DESC_FIRST         (1ULL << 12)
#define SDMA_DESC_DMA_HEAD      (1ULL << 13)
#define SDMA_DESC_USE_LARGE_BUF (1ULL << 14)
#define SDMA_DESC_INTR          (1ULL << 15)
#define SDMA_DESC_COUNT_LSB     16
#define SDMA_DESC_GEN_LSB       30
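
/*
 * Illustrative sketch, not from the original driver: how the bits above
 * combine in the first 64-bit word of a descriptor.  A packet that fits
 * in a single descriptor carries both FIRST and LAST; INTR asks the
 * hardware to interrupt when the descriptor completes.  The helper name
 * and parameters are hypothetical.
 */
static inline u64 __maybe_unused sketch_desc_qw0(u64 dwlen, u64 gen, int intr)
{
	u64 qw0 = SDMA_DESC_FIRST | SDMA_DESC_LAST;

	if (intr)
		qw0 |= SDMA_DESC_INTR;
	/* 11-bit dword count and 2-bit generation at their LSB offsets */
	qw0 |= (dwlen & 0x7ffULL) << SDMA_DESC_COUNT_LSB;
	qw0 |= (gen & 3ULL) << SDMA_DESC_GEN_LSB;
	return qw0;
}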

/* declare all statics here rather than keep sorting */
static int alloc_sdma(struct qib_pportdata *);
static void sdma_complete(struct kref *);
static void sdma_finalput(struct qib_sdma_state *);
static void sdma_get(struct qib_sdma_state *);
static void sdma_put(struct qib_sdma_state *);
static void sdma_set_state(struct qib_pportdata *, enum qib_sdma_states);
static void sdma_start_sw_clean_up(struct qib_pportdata *);
static void sdma_sw_clean_up_task(struct tasklet_struct *);
static void unmap_desc(struct qib_pportdata *, unsigned);

static void sdma_get(struct qib_sdma_state *ss)
{}

static void sdma_complete(struct kref *kref)
{}

static void sdma_put(struct qib_sdma_state *ss)
{}

static void sdma_finalput(struct qib_sdma_state *ss)
{}
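
/*
 * Illustrative sketch: the four empty helpers above follow the standard
 * kref pattern, assuming qib_sdma_state embeds a struct kref ("kref")
 * and a struct completion ("comp") as in qib.h.  sdma_get() takes a
 * reference, sdma_put() drops one and fires sdma_complete() on the
 * last put, and sdma_finalput() drops the caller's reference and then
 * blocks until every other holder has dropped theirs.  Names below are
 * hypothetical stand-ins, not the driver's own.
 */
static void __maybe_unused sketch_sdma_complete(struct kref *kref)
{
	struct qib_sdma_state *ss =
		container_of(kref, struct qib_sdma_state, kref);

	complete(&ss->comp);
}

static void __maybe_unused sketch_sdma_finalput(struct qib_sdma_state *ss)
{
	kref_put(&ss->kref, sketch_sdma_complete);	/* drop our ref */
	wait_for_completion(&ss->comp);			/* wait for the rest */
}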

/*
 * Complete all the sdma requests on the active list, in the correct
 * order, and with appropriate processing.  Called when cleaning up
 * after sdma shutdown, and when new sdma requests are submitted for
 * a link that is down.  This matches what is done for requests
 * that complete normally; it's just the full list.
 *
 * Must be called with sdma_lock held
 */
static void clear_sdma_activelist(struct qib_pportdata *ppd)
{}
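
/*
 * Illustrative sketch of the flush above: walk the active list with the
 * _safe iterator (entries are removed as we go), unmap any descriptors
 * a request still owns, and invoke its callback with an "aborted"
 * status so completions arrive in submission order.  The flag and
 * status names come from qib.h; treat any that differ as assumptions.
 */
static void __maybe_unused sketch_clear_sdma_activelist(struct qib_pportdata *ppd)
{
	struct qib_sdma_txreq *txp, *txp_next;

	list_for_each_entry_safe(txp, txp_next, &ppd->sdma_activelist, list) {
		list_del_init(&txp->list);
		if (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) {
			unsigned idx = txp->start_idx;

			/* unmap every descriptor this request consumed */
			while (idx != txp->next_descq_idx) {
				unmap_desc(ppd, idx);
				if (++idx == ppd->sdma_descq_cnt)
					idx = 0;
			}
		}
		if (txp->callback)
			(*txp->callback)(txp, QIB_SDMA_TXREQ_S_ABORTED);
	}
}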

static void sdma_sw_clean_up_task(struct tasklet_struct *t)
{}

/*
 * This is called when changing to state qib_sdma_state_s10_hw_start_up_wait
 * as a result of send buffer errors or send DMA descriptor errors.
 * We want to disarm the buffers in these cases.
 */
static void sdma_hw_start_up(struct qib_pportdata *ppd)
{}
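
/*
 * Illustrative sketch: per the comment above, the restart path disarms
 * this port's send buffers before asking the chip layer to bring the
 * SDMA engine back up.  f_sendctrl and f_sdma_hw_start_up are the
 * per-chip ops in qib_devdata; the sendbuf range fields are assumptions
 * if they differ in qib.h.
 */
static void __maybe_unused sketch_sdma_hw_start_up(struct qib_pportdata *ppd)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;
	unsigned bufno;

	/* disarm every send buffer owned by this port */
	for (bufno = ss->first_sendbuf; bufno < ss->last_sendbuf; ++bufno)
		ppd->dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_BUF(bufno));

	ppd->dd->f_sdma_hw_start_up(ppd);
}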

static void sdma_sw_tear_down(struct qib_pportdata *ppd)
{}

static void sdma_start_sw_clean_up(struct qib_pportdata *ppd)
{}

static void sdma_set_state(struct qib_pportdata *ppd,
	enum qib_sdma_states next_state)
{}

static void unmap_desc(struct qib_pportdata *ppd, unsigned head)
{}
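
/*
 * Illustrative sketch: a descriptor is unmapped by reading its two
 * 64-bit words back, reassembling the DMA address and dword length
 * that were packed in at build time, and handing them to
 * dma_unmap_single().  The bit positions mirror the SDMA_DESC_* layout
 * at the top of the file; treat them as assumptions.
 */
static void __maybe_unused sketch_unmap_desc(struct qib_pportdata *ppd,
					     unsigned head)
{
	__le64 *descqp = &ppd->sdma_descq[head].qw[0];
	u64 desc[2];
	dma_addr_t addr;
	size_t len;

	desc[0] = le64_to_cpu(descqp[0]);
	desc[1] = le64_to_cpu(descqp[1]);

	/* address: high bits in qw1, low 32 bits in the top of qw0 */
	addr = (desc[1] << 32) | (desc[0] >> 32);
	/* length: 11-bit dword count, converted to bytes */
	len = ((desc[0] >> SDMA_DESC_COUNT_LSB) & 0x7ffULL) << 2;
	dma_unmap_single(&ppd->dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
}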

static int alloc_sdma(struct qib_pportdata *ppd)
{}
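
/*
 * Illustrative sketch of the allocation above: the descriptor FIFO
 * (two u64s per entry) and a page for the DMA'd head index both come
 * from dma_alloc_coherent(), so hardware and driver share them without
 * explicit syncs.  Error unwinding is shortened for brevity.
 */
static int __maybe_unused sketch_alloc_sdma(struct qib_pportdata *ppd)
{
	ppd->sdma_descq_cnt = sdma_descq_cnt ? sdma_descq_cnt : 256;

	/* descriptor FIFO, 16 bytes per descriptor */
	ppd->sdma_descq = dma_alloc_coherent(&ppd->dd->pcidev->dev,
		ppd->sdma_descq_cnt * sizeof(u64[2]),
		&ppd->sdma_descq_phys, GFP_KERNEL);
	if (!ppd->sdma_descq)
		return -ENOMEM;

	/* page the chip DMAs the current head index into */
	ppd->sdma_head_dma = dma_alloc_coherent(&ppd->dd->pcidev->dev,
		PAGE_SIZE, &ppd->sdma_head_phys, GFP_KERNEL);
	if (!ppd->sdma_head_dma) {
		dma_free_coherent(&ppd->dd->pcidev->dev,
			ppd->sdma_descq_cnt * sizeof(u64[2]),
			ppd->sdma_descq, ppd->sdma_descq_phys);
		return -ENOMEM;
	}
	return 0;
}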

static void free_sdma(struct qib_pportdata *ppd)
{}

static inline void make_sdma_desc(struct qib_pportdata *ppd,
				  u64 *sdmadesc, u64 addr, u64 dwlen,
				  u64 dwoffset)
{}
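
/*
 * Illustrative sketch of the packing done above: the 4-byte-aligned
 * DMA address is split across the two descriptor words, and the
 * generation, dword count and buffer offset are folded into qw0 at the
 * LSB positions defined at the top of the file.
 */
static inline void __maybe_unused sketch_make_sdma_desc(
	struct qib_pportdata *ppd, u64 *sdmadesc, u64 addr, u64 dwlen,
	u64 dwoffset)
{
	WARN_ON(addr & 3);	/* caller guarantees 32-bit alignment */
	sdmadesc[1] = addr >> 32;			/* addr high */
	sdmadesc[0] = (addr & 0xfffffffcULL) << 32;	/* addr low */
	sdmadesc[0] |= (ppd->sdma_generation & 3ULL) << SDMA_DESC_GEN_LSB;
	sdmadesc[0] |= (dwlen & 0x7ffULL) << SDMA_DESC_COUNT_LSB;
	sdmadesc[0] |= dwoffset & 0x7ffULL;
}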

/* sdma_lock must be held */
int qib_sdma_make_progress(struct qib_pportdata *ppd)
{}
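
/*
 * Heavily condensed sketch of the progress loop above: read how far
 * the hardware head has advanced and retire every descriptor between
 * the software head and it.  The real loop also unmaps descriptors and
 * runs each request's callback once its last descriptor is consumed;
 * that bookkeeping is omitted here.
 */
static int __maybe_unused sketch_sdma_make_progress(struct qib_pportdata *ppd)
{
	u16 hwhead = ppd->dd->f_sdma_gethead(ppd);
	int progress = 0;

	while (ppd->sdma_descq_head != hwhead) {
		if (++ppd->sdma_descq_head == ppd->sdma_descq_cnt)
			ppd->sdma_descq_head = 0;
		ppd->sdma_descq_removed++;
		progress = 1;
	}
	return progress;
}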

/*
 * This is called from interrupt context.
 */
void qib_sdma_intr(struct qib_pportdata *ppd)
{}

void __qib_sdma_intr(struct qib_pportdata *ppd)
{}
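
/*
 * Illustrative sketch: the usual kernel "__" convention applies above.
 * qib_sdma_intr() is the locked wrapper, while __qib_sdma_intr() does
 * the work with sdma_lock already held, so state-machine code that
 * holds the lock can call it directly.
 */
static void __maybe_unused sketch_sdma_intr_wrapper(struct qib_pportdata *ppd)
{
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);
	__qib_sdma_intr(ppd);	/* lock-held variant does the real work */
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}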

int qib_setup_sdma(struct qib_pportdata *ppd)
{}

void qib_teardown_sdma(struct qib_pportdata *ppd)
{}

int qib_sdma_running(struct qib_pportdata *ppd)
{}
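
/*
 * Illustrative sketch: qib_sdma_running() is another locked wrapper;
 * it takes sdma_lock around __qib_sdma_running(), the qib.h inline
 * that tests for the s99_running state.
 */
static int __maybe_unused sketch_sdma_running(struct qib_pportdata *ppd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ppd->sdma_lock, flags);
	ret = __qib_sdma_running(ppd);
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	return ret;
}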

/*
 * Complete a request when sdma is not running; it is likely the only
 * request, but to simplify the code, always queue it, then process the
 * full activelist.  We process the entire list to ensure that this
 * particular request does get its callback, but in the correct order.
 * Must be called with sdma_lock held
 */
static void complete_sdma_err_req(struct qib_pportdata *ppd,
				  struct qib_verbs_txreq *tx)
{}
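
/*
 * Illustrative sketch of the error-path completion above: the request
 * is marked as owning no descriptors, queued on the active list, and
 * then the whole list is flushed so its callback runs in order with
 * anything already queued.  The s_dma_busy accounting mirrors the
 * normal send path.
 */
static void __maybe_unused sketch_complete_sdma_err_req(
	struct qib_pportdata *ppd, struct qib_verbs_txreq *tx)
{
	struct qib_qp_priv *priv = tx->qp->priv;

	atomic_inc(&priv->s_dma_busy);
	/* no descriptors were built, so there is nothing to unmap */
	tx->txreq.start_idx = 0;
	tx->txreq.next_descq_idx = 0;
	list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
	clear_sdma_activelist(ppd);
}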

/*
 * This function queues one IB packet onto the send DMA queue per call.
 * The caller is responsible for checking:
 * 1) The number of send DMA descriptor entries is less than the size of
 *    the descriptor queue.
 * 2) The IB SGE addresses and lengths are 32-bit aligned
 *    (except possibly the last SGE's length)
 * 3) The SGE addresses are suitable for passing to dma_map_single().
 */
int qib_sdma_verbs_send(struct qib_pportdata *ppd,
			struct rvt_sge_state *ss, u32 dwords,
			struct qib_verbs_txreq *tx)
{}
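
/*
 * Illustrative sketch of the entry gating in qib_sdma_verbs_send()
 * (the descriptor-building loop itself is long and omitted): under
 * sdma_lock, a request against a stopped engine is completed through
 * the error path, and a request that won't fit in the free descriptor
 * space is deferred rather than built.  qib_sdma_descq_freecnt() is
 * the ring-occupancy helper from qib.h; the "ndesc" parameter is a
 * hypothetical stand-in for the caller's descriptor-count check.
 */
static int __maybe_unused sketch_sdma_verbs_send_gate(
	struct qib_pportdata *ppd, struct qib_verbs_txreq *tx,
	unsigned ndesc)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ppd->sdma_lock, flags);
	if (unlikely(!__qib_sdma_running(ppd))) {
		complete_sdma_err_req(ppd, tx);	/* flush via error path */
		goto unlock;
	}
	if (ndesc > qib_sdma_descq_freecnt(ppd)) {
		ret = -EBUSY;	/* the real code queues the QP to retry */
		goto unlock;
	}
	/* ... build one descriptor per SGE fragment, then update tail ... */
unlock:
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
	return ret;
}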

/*
 * sdma_lock should be acquired before calling this routine
 */
void dump_sdma_state(struct qib_pportdata *ppd)
{}

void qib_sdma_process_event(struct qib_pportdata *ppd,
	enum qib_sdma_events event)
{}
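
/*
 * Illustrative sketch: qib_sdma_process_event() wraps the lock-held
 * state machine __qib_sdma_process_event() below, and on return to the
 * running state lets the verbs layer know descriptor space is
 * available again via qib_verbs_sdma_desc_avail() (declared in qib.h).
 */
static void __maybe_unused sketch_sdma_process_event(
	struct qib_pportdata *ppd, enum qib_sdma_events event)
{
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);
	__qib_sdma_process_event(ppd, event);
	if (ppd->sdma_state.current_state == qib_sdma_state_s99_running)
		qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}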

void __qib_sdma_process_event(struct qib_pportdata *ppd,
	enum qib_sdma_events event)
{}