/* linux/drivers/infiniband/core/iwcm.c */

/*
 * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sysctl.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_addr.h>
#include <rdma/iw_portmap.h>
#include <rdma/rdma_netlink.h>

#include "iwcm.h"

MODULE_AUTHOR("Tom Tucker");
MODULE_DESCRIPTION("iWARP CM");
MODULE_LICENSE("Dual BSD/GPL");

static const char * const iwcm_rej_reason_strs[] = {
	[ECONNRESET]	= "reset by remote host",
	[ECONNREFUSED]	= "refused by remote application",
	[ETIMEDOUT]	= "setup timeout",
};

const char *__attribute_const__ iwcm_reject_msg(int reason)
{}
EXPORT_SYMBOL(iwcm_reject_msg);

static struct rdma_nl_cbs iwcm_nl_cb_table[RDMA_NL_IWPM_NUM_OPS] =;

static struct workqueue_struct *iwcm_wq;
struct iwcm_work {
	struct work_struct	work;		/* queued on iwcm_wq */
	struct iwcm_id_private	*cm_id;		/* owning cm_id */
	struct list_head	list;		/* entry on the cm_id's work_list */
	struct iw_cm_event	event;		/* local copy of the event */
	struct list_head	free_list;	/* entry on the pre-allocated pool */
};

static unsigned int default_backlog = 256;

static struct ctl_table_header *iwcm_ctl_table_hdr;
static struct ctl_table iwcm_ctl_table[] =;

/*
 * The following services provide a mechanism for pre-allocating iwcm_work
 * elements. The design pre-allocates them based on the cm_id type:
 *	LISTENING IDS:	Get enough elements preallocated to handle the
 *			listen backlog.
 *	ACTIVE IDS:	4: CONNECT_REPLY, ESTABLISHED, DISCONNECT, CLOSE
 *	PASSIVE IDS:	3: ESTABLISHED, DISCONNECT, CLOSE
 *
 * Allocating them in connect and listen avoids having to deal with
 * allocation failures on the event upcall from the provider (which is
 * called in interrupt context).
 *
 * One exception is when creating the cm_id for incoming connection requests.
 * There are two cases:
 * 1) in the event upcall, cm_event_handler(), for a listening cm_id.  If
 *    the backlog is exceeded, then no more connection request events will
 *    be processed.  cm_event_handler() returns -ENOMEM in this case.  It's up
 *    to the provider to reject the connection request.
 * 2) in the connection request workqueue handler, cm_conn_req_handler().
 *    If work elements cannot be allocated for the new connect request cm_id,
 *    then IWCM will call the provider reject method.  This is ok since
 *    cm_conn_req_handler() runs in the workqueue thread context.
 *
 * A sketch of the free-list mechanics appears after alloc_work_entries()
 * below.
 */

static struct iwcm_work *get_work(struct iwcm_id_private *cm_id_priv)
{}

static void put_work(struct iwcm_work *work)
{}

static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv)
{}

static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
{}
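
/*
 * A minimal sketch of the free-list mechanics described above. This is
 * illustrative only: it assumes the work_free_list and lock members that
 * iwcm.h declares in struct iwcm_id_private, and both helpers expect
 * cm_id_priv->lock to be held by the caller (as in cm_event_handler()).
 */
static __maybe_unused struct iwcm_work *get_work_sketch(struct iwcm_id_private *cm_id_priv)
{
	struct iwcm_work *work;

	if (list_empty(&cm_id_priv->work_free_list))
		return NULL;	/* pool exhausted; caller reports -ENOMEM */
	work = list_first_entry(&cm_id_priv->work_free_list, struct iwcm_work,
				free_list);
	list_del_init(&work->free_list);
	return work;
}

static __maybe_unused void put_work_sketch(struct iwcm_work *work)
{
	/* recycle the element into its owner's pre-allocated pool */
	list_add(&work->free_list, &work->cm_id->work_free_list);
}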

/*
 * Save the private data from incoming connection requests in the
 * iw_cm_event, so the low-level driver doesn't have to. Adjust the
 * event pointer to point at the local copy.
 */
static int copy_private_data(struct iw_cm_event *event)
{}
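
/*
 * The copy described above amounts to a kmemdup() of the private data; a
 * minimal sketch (GFP_ATOMIC because the provider upcall may arrive in
 * interrupt context):
 */
static __maybe_unused int copy_private_data_sketch(struct iw_cm_event *event)
{
	void *p;

	p = kmemdup(event->private_data, event->private_data_len, GFP_ATOMIC);
	if (!p)
		return -ENOMEM;
	event->private_data = p;	/* event now points at the local copy */
	return 0;
}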

static void free_cm_id(struct iwcm_id_private *cm_id_priv)
{}

/*
 * Release a reference on cm_id. If the last reference is being
 * released, free the cm_id and return 'true'.
 */
static bool iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
{}
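
/*
 * A sketch of the dereference path described above, assuming the refcount
 * and work_list members declared in iwcm.h (illustrative only):
 */
static __maybe_unused bool iwcm_deref_id_sketch(struct iwcm_id_private *cm_id_priv)
{
	if (refcount_dec_and_test(&cm_id_priv->refcount)) {
		/* no events may still be queued once the last ref drops */
		BUG_ON(!list_empty(&cm_id_priv->work_list));
		free_cm_id(cm_id_priv);
		return true;
	}
	return false;
}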

static void add_ref(struct iw_cm_id *cm_id)
{}

static void rem_ref(struct iw_cm_id *cm_id)
{}

static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);

struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
				 iw_cm_handler cm_handler,
				 void *context)
{}
EXPORT_SYMBOL(iw_create_cm_id);
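
/*
 * A sketch of what iw_create_cm_id() sets up, assuming the field names
 * declared in iwcm.h (illustrative only; error handling is minimal):
 */
static __maybe_unused struct iw_cm_id *iw_create_cm_id_sketch(struct ib_device *device,
							      iw_cm_handler cm_handler,
							      void *context)
{
	struct iwcm_id_private *cm_id_priv;

	cm_id_priv = kzalloc(sizeof(*cm_id_priv), GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->state = IW_CM_STATE_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.event_handler = cm_event_handler;
	cm_id_priv->id.add_ref = add_ref;
	cm_id_priv->id.rem_ref = rem_ref;
	spin_lock_init(&cm_id_priv->lock);
	refcount_set(&cm_id_priv->refcount, 1);	/* dropped at destroy time */
	init_waitqueue_head(&cm_id_priv->connect_wait);
	init_completion(&cm_id_priv->destroy_comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	INIT_LIST_HEAD(&cm_id_priv->work_free_list);

	return &cm_id_priv->id;
}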


static int iwcm_modify_qp_err(struct ib_qp *qp)
{}

/*
 * This is really the RDMAC CLOSING state. It is most similar to the
 * IB SQD QP state.
 */
static int iwcm_modify_qp_sqd(struct ib_qp *qp)
{}

/*
 * CM_ID <-- CLOSING
 *
 * Block if a passive or active connection is currently being processed. Then
 * process the event as follows:
 * - If we are ESTABLISHED, move to CLOSING and modify the QP state
 *   based on the abrupt flag
 * - If the connection is already in the CLOSING or IDLE state, the peer is
 *   disconnecting concurrently with us and we've already seen the
 *   DISCONNECT event -- ignore the request and return 0
 * - Disconnect on a listening endpoint returns -EINVAL
 */
int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt)
{}
EXPORT_SYMBOL(iw_cm_disconnect);
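
/*
 * A condensed sketch of the transition described above. Illustrative only:
 * the real function first waits for any in-flight connect/accept downcall
 * (IWCM_F_CONNECT_WAIT) and distinguishes a few more states.
 */
static __maybe_unused int iw_cm_disconnect_sketch(struct iw_cm_id *cm_id, int abrupt)
{
	struct iwcm_id_private *cm_id_priv =
		container_of(cm_id, struct iwcm_id_private, id);
	struct ib_qp *qp = NULL;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_ESTABLISHED:
		cm_id_priv->state = IW_CM_STATE_CLOSING;
		qp = cm_id_priv->qp;	/* may be NULL for a user-mode client */
		break;
	case IW_CM_STATE_LISTEN:
		ret = -EINVAL;		/* cannot disconnect a listener */
		break;
	default:
		/* peer is closing concurrently, or there is nothing to do */
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (qp)
		ret = abrupt ? iwcm_modify_qp_err(qp) : iwcm_modify_qp_sqd(qp);
	return ret;
}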

/*
 * CM_ID <-- DESTROYING
 *
 * Clean up all resources associated with the connection and release
 * the initial reference taken by iw_create_cm_id.
 *
 * Returns true if and only if the last cm_id_priv reference has been dropped.
 */
static bool destroy_cm_id(struct iw_cm_id *cm_id)
{}

/*
 * This function is only called by the application thread and cannot
 * be called by the event thread. The function will wait for all
 * references to be released on the cm_id and then kfree the cm_id
 * object.
 */
void iw_destroy_cm_id(struct iw_cm_id *cm_id)
{}
EXPORT_SYMBOL(iw_destroy_cm_id);

/**
 * iw_cm_check_wildcard - if the IP address is the wildcard, use the original
 * @pm_addr: sockaddr containing the IP address to check for the wildcard
 * @cm_addr: sockaddr containing the actual IP address
 * @cm_outaddr: sockaddr whose IP address is rewritten; the port is left as-is
 *
 * Checks pm_addr for the wildcard address and, if found, sets the IP
 * address in cm_outaddr to the actual one from cm_addr.
 */
static void iw_cm_check_wildcard(struct sockaddr_storage *pm_addr,
				 struct sockaddr_storage *cm_addr,
				 struct sockaddr_storage *cm_outaddr)
{}
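
/*
 * A sketch of the wildcard substitution for the IPv4 and IPv6 cases
 * (illustrative; relies on the in.h/ipv6.h helpers pulled in by the
 * rdma headers):
 */
static __maybe_unused void iw_cm_check_wildcard_sketch(struct sockaddr_storage *pm_addr,
						       struct sockaddr_storage *cm_addr,
						       struct sockaddr_storage *cm_outaddr)
{
	if (pm_addr->ss_family == AF_INET) {
		struct sockaddr_in *pm4 = (struct sockaddr_in *)pm_addr;

		if (pm4->sin_addr.s_addr == htonl(INADDR_ANY)) {
			struct sockaddr_in *cm4 = (struct sockaddr_in *)cm_addr;
			struct sockaddr_in *out4 = (struct sockaddr_in *)cm_outaddr;

			/* replace only the address; sin_port stays intact */
			out4->sin_addr = cm4->sin_addr;
		}
	} else if (pm_addr->ss_family == AF_INET6) {
		struct sockaddr_in6 *pm6 = (struct sockaddr_in6 *)pm_addr;

		if (ipv6_addr_any(&pm6->sin6_addr)) {
			struct sockaddr_in6 *cm6 = (struct sockaddr_in6 *)cm_addr;
			struct sockaddr_in6 *out6 = (struct sockaddr_in6 *)cm_outaddr;

			out6->sin6_addr = cm6->sin6_addr;
		}
	}
}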

/**
 * iw_cm_map - use the portmapper to map the ports
 * @cm_id: connection manager pointer
 * @active: indicates the active side when true
 *
 * Tries to add a mapping for a port using the portmapper. If it succeeds
 * in mapping the IP/port, it checks the remote mapped IP address for a
 * wildcard and replaces the zero IP address with the remote_addr.
 *
 * Return: nonzero for an error, only if iwpm_create_mapinfo() fails.
 */
static int iw_cm_map(struct iw_cm_id *cm_id, bool active)
{}

/*
 * CM_ID <-- LISTEN
 *
 * Start listening for connect requests. Generates one CONNECT_REQUEST
 * event for each inbound connect request.
 */
int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
{}
EXPORT_SYMBOL(iw_cm_listen);
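
/*
 * A sketch of the listen path, tying together the pieces above.
 * Illustrative only: the provider entry point is assumed to be the
 * iw_create_listen hook of ib_device_ops (as in recent kernels), and
 * error handling is simplified.
 */
static __maybe_unused int iw_cm_listen_sketch(struct iw_cm_id *cm_id, int backlog)
{
	struct iwcm_id_private *cm_id_priv =
		container_of(cm_id, struct iwcm_id_private, id);
	unsigned long flags;
	int ret;

	if (!backlog)
		backlog = default_backlog;

	/* pre-allocate one work element per backlog slot (see above) */
	ret = alloc_work_entries(cm_id_priv, backlog);
	if (ret)
		return ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state != IW_CM_STATE_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		return -EINVAL;
	}
	cm_id_priv->state = IW_CM_STATE_LISTEN;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = iw_cm_map(cm_id, false);
	if (!ret)
		ret = cm_id->device->ops.iw_create_listen(cm_id, backlog);
	if (ret) {
		/* fall back to IDLE so the cm_id can be reused or destroyed */
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		cm_id_priv->state = IW_CM_STATE_IDLE;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	}
	return ret;
}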

/*
 * CM_ID <-- IDLE
 *
 * Rejects an inbound connection request. No events are generated.
 */
int iw_cm_reject(struct iw_cm_id *cm_id,
		 const void *private_data,
		 u8 private_data_len)
{}
EXPORT_SYMBOL(iw_cm_reject);

/*
 * CM_ID <-- ESTABLISHED
 *
 * Accepts an inbound connection request and generates an ESTABLISHED
 * event. Callers of iw_cm_disconnect and iw_destroy_cm_id will block
 * until the ESTABLISHED event is received from the provider.
 */
int iw_cm_accept(struct iw_cm_id *cm_id,
		 struct iw_cm_conn_param *iw_param)
{}
EXPORT_SYMBOL(iw_cm_accept);

/*
 * Active Side: CM_ID <-- CONN_SENT
 *
 * If successful, results in the generation of a CONNECT_REPLY
 * event. iw_cm_disconnect and iw_destroy_cm_id will block until the
 * CONNECT_REPLY event is received from the provider.
 */
int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{}
EXPORT_SYMBOL(iw_cm_connect);

/*
 * Passive Side: new CM_ID <-- CONN_RECV
 *
 * Handles an inbound connect request. The function creates a new
 * iw_cm_id to represent the new connection and inherits the client
 * callback function and other attributes from the listening parent.
 *
 * The work item contains a pointer to the listen_cm_id and the event. The
 * listen_cm_id contains the client cm_handler, context and
 * device. These are copied when the cm_id is cloned. The event
 * contains the new four-tuple.
 *
 * An error on the child should not affect the parent, so this
 * function does not return a value.
 */
static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
				struct iw_cm_event *iw_event)
{}

/*
 * Passive Side: CM_ID <-- ESTABLISHED
 *
 * The provider generated an ESTABLISHED event which means that
 * the MPA negotiation has completed successfully and we are now in MPA
 * FPDU mode.
 *
 * This event can only be received in the CONN_RECV state. If the
 * remote peer closed, the ESTABLISHED event would be received followed
 * by the CLOSE event. If the app closes, it will block until we wake
 * it up after processing this event.
 */
static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
			       struct iw_cm_event *iw_event)
{}

/*
 * Active Side: CM_ID <-- ESTABLISHED
 *
 * The app has called connect and is waiting for the established event to
 * post its requests to the server. This event will wake up anyone
 * blocked in iw_cm_disconnect or iw_destroy_cm_id.
 */
static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
			       struct iw_cm_event *iw_event)
{}

/*
 * CM_ID <-- CLOSING
 *
 * If in the ESTABLISHED state, move to CLOSING.
 */
static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv,
				  struct iw_cm_event *iw_event)
{}
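
/*
 * The transition described above is a single locked state change; a
 * minimal sketch (the event itself carries nothing we need here):
 */
static __maybe_unused void cm_disconnect_handler_sketch(struct iwcm_id_private *cm_id_priv)
{
	unsigned long flags;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state == IW_CM_STATE_ESTABLISHED)
		cm_id_priv->state = IW_CM_STATE_CLOSING;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}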

/*
 * CM_ID <-- IDLE
 *
 * If in the ESTABLISHED or CLOSING states, the QP will have been
 * moved by the provider to the ERR state. Disassociate the CM_ID from
 * the QP, move to IDLE, and remove the 'connected' reference.
 *
 * If in some other state, the cm_id was destroyed asynchronously.
 * This is the last reference that will result in waking up
 * the app thread blocked in iw_destroy_cm_id.
 */
static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
				  struct iw_cm_event *iw_event)
{}

static int process_event(struct iwcm_id_private *cm_id_priv,
			 struct iw_cm_event *iw_event)
{}

/*
 * Process events on the work_list for the cm_id. If the callback
 * function requests that the cm_id be deleted, a flag is set in the
 * cm_id flags to indicate that when the last reference is
 * removed, the cm_id is to be destroyed. This is necessary to
 * distinguish between an object that will be destroyed by the app
 * thread asleep on the destroy_comp list vs. an object destroyed
 * here synchronously when the last reference is removed.
 */
static void cm_work_handler(struct work_struct *_work)
{}
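
/*
 * A condensed sketch of the drain loop described above. Illustrative only:
 * the real handler also honors the IWCM_F_DROP_EVENTS flag and destroys
 * the cm_id when the callback requests it.
 */
static __maybe_unused void cm_work_handler_sketch(struct work_struct *_work)
{
	struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
	struct iwcm_id_private *cm_id_priv = work->cm_id;
	struct iw_cm_event levent;
	unsigned long flags;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	while (!list_empty(&cm_id_priv->work_list)) {
		work = list_first_entry(&cm_id_priv->work_list,
					struct iwcm_work, list);
		list_del_init(&work->list);
		levent = work->event;	/* copy before recycling the element */
		put_work(work);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		/* deliver to the client callback without holding the lock */
		process_event(cm_id_priv, &levent);

		/* each queued event held a reference (see cm_event_handler) */
		if (iwcm_deref_id(cm_id_priv))
			return;		/* last reference: cm_id is gone */
		spin_lock_irqsave(&cm_id_priv->lock, flags);
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}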

/*
 * This function is called in interrupt context. Schedule events on
 * the iwcm_wq thread to allow callback functions to downcall into
 * the CM and/or block.  Events are queued to a per-CM_ID
 * work_list. If this is the first event on the work_list, the work
 * element is also queued on the iwcm_wq thread.
 *
 * Each event holds a reference on the cm_id. Until the last posted
 * event has been delivered and processed, the cm_id cannot be
 * deleted.
 *
 * Returns:
 * 	      0	- the event was handled.
 *	-ENOMEM	- the event was not handled due to lack of resources.
 */
static int cm_event_handler(struct iw_cm_id *cm_id,
			     struct iw_cm_event *iw_event)
{}
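
/*
 * A sketch of the queueing described above: take a pre-allocated work
 * element under the lock, copy the event (and any private data), take a
 * reference, and kick iwcm_wq only for the first entry on the list.
 * Illustrative only; field names are those declared in iwcm.h.
 */
static __maybe_unused int cm_event_handler_sketch(struct iw_cm_id *cm_id,
						  struct iw_cm_event *iw_event)
{
	struct iwcm_id_private *cm_id_priv =
		container_of(cm_id, struct iwcm_id_private, id);
	struct iwcm_work *work;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	work = get_work(cm_id_priv);
	if (!work) {
		ret = -ENOMEM;	/* listen backlog exceeded */
		goto out;
	}

	INIT_WORK(&work->work, cm_work_handler);
	work->cm_id = cm_id_priv;
	work->event = *iw_event;

	/* connect requests/replies may carry private data to be copied */
	if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
	     work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
	    work->event.private_data_len) {
		ret = copy_private_data(&work->event);
		if (ret) {
			put_work(work);
			goto out;
		}
	}

	refcount_inc(&cm_id_priv->refcount);	/* dropped in cm_work_handler */
	if (list_empty(&cm_id_priv->work_list)) {
		list_add_tail(&work->list, &cm_id_priv->work_list);
		queue_work(iwcm_wq, &work->work);
	} else {
		list_add_tail(&work->list, &cm_id_priv->work_list);
	}
out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}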

static int iwcm_init_qp_init_attr(struct iwcm_id_private *cm_id_priv,
				  struct ib_qp_attr *qp_attr,
				  int *qp_attr_mask)
{}

static int iwcm_init_qp_rts_attr(struct iwcm_id_private *cm_id_priv,
				  struct ib_qp_attr *qp_attr,
				  int *qp_attr_mask)
{}

int iw_cm_init_qp_attr(struct iw_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{}
EXPORT_SYMBOL(iw_cm_init_qp_attr);

static int __init iw_cm_init(void)
{}

static void __exit iw_cm_cleanup(void)
{}

MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_IWCM, 2);

module_init(iw_cm_init);
module_exit(iw_cm_cleanup);