linux/drivers/infiniband/hw/cxgb4/cm.c

/*
 * Copyright (c) 2009-2014 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <rdma/ib_addr.h>

#include <libcxgb_cm.h>
#include "iw_cxgb4.h"
#include "clip_tbl.h"

static char *states[] =;

static int nocong;
module_param(nocong, int, 0644);
MODULE_PARM_DESC();

static int enable_ecn;
module_param(enable_ecn, int, 0644);
MODULE_PARM_DESC();

static int dack_mode;
module_param(dack_mode, int, 0644);
MODULE_PARM_DESC();

uint c4iw_max_read_depth =;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC();

static int enable_tcp_timestamps;
module_param(enable_tcp_timestamps, int, 0644);
MODULE_PARM_DESC();

static int enable_tcp_sack;
module_param(enable_tcp_sack, int, 0644);
MODULE_PARM_DESC();

static int enable_tcp_window_scaling =;
module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC();

static int peer2peer =;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC();

static int p2p_type =;
module_param(p2p_type, int, 0644);
MODULE_PARM_DESC();

static int ep_timeout_secs =;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC();

static int mpa_rev =;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC();

static int markers_enabled;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC();

static int crc_enabled =;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC();

static int rcv_win =;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC();

static int snd_win =;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC();

static struct workqueue_struct *workq;

static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(struct timer_list *t);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);
static int sched(struct c4iw_dev *dev, struct sk_buff *skb);

static LIST_HEAD(timeout_list);
static DEFINE_SPINLOCK(timeout_lock);

static void deref_cm_id(struct c4iw_ep_common *epc)
{}

static void ref_cm_id(struct c4iw_ep_common *epc)
{}

static void deref_qp(struct c4iw_ep *ep)
{}

static void ref_qp(struct c4iw_ep *ep)
{}

static void start_ep_timer(struct c4iw_ep *ep)
{}

static int stop_ep_timer(struct c4iw_ep *ep)
{}

static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
		  struct l2t_entry *l2e)
{}

int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
{}

static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
{}

static void set_emss(struct c4iw_ep *ep, u16 opt)
{}

static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
{}

static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{}

static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{}

static int alloc_ep_skb_list(struct sk_buff_head *ep_skb_list, int size)
{}

static void *alloc_ep(int size, gfp_t gfp)
{}

static void remove_ep_tid(struct c4iw_ep *ep)
{}

static int insert_ep_tid(struct c4iw_ep *ep)
{}

/*
 * Atomically look up the ep ptr given the tid and grab a reference on the ep.
 */
static struct c4iw_ep *get_ep_from_tid(struct c4iw_dev *dev, unsigned int tid)
{}
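
/*
 * A minimal sketch of the lookup-and-ref pattern described above,
 * assuming the tid->ep map is an xarray named hwtids (illustrative
 * only, not the actual cm.c body; the _sketch name is hypothetical).
 * Holding the xarray lock makes the load and the ref-get atomic with
 * respect to concurrent removal.
 */
static struct c4iw_ep *get_ep_from_tid_sketch(struct c4iw_dev *dev,
					      unsigned int tid)
{
	struct c4iw_ep *ep;

	xa_lock_irq(&dev->hwtids);
	ep = xa_load(&dev->hwtids, tid);
	if (ep)
		c4iw_get_ep(&ep->com);	/* reference held while the caller uses the ep */
	xa_unlock_irq(&dev->hwtids);
	return ep;
}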

/*
 * Atomically look up the ep ptr given the stid and grab a reference on the ep.
 */
static struct c4iw_listen_ep *get_ep_from_stid(struct c4iw_dev *dev,
					       unsigned int stid)
{}

void _c4iw_free_ep(struct kref *kref)
{}

static void release_ep_resources(struct c4iw_ep *ep)
{}

static int status2errno(int status)
{}

/*
 * Try to reuse skbs that were already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{}
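
/*
 * A minimal sketch of the reuse pattern described above, under the
 * assumption that an skb is reusable only if it is linear and unshared
 * (illustrative only, not the actual cm.c body; the _sketch name is
 * hypothetical).
 */
static struct sk_buff *get_skb_sketch(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);		/* drop any stale payload */
		skb_get(skb);			/* hold a reference for the caller */
		skb_reset_transport_header(skb);
	} else {
		skb = alloc_skb(len, gfp);	/* fall back to a fresh allocation */
		if (!skb)
			return NULL;
	}
	return skb;
}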

static struct net_device *get_real_dev(struct net_device *egress_dev)
{}

static void arp_failure_discard(void *handle, struct sk_buff *skb)
{}

static void mpa_start_arp_failure(void *handle, struct sk_buff *skb)
{}

enum {};

static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
{}

static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
{}

/*
 * Fake up a special CPL opcode and call sched() so process_work() will call
 * _put_ep_safe() in a safe context to free the ep resources.  This is needed
 * because ARP error handlers are called in an ATOMIC context, and
 * _c4iw_free_ep() needs to block.
 */
static void queue_arp_failure_cpl(struct c4iw_ep *ep, struct sk_buff *skb,
				  int cpl)
{}
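
/*
 * Sketch of the mechanism described above, assuming the fake opcode is
 * written over the CPL header and the ep pointer is stashed in skb->cb
 * where process_work() expects to find it (illustrative only; the
 * _sketch name is hypothetical).
 */
static void queue_arp_failure_cpl_sketch(struct c4iw_ep *ep,
					 struct sk_buff *skb, int cpl)
{
	struct cpl_act_establish *rpl = cplhdr(skb);

	rpl->ot.opcode = cpl;	/* overwrite with the fake CPL opcode */
	/* stash the ep after the dev pointer that sched() stores in cb[] */
	*((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))) = ep;
	sched(ep->com.dev, skb);	/* defer freeing to the workqueue context */
}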

/* Handle an ARP failure for an accept */
static void pass_accept_rpl_arp_failure(void *handle, struct sk_buff *skb)
{}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no-RST
 * variant and send it along.
 */
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{}
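
/*
 * Sketch of the no-RST rewrite described above: flip the command field
 * in the already-built CPL_ABORT_REQ and retransmit it.  Error handling
 * is elided; illustrative only, and the _sketch name is hypothetical.
 */
static void abort_arp_failure_sketch(void *handle, struct sk_buff *skb)
{
	struct c4iw_rdev *rdev = handle;
	struct cpl_abort_req *req = cplhdr(skb);

	req->cmd = CPL_ABORT_NO_RST;	/* don't send a RST to the peer */
	c4iw_ofld_send(rdev, skb);	/* resend the modified work request */
}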

static int send_flowc(struct c4iw_ep *ep)
{}

static int send_halfclose(struct c4iw_ep *ep)
{}

static void read_tcb(struct c4iw_ep *ep)
{}

static int send_abort_req(struct c4iw_ep *ep)
{}

static int send_abort(struct c4iw_ep *ep)
{}

static int send_connect(struct c4iw_ep *ep)
{}

static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
			u8 mpa_rev_to_use)
{}

static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{}

static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{}

static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{}

static void close_complete_upcall(struct c4iw_ep *ep, int status)
{}

static void peer_close_upcall(struct c4iw_ep *ep)
{}

static void peer_abort_upcall(struct c4iw_ep *ep)
{}

static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{}

static int connect_request_upcall(struct c4iw_ep *ep)
{}

static void established_upcall(struct c4iw_ep *ep)
{}

static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
{}

#define RELAXED_IRD_NEGOTIATION

/*
 * process_mpa_reply - process streaming mode MPA reply
 *
 * Returns:
 *
 * 0 upon success indicating a connect reply was delivered to the ULP
 * or the MPA reply is incomplete but valid so far.
 *
 * 1 if a failure requires the caller to close the connection.
 *
 * 2 if a failure requires the caller to abort the connection.
 */
static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{}

/*
 * process_mpa_request - process streaming mode MPA request
 *
 * Returns:
 *
 * 0 upon success indicating a connect request was delivered to the ULP
 * or the MPA request is incomplete but valid so far.
 *
 * 1 if a failure requires the caller to close the connection.
 *
 * 2 if a failure requires the caller to abort the connection.
 */
static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
{}
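
/*
 * How a caller is expected to act on the 0/1/2 contract documented for
 * both MPA handlers above.  This is a hypothetical helper derived from
 * that contract, not code lifted from rx_data().
 */
static void handle_mpa_result_sketch(struct c4iw_ep *ep, int ret, gfp_t gfp)
{
	if (ret == 0)
		return;		/* delivered, or still accumulating bytes */
	/* 1 => orderly close, 2 => abort */
	c4iw_ep_disconnect(ep, ret == 2, gfp);
}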

static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
{}

static void complete_cached_srq_buffers(struct c4iw_ep *ep, u32 srqidx)
{}

static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{}

static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
{}

/*
 * Some of the error codes above implicitly indicate that no TID was
 * allocated as a result of the ACT_OPEN.  We use this predicate to make
 * that explicit.
 */
static inline int act_open_has_tid(int status)
{}
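
/*
 * A plausible form of the predicate, assuming the usual CPL_ERR_* codes
 * for TCAM and connection-exists failures are the ones that arrive
 * without a TID.  Illustrative sketch with a hypothetical name; check
 * t4_msg.h for the authoritative set of status codes.
 */
static inline int act_open_has_tid_sketch(int status)
{
	return status != CPL_ERR_TCAM_FULL &&
	       status != CPL_ERR_CONN_EXIST_SYNRECV &&
	       status != CPL_ERR_CONN_EXIST;
}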

static char *neg_adv_str(unsigned int status)
{}

static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
{}

#define ACT_OPEN_RETRY_COUNT

static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
		     struct dst_entry *dst, struct c4iw_dev *cdev,
		     bool clear_mpa_v1, enum chip_type adapter_type, u8 tos)
{}

static int c4iw_reconnect(struct c4iw_ep *ep)
{}

static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{}

static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{}

static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{}

static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
		     struct cpl_pass_accept_req *req)
{}

static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
{}

static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{}

static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{}

static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
{}

static void finish_peer_abort(struct c4iw_dev *dev, struct c4iw_ep *ep)
{}

static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
{}

static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{}

static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
{}

/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us it's just the single MPA request or reply.  We can now free
 * the skb holding the MPA message.
 */
static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
{}
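
/*
 * Sketch of the ack handling described above, assuming the in-flight
 * MPA skb is parked on ep->mpa_skb until the hardware acks it
 * (illustrative only; the _sketch name and credits parameter are
 * hypothetical, the real handler parses them from the CPL).
 */
static void fw4_ack_sketch(struct c4iw_ep *ep, u8 credits)
{
	if (!credits)
		return;			/* nothing acked yet */
	dst_confirm(ep->dst);		/* the peer is reachable */
	if (ep->mpa_skb) {
		kfree_skb(ep->mpa_skb);	/* MPA req/reply is now on the wire */
		ep->mpa_skb = NULL;
	}
}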

int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{}

int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{}

static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
{}

static int get_lladdr(struct net_device *dev, struct in6_addr *addr,
		      unsigned char banned_flags)
{}

static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
{}

int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{}

static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
{}

static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
{}

int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
{}

int c4iw_destroy_listen(struct iw_cm_id *cm_id)
{}

int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
{}

static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
			struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
{}

static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
			struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
{}

static inline u64 t4_tcb_get_field64(__be64 *tcb, u16 word)
{}

static inline u32 t4_tcb_get_field32(__be64 *tcb, u16 word, u32 mask, u32 shift)
{}

static int read_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{}

static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{}

static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid, u8 tos)
{}

static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
				  __be32 laddr, __be16 lport,
				  __be32 raddr, __be16 rport,
				  u32 rcv_isn, u32 filter, u16 window,
				  u32 rss_qid, u8 port_id)
{}

/*
 * Handler for CPL_RX_PKT messages.  These arrive when a filter, rather
 * than a server TID, is used to redirect an incoming SYN: packets that
 * hit the filter are steered to the offload queue, and the driver then
 * tries to establish the connection with a firmware work request.
 */
static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
{}
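
/*
 * Condensed sketch of the redirect flow described above, with the
 * addressing fields passed in as parameters for brevity (the real
 * handler parses them out of the received SYN).  Illustrative only;
 * the _sketch name and parameter list are hypothetical.
 */
static int rx_pkt_sketch(struct c4iw_dev *dev, struct sk_buff *skb,
			 int stid, u8 tos,
			 __be32 laddr, __be16 lport,
			 __be32 raddr, __be16 rport,
			 u32 rcv_isn, u32 filter, u16 window,
			 u32 rss_qid, u8 port_id)
{
	/* Rewrite the redirected SYN into a synthetic CPL_PASS_ACCEPT_REQ */
	build_cpl_pass_accept_req(skb, stid, tos);
	/* Ask the firmware to establish the passive half-connection */
	send_fw_pass_open_req(dev, skb, laddr, lport, raddr, rport,
			      rcv_isn, filter, window, rss_qid, port_id);
	return 0;
}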

/*
 * These are the real handlers that are called from a
 * work queue.
 */
static c4iw_handler_func work_handlers[NUM_CPL_CMDS + NUM_FAKE_CPLS] =;

static void process_timeout(struct c4iw_ep *ep)
{}

static void process_timedout_eps(void)
{}

static void process_work(struct work_struct *work)
{}

static DECLARE_WORK(skb_work, process_work);

static void ep_timeout(struct timer_list *t)
{}

/*
 * All the CM events are handled on a work queue so they are processed
 * in a safe, sleepable context.
 */
static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
{}
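
/*
 * Sketch of the deferral described above: stash the dev pointer in
 * skb->cb where process_work() retrieves it, queue the skb, and kick
 * the workqueue.  Illustrative only; the _sketch name is hypothetical.
 */
static int sched_sketch(struct c4iw_dev *dev, struct sk_buff *skb)
{
	/* process_work() reads the dev pointer back from this cb[] slot */
	*((struct c4iw_dev **)(skb->cb + sizeof(void *))) = dev;
	skb_queue_tail(&rxq, skb);	/* hand the CPL off to process_work() */
	queue_work(workq, &skb_work);
	return 0;
}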

static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{}

static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{}

static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
{}

/*
 * Most upcalls from the T4 Core go to sched() to
 * schedule the processing on a work queue.
 */
c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] =;

int __init c4iw_cm_init(void)
{}

void c4iw_cm_term(void)
{}