/* linux/drivers/nvme/target/tcp.c */

// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP target.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt)
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/key.h>
#include <linux/nvme-tcp.h>
#include <linux/nvme-keyring.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <net/tls.h>
#include <net/tls_prot.h>
#include <net/handshake.h>
#include <linux/inet.h>
#include <linux/llist.h>
#include <crypto/hash.h>
#include <trace/events/sock.h>

#include "nvmet.h"

/* Transport tuning constants (values elided in this view):
 * default inline data size advertised to hosts, maximum H2CData PDU
 * payload, and the listen backlog. NOTE(review): actual values not
 * visible here — confirm against the full source.
 */
#define NVMET_TCP_DEF_INLINE_DATA_SIZE
#define NVMET_TCP_MAXH2CDATA
#define NVMET_TCP_BACKLOG

/* Parse @str as an integer into *@val, presumably rejecting values outside
 * [@min, @max] — body elided; confirm against the full source. */
static int param_store_val(const char *str, int *val, int min, int max)
{}

/* kernel_param_ops .set callback; likely delegates to param_store_val()
 * for the parameter bound to @kp (body elided). */
static int set_params(const char *str, const struct kernel_param *kp)
{}

/* Ops table wiring set_params() into the module-param machinery
 * (initializer elided in this view). */
static const struct kernel_param_ops set_param_ops =;

/* Define the socket priority to use for connections were it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value being sufficient to indicate general consideration of any
 * possible optimization.  Making it a module param allows for alternative
 * values that may be unique for some NIC implementations.
 */
/* SO_PRIORITY value applied to accepted sockets; 0 (default) means no
 * special priority. Exposed as a module parameter via set_param_ops
 * (device_param_cb/MODULE_PARM_DESC arguments elided in this view). */
static int so_priority;
device_param_cb();
MODULE_PARM_DESC();

/* Define a time period (in usecs) that io_work() shall sample an activated
 * queue before determining it to be idle.  This optional module behavior
 * can enable NIC solutions that support socket optimized packet processing
 * using advanced interrupt moderation techniques.
 */
/* Optional busy-poll window (usecs) io_work() samples a queue before
 * declaring it idle; 0 disables the behavior. Module parameter
 * (device_param_cb/MODULE_PARM_DESC arguments elided in this view). */
static int idle_poll_period_usecs;
device_param_cb();
MODULE_PARM_DESC();

#ifdef CONFIG_NVME_TARGET_TCP_TLS
/*
 * TLS handshake timeout
 */
/* Seconds to wait for the userspace TLS handshake to complete before
 * giving up on the connection. Default value elided in this view. */
static int tls_handshake_timeout =;
module_param(tls_handshake_timeout, int, 0644);
MODULE_PARM_DESC();
#endif

/* Per-invocation work budgets for the io_work loop (values elided). */
#define NVMET_TCP_RECV_BUDGET
#define NVMET_TCP_SEND_BUDGET
#define NVMET_TCP_IO_WORK_BUDGET

/* Send-side state machine for a command (enumerators elided). */
enum nvmet_tcp_send_state {};

/* Receive-side state machine for a queue (enumerators elided). */
enum nvmet_tcp_recv_state {};

/* Anonymous constant/flag set (enumerators elided in this view). */
enum {};

/* Per-command context for one NVMe/TCP command (fields elided). */
struct nvmet_tcp_cmd {};

/* Queue lifecycle states (enumerators elided). */
enum nvmet_tcp_queue_state {};

/* Per-connection queue context (fields elided). */
struct nvmet_tcp_queue {};

/* Per-listening-port context (fields elided). */
struct nvmet_tcp_port {};

/* Global queue bookkeeping: queue-index allocator, list of live queues,
 * and the mutex guarding that list. */
static DEFINE_IDA(nvmet_tcp_queue_ida);
static LIST_HEAD(nvmet_tcp_queue_list);
static DEFINE_MUTEX(nvmet_tcp_queue_mutex);

/* Module-wide workqueue and the fabrics ops table registered with the
 * nvmet core (definition of the table appears near end of file). */
static struct workqueue_struct *nvmet_tcp_wq;
static const struct nvmet_fabrics_ops nvmet_tcp_ops;
/* Forward declarations for teardown helpers referenced before their
 * definitions. */
static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd);

/* Tag identifying @cmd within @queue (derivation elided — likely an
 * index into the queue's command array; confirm in full source). */
static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *cmd)
{}

/* True if @cmd currently carries host-to-controller data (body elided). */
static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
{}

/* True if @cmd still needs to receive data from the host (body elided). */
static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
{}

/* True if @cmd still needs to send data to the host (body elided). */
static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
{}

/* True if @cmd's data arrived inline with the command PDU (body elided). */
static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
{}

/* Obtain a free command context from @queue, or NULL-equivalent failure
 * (body elided — exact exhaustion behavior not visible here). */
static inline struct nvmet_tcp_cmd *
nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
{}

/* Return @cmd to its queue's free pool (body elided). */
static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
{}

/* CPU on which @queue's work should run (selection policy elided). */
static inline int queue_cpu(struct nvmet_tcp_queue *queue)
{}

/* Header-digest length in bytes for @queue — presumably 0 when HDGST is
 * not negotiated (body elided). */
static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
{}

/* Data-digest length in bytes for @queue — presumably 0 when DDGST is
 * not negotiated (body elided). */
static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
{}

/* Compute the header digest over @len bytes of @pdu using @hash
 * (body elided). */
static inline void nvmet_tcp_hdgst(struct ahash_request *hash,
		void *pdu, size_t len)
{}

/* Validate the header digest of a received PDU; nonzero return signals
 * a digest mismatch or protocol error (body elided). */
static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
	void *pdu, size_t len)
{}

/* Sanity-check data-digest expectations for a received PDU (body elided). */
static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
{}

/* If cmd buffers are NULL, no operation is performed */
static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
{}

/* Build the iovec describing @cmd's current PDU payload for socket I/O
 * (body elided). */
static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{}

/* Escalate an unrecoverable protocol error on @queue — presumably tears
 * the connection down (body elided). */
static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
{}

/* Handle a socket-level error @status reported for @queue (body elided). */
static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
{}

/* Map @cmd's data buffer for transfer; nonzero on failure (body elided). */
static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
{}

/* Compute the data digest for @cmd's payload using @hash (body elided). */
static void nvmet_tcp_calc_ddgst(struct ahash_request *hash,
		struct nvmet_tcp_cmd *cmd)
{}

/* Prepare a C2HData PDU header for @cmd (body elided). */
static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
{}

/* Prepare an R2T PDU header for @cmd, soliciting host data (body elided). */
static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
{}

/* Prepare a capsule-response PDU header for @cmd (body elided). */
static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
{}

/* Drain queued responses onto the queue's send list (body elided —
 * presumably consumes a lock-free llist; confirm in full source). */
static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
{}

/* Pick the next command with something to transmit, or NULL-equivalent
 * when nothing is pending (body elided). */
static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue)
{}

/* nvmet core completion hook: queue @req's response for transmission
 * (body elided). */
static void nvmet_tcp_queue_response(struct nvmet_req *req)
{}

/* Hand @cmd to the nvmet core for execution (body elided). */
static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
{}

/* Transmit (part of) @cmd's C2HData PDU header; return value likely
 * distinguishes done/again/error (bodies in this section elided). */
static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
{}

/* Transmit (part of) @cmd's data payload. @last_in_batch presumably
 * controls MSG_MORE-style corking — confirm in full source. */
static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{}

/* Transmit @cmd's response capsule PDU (body elided). */
static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
		bool last_in_batch)
{}

/* Transmit @cmd's R2T PDU (body elided). */
static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{}

/* Transmit the trailing data digest for @cmd (body elided). */
static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{}

/* Advance the current command's send state machine by one step
 * (body elided). */
static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
		bool last_in_batch)
{}

/* Send-side loop: perform up to @budget send steps, reporting progress
 * through *@sends (body elided). */
static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
		int budget, int *sends)
{}

/* Reset @queue's receive state to expect a fresh PDU header
 * (body elided). */
static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
{}

/* Release @queue's digest hash transform/request (body elided). */
static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
{}

/* Allocate @queue's digest hash transform/request; nonzero on failure
 * (body elided). */
static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue)
{}


/* Process the host's ICReq PDU and presumably reply with ICResp,
 * negotiating digests/parameters (body elided). */
static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
{}

/* Error path for a command whose request setup failed; still must drain
 * any incoming data for it (body elided — confirm exact semantics). */
static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
{}

/* Process an H2CData PDU header, locating the command it belongs to
 * (body elided). */
static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
{}

/* Dispatch a fully received PDU header to its type-specific handler
 * (body elided). */
static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
{}

/* Expected header size per PDU type (initializer elided). */
static const u8 nvme_tcp_pdu_sizes[] =;

/* Look up the expected header size for PDU @type (body elided). */
static inline u8 nvmet_tcp_pdu_size(u8 type)
{}

/* True if @type is a PDU type this target accepts (body elided). */
static inline bool nvmet_tcp_pdu_valid(u8 type)
{}

/* For TLS sockets: verify the control message in @cbuf describes an
 * application-data record (body elided). */
static int nvmet_tcp_tls_record_ok(struct nvmet_tcp_queue *queue,
		struct msghdr *msg, char *cbuf)
{}

/* Receive (part of) the next PDU header from the socket (body elided). */
static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
{}

/* Prepare @cmd to receive its trailing data digest (body elided). */
static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
{}

/* Receive (part of) the current command's data payload (body elided). */
static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
{}

/* Receive and presumably verify the data digest (body elided). */
static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
{}

/* Advance the receive state machine by one step (body elided). */
static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
{}

/* Receive-side loop: perform up to @budget recv steps, reporting
 * progress through *@recvs (body elided). */
static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
		int budget, int *recvs)
{}

/* kref release callback: final teardown trigger for a queue
 * (body elided). */
static void nvmet_tcp_release_queue(struct kref *kref)
{}

/* Schedule asynchronous release of @queue (body elided — presumably
 * queues release_queue_work; confirm in full source). */
static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
{}

/* Start the idle-poll deadline window for @queue; meaningful only when
 * idle_poll_period_usecs is set (body elided). */
static inline void nvmet_tcp_arm_queue_deadline(struct nvmet_tcp_queue *queue)
{}

/* Decide whether io_work should keep polling @queue given @ops completed
 * operations and the armed deadline (body elided). */
static bool nvmet_tcp_check_queue_deadline(struct nvmet_tcp_queue *queue,
		int ops)
{}

/* Main per-queue worker: alternates send/recv processing within the
 * configured budgets (body elided). */
static void nvmet_tcp_io_work(struct work_struct *w)
{}

/* Allocate/initialize one command context @c for @queue; nonzero on
 * failure (body elided). */
static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *c)
{}

/* Free one command context @c (body elided). */
static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c)
{}

/* Allocate the queue's command array; nonzero on failure (body elided). */
static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
{}

/* Free the queue's command array (body elided). */
static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
{}

/* Restore the socket's original sk callbacks saved at setup time
 * (body elided). */
static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
{}

/* Fail/complete commands still waiting for host data during teardown
 * (body elided). */
static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
{}

/* Release data buffers of commands that were receiving data
 * (body elided). */
static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue)
{}

/* Workqueue handler performing full queue teardown (body elided). */
static void nvmet_tcp_release_queue_work(struct work_struct *w)
{}

/* sk_data_ready callback: kick io_work when data arrives (body elided). */
static void nvmet_tcp_data_ready(struct sock *sk)
{}

/* sk_write_space callback: resume sending when the socket has room
 * (body elided). */
static void nvmet_tcp_write_space(struct sock *sk)
{}

/* sk_state_change callback: react to TCP state transitions, presumably
 * scheduling release on close (body elided). */
static void nvmet_tcp_state_change(struct sock *sk)
{}

/* Configure the accepted socket for @queue: presumably installs the sk
 * callbacks above and applies so_priority (body elided). */
static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
{}

#ifdef CONFIG_NVME_TARGET_TCP_TLS
/* Peek at the first bytes on the socket, likely to detect whether the
 * host starts with TLS or cleartext (body elided — confirm). */
static int nvmet_tcp_try_peek_pdu(struct nvmet_tcp_queue *queue)
{}

/* Completion callback for the userspace TLS handshake; @status and
 * @peerid report the outcome (body elided). */
static void nvmet_tcp_tls_handshake_done(void *data, int status,
					 key_serial_t peerid)
{}

/* Work item fired when the TLS handshake exceeds tls_handshake_timeout
 * (body elided). */
static void nvmet_tcp_tls_handshake_timeout(struct work_struct *w)
{}

/* Kick off the userspace TLS handshake for @queue; nonzero on failure
 * (body elided). */
static int nvmet_tcp_tls_handshake(struct nvmet_tcp_queue *queue)
{}
#else
/* Stub so non-TLS builds can reference the timeout handler unconditionally. */
static void nvmet_tcp_tls_handshake_timeout(struct work_struct *w) {}
#endif

/* Create and initialize a queue for freshly accepted socket @newsock on
 * @port (body elided). */
static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
		struct socket *newsock)
{}

/* Work item accepting pending connections on a listening port
 * (body elided). */
static void nvmet_tcp_accept_work(struct work_struct *w)
{}

/* Listening socket's sk_data_ready callback: presumably schedules
 * accept_work (body elided). */
static void nvmet_tcp_listen_data_ready(struct sock *sk)
{}

/* Fabrics op: create a listening socket for @nport; nonzero on failure
 * (body elided). */
static int nvmet_tcp_add_port(struct nvmet_port *nport)
{}

/* Tear down all live queues belonging to @port (body elided). */
static void nvmet_tcp_destroy_port_queues(struct nvmet_tcp_port *port)
{}

/* Fabrics op: stop listening and release @nport's resources
 * (body elided). */
static void nvmet_tcp_remove_port(struct nvmet_port *nport)
{}

/* Fabrics op: tear down every queue of @ctrl (body elided). */
static void nvmet_tcp_delete_ctrl(struct nvmet_ctrl *ctrl)
{}

/* Fabrics op: finalize a queue when its submission queue is installed;
 * returns an NVMe status code (body elided). */
static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
{}

/* Fabrics op: format the port's transport address for discovery log
 * pages into @traddr (body elided). */
static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
		struct nvmet_port *nport, char *traddr)
{}

/* Fabrics op: report the host-side address of @ctrl's connection
 * (body elided). */
static ssize_t nvmet_tcp_host_port_addr(struct nvmet_ctrl *ctrl,
			char *traddr, size_t traddr_len)
{}

/* Fabrics ops table registered with the nvmet core (initializer elided). */
static const struct nvmet_fabrics_ops nvmet_tcp_ops =;

/* Module init: presumably creates nvmet_tcp_wq and registers
 * nvmet_tcp_ops with the nvmet core (body elided). */
static int __init nvmet_tcp_init(void)
{}

/* Module exit: unregister and release module-wide resources
 * (body elided). */
static void __exit nvmet_tcp_exit(void)
{}

/* NOTE(review): macro arguments elided in this view — module_init()
 * presumably takes nvmet_tcp_init; confirm against the full source. */
module_init();
module_exit(nvmet_tcp_exit);

MODULE_DESCRIPTION();
MODULE_LICENSE();
MODULE_ALIAS(); /* 3 == NVMF_TRTYPE_TCP */