// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP host.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/key.h>
#include <linux/nvme-tcp.h>
#include <linux/nvme-keyring.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <net/tls.h>
#include <net/tls_prot.h>
#include <net/handshake.h>
#include <linux/blk-mq.h>
#include <crypto/hash.h>
#include <net/busy_poll.h>
#include <trace/events/sock.h>

#include "nvme.h"
#include "fabrics.h"

struct nvme_tcp_queue;

/*
 * Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value is sufficient to indicate general consideration of any
 * possible optimization.  Making it a module param allows for alternative
 * values that may be unique to some NIC implementations.
 */
static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
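
/*
 * Editor's note: a minimal sketch (an assumption, not the elided
 * nvme_tcp_alloc_queue() body below) of how so_priority is typically
 * applied, using the in-kernel sock_set_priority() helper on each queue's
 * socket. Loading with e.g. "modprobe nvme-tcp so_priority=1" would then
 * mark all nvme-tcp traffic with that priority.
 */
static void __maybe_unused nvme_tcp_set_priority_sketch(struct socket *sock)
{
	/* 0 means "leave the default"; only a non-zero priority is applied */
	if (so_priority > 0)
		sock_set_priority(sock->sk, so_priority);
}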

/*
 * Use an unbound workqueue for nvme_tcp_wq so that the CPU affinity can be
 * set from sysfs.
 */
static bool wq_unbound;
module_param(wq_unbound, bool, 0644);
MODULE_PARM_DESC(wq_unbound, "Use unbound workqueue for nvme-tcp IO context if true");
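
/*
 * Editor's note: a sketch of the allocation this knob typically selects
 * between (the exact flag mix is an assumption; the real choice lives in
 * the elided module init below). WQ_UNBOUND plus WQ_SYSFS exposes
 * /sys/devices/virtual/workqueue/nvme_tcp_wq/cpumask, which is what makes
 * the CPU affinity settable from sysfs.
 */
static __maybe_unused struct workqueue_struct *nvme_tcp_alloc_wq_sketch(void)
{
	unsigned int flags = wq_unbound ?
		(WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS) :
		(WQ_MEM_RECLAIM | WQ_HIGHPRI);

	return alloc_workqueue("nvme_tcp_wq", flags, 0);
}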

/*
 * TLS handshake timeout in seconds
 */
static int tls_handshake_timeout = 10;
#ifdef CONFIG_NVME_TCP_TLS
module_param(tls_handshake_timeout, int, 0644);
MODULE_PARM_DESC(tls_handshake_timeout,
		 "nvme TLS handshake timeout in seconds (default 10)");
#endif
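
/*
 * Editor's note: a sketch of how a handshake timeout like this is commonly
 * consumed. The kernel handshake API (net/handshake.h) completes
 * asynchronously, so the initiator bounds its wait on a completion; the
 * parameter is in seconds, hence the HZ conversion. The completion argument
 * is an assumption, not a quote of the elided nvme_tcp_start_tls() below.
 */
static int __maybe_unused nvme_tcp_tls_wait_sketch(struct completion *done)
{
	long ret = wait_for_completion_interruptible_timeout(done,
			tls_handshake_timeout * HZ);

	if (!ret)
		return -ETIMEDOUT;	/* handshake did not finish in time */
	return ret < 0 ? ret : 0;	/* interrupted, or success */
}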

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* lockdep can detect a circular dependency of the form
 *   sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
 * because dependencies are tracked for both nvme-tcp and user contexts. Using
 * a separate class prevents lockdep from conflating nvme-tcp socket use with
 * user-space socket API use.
 */
static struct lock_class_key nvme_tcp_sk_key[2];
static struct lock_class_key nvme_tcp_slock_key[2];

static void nvme_tcp_reclassify_socket(struct socket *sock)
{}
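
/*
 * Editor's sketch of the reclassification itself (an assumption standing in
 * for the elided body above): sock_lock_init_class_and_name() moves the
 * socket's sk_lock and slock into the dedicated classes declared above,
 * one pair per address family (index 0 for AF_INET, 1 for AF_INET6 here).
 */
static void __maybe_unused nvme_tcp_reclassify_sketch(struct socket *sock)
{
	struct sock *sk = sock->sk;

	/* reclassifying is only safe while the socket lock is unused */
	if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
		return;

	switch (sk->sk_family) {
	case AF_INET:
		sock_lock_init_class_and_name(sk, "slock-AF_INET-NVMe",
				&nvme_tcp_slock_key[0],
				"sk_lock-AF_INET-NVMe", &nvme_tcp_sk_key[0]);
		break;
	case AF_INET6:
		sock_lock_init_class_and_name(sk, "slock-AF_INET6-NVMe",
				&nvme_tcp_slock_key[1],
				"sk_lock-AF_INET6-NVMe", &nvme_tcp_sk_key[1]);
		break;
	default:
		WARN_ON_ONCE(1);
	}
}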
#else
static void nvme_tcp_reclassify_socket(struct socket *sock) { }
#endif

enum nvme_tcp_send_state {};

struct nvme_tcp_request {};

enum nvme_tcp_queue_flags {};

enum nvme_tcp_recv_state {};

struct nvme_tcp_ctrl;
struct nvme_tcp_queue {};

struct nvme_tcp_ctrl {};

static LIST_HEAD(nvme_tcp_ctrl_list);
static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
static struct workqueue_struct *nvme_tcp_wq;
static const struct blk_mq_ops nvme_tcp_mq_ops;
static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);

static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
{}

static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
{}

/*
 * Check if the queue is TLS encrypted
 */
static inline bool nvme_tcp_queue_tls(struct nvme_tcp_queue *queue)
{}

/*
 * Check if TLS is configured for the controller.
 */
static inline bool nvme_tcp_tls_configured(struct nvme_ctrl *ctrl)
{}
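
/*
 * Editor's sketch of the two checks above, compiled out since the real
 * helpers are defined in this file. The member names (queue->tls_enabled,
 * set once the handshake succeeds, and the user-requested ctrl->opts->tls)
 * are assumptions; both structures are elided in this listing. With
 * CONFIG_NVME_TCP_TLS unset, both checks collapse to false.
 */
#if 0
static inline bool nvme_tcp_queue_tls(struct nvme_tcp_queue *queue)
{
	if (!IS_ENABLED(CONFIG_NVME_TCP_TLS))
		return false;

	return queue->tls_enabled;
}

static inline bool nvme_tcp_tls_configured(struct nvme_ctrl *ctrl)
{
	if (!IS_ENABLED(CONFIG_NVME_TCP_TLS))
		return false;

	return ctrl->opts->tls;
}
#endif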

static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
{}

static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
{}

static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
{}

static inline void *nvme_tcp_req_cmd_pdu(struct nvme_tcp_request *req)
{}

static inline void *nvme_tcp_req_data_pdu(struct nvme_tcp_request *req)
{}

static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_request *req)
{}

static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
{}

static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
{}

static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
{}

static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
{}

static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
{}

static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
{}

static inline bool nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
		int len)
{}

static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
		unsigned int dir)
{}

static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
		int len)
{}

static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
{}

static inline bool nvme_tcp_queue_has_pending(struct nvme_tcp_queue *queue)
{}

static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
{}

static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
		bool sync, bool last)
{}

static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
{}

static inline struct nvme_tcp_request *
nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
{}

static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
		__le32 *dgst)
{}

static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
		struct page *page, off_t off, size_t len)
{}

static inline void nvme_tcp_hdgst(struct ahash_request *hash,
		void *pdu, size_t len)
{}

static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
		void *pdu, size_t pdu_len)
{}

static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
{}

static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx)
{}

static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx,
		unsigned int numa_node)
{}

static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{}

static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{}

static enum nvme_tcp_recv_state
nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
{}

static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
{}

static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
{}

static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
		struct nvme_completion *cqe)
{}

static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
		struct nvme_tcp_data_pdu *pdu)
{}

static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
		struct nvme_tcp_rsp_pdu *pdu)
{}

static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req)
{}

static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
		struct nvme_tcp_r2t_pdu *pdu)
{}

static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
		unsigned int *offset, size_t *len)
{}

static inline void nvme_tcp_end_request(struct request *rq, u16 status)
{}

static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
			      unsigned int *offset, size_t *len)
{}

static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
		struct sk_buff *skb, unsigned int *offset, size_t *len)
{}

static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
			     unsigned int offset, size_t len)
{}

static void nvme_tcp_data_ready(struct sock *sk)
{}

static void nvme_tcp_write_space(struct sock *sk)
{}

static void nvme_tcp_state_change(struct sock *sk)
{}

static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
{}

static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
{}

static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
{}

static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
{}

static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
{}

static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
{}

static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
{}

static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
{}

static void nvme_tcp_io_work(struct work_struct *w)
{}

static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
{}

static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
{}

static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
{}

static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
{}

static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
{}

static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
{}

static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
{}

static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
{}

static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
{}

static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
{}

static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
{}

static void nvme_tcp_tls_done(void *data, int status, key_serial_t pskid)
{}

static int nvme_tcp_start_tls(struct nvme_ctrl *nctrl,
			      struct nvme_tcp_queue *queue,
			      key_serial_t pskid)
{}

static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid,
				key_serial_t pskid)
{}

static void nvme_tcp_restore_sock_ops(struct nvme_tcp_queue *queue)
{}

static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
{}

static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
{}

static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue)
{}

static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
{}

static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
{}

static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
{}

static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
{}

static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl,
				    int first, int last)
{}

static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
{}

static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
{}

static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
{}

static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
{}

static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
{}

static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
{}

static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
{}

static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
		bool remove)
{}

static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
		bool remove)
{}

static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl,
		int status)
{}

static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
{}

static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
{}

static void nvme_tcp_error_recovery_work(struct work_struct *work)
{}

static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
{}

static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
{}

static void nvme_reset_ctrl_work(struct work_struct *work)
{}

static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl)
{}

static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
{}

static void nvme_tcp_set_sg_null(struct nvme_command *c)
{}

static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
		struct nvme_command *c, u32 data_len)
{}

static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
		u32 data_len)
{}

static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
{}

static void nvme_tcp_complete_timed_out(struct request *rq)
{}

static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
{}

static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
			struct request *rq)
{}

static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
		struct request *rq)
{}

static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)
{}

static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{}

static void nvme_tcp_map_queues(struct blk_mq_tag_set *set)
{}

static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{}

static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
{}

static const struct blk_mq_ops nvme_tcp_mq_ops = {};

static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {};

static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {};

static bool
nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
{}

static struct nvme_tcp_ctrl *nvme_tcp_alloc_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{}

static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{}

static struct nvmf_transport_ops nvme_tcp_transport = {};

static int __init nvme_tcp_init_module(void)
{}

static void __exit nvme_tcp_cleanup_module(void)
{}

module_init(nvme_tcp_init_module);
module_exit(nvme_tcp_cleanup_module);

MODULE_DESCRIPTION("NVMe host TCP transport driver");
MODULE_LICENSE("GPL v2");