// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <[email protected]>
 */

/* Operation
 *
 * The main entry point is svc_rdma_sendto. This is called by the
 * RPC server when an RPC Reply is ready to be transmitted to a client.
 *
 * The passed-in svc_rqst contains a struct xdr_buf which holds an
 * XDR-encoded RPC Reply message. sendto must construct the RPC-over-RDMA
 * transport header, post all Write WRs needed for this Reply, then post
 * a Send WR conveying the transport header and the RPC message itself to
 * the client.
 *
 * svc_rdma_sendto must fully transmit the Reply before returning, as
 * the svc_rqst will be recycled as soon as sendto returns. Remaining
 * resources referred to by the svc_rqst are also recycled at that time.
 * Therefore any resources that must remain longer must be detached
 * from the svc_rqst and released later.
 *
 * Page Management
 *
 * The I/O that performs Reply transmission is asynchronous, and may
 * complete well after sendto returns. Thus pages under I/O must be
 * removed from the svc_rqst before sendto returns.
 *
 * The logic here depends on Send Queue and completion ordering. Since
 * the Send WR is always posted last, it will always complete last. Thus
 * when it completes, it is guaranteed that all previous Write WRs have
 * also completed.
 *
 * Write WRs are constructed and posted. Each Write segment gets its own
 * svc_rdma_rw_ctxt, allowing the Write completion handler to find and
 * DMA-unmap the pages under I/O for that Write segment. The Write
 * completion handler does not release any pages.
 *
 * When the Send WR is constructed, it also gets its own svc_rdma_send_ctxt.
 * The ownership of all of the Reply's pages is transferred into that
 * ctxt, the Send WR is posted, and sendto returns.
 *
 * The svc_rdma_send_ctxt is presented when the Send WR completes. The
 * Send completion handler finally releases the Reply's pages.
 *
 * This mechanism also assumes that completions on the transport's Send
 * Completion Queue do not run in parallel. Otherwise a Write completion
 * and Send completion running at the same time could release pages that
 * are still DMA-mapped.
 *
 * Error Handling
 *
 * - If the Send WR is posted successfully, it will either complete
 *   successfully, or get flushed. Either way, the Send completion
 *   handler releases the Reply's pages.
 * - If the Send WR cannot be posted, the forward path releases
 *   the Reply's pages.
 *
 * This handles the case, without the use of page reference counting,
 * where two different Write segments send portions of the same page.
 */

#include <linux/spinlock.h>
#include <linux/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc);

static struct svc_rdma_send_ctxt *
svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
{ … }

/**
 * svc_rdma_send_ctxts_destroy - Release all send_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 */
void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma)
{ … }

/**
 * svc_rdma_send_ctxt_get - Get a free send_ctxt
 * @rdma: controlling svcxprt_rdma
 *
 * Returns a ready-to-use send_ctxt, or NULL if none are
 * available and a fresh one cannot be allocated.
 */
struct svc_rdma_send_ctxt *svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma)
{ … }

static void svc_rdma_send_ctxt_release(struct svcxprt_rdma *rdma,
				       struct svc_rdma_send_ctxt *ctxt)
{ … }

static void svc_rdma_send_ctxt_put_async(struct work_struct *work)
{ … }

/**
 * svc_rdma_send_ctxt_put - Return send_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 * Pages left in sc_pages are DMA unmapped and released.
 */
void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_send_ctxt *ctxt)
{ … }

/**
 * svc_rdma_wake_send_waiters - manage Send Queue accounting
 * @rdma: controlling transport
 * @avail: Number of additional SQEs that are now available
 */
void svc_rdma_wake_send_waiters(struct svcxprt_rdma *rdma, int avail)
{ … }

/**
 * svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Send completion handler could be running.
 */
static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{ … }

/**
 * svc_rdma_post_send - Post a WR chain to the Send Queue
 * @rdma: transport context
 * @ctxt: WR chain to post
 *
 * Copy fields in @ctxt to stack variables in order to guarantee
 * that these values remain available after the ib_post_send() call.
 * In some error flow cases, svc_rdma_wc_send() releases @ctxt.
 *
 * Note there is potential for starvation when the Send Queue is
 * full because there is no order to when waiting threads are
 * awoken. The transport is typically provisioned with a deep
 * enough Send Queue that SQ exhaustion should be a rare event.
 *
 * Return values:
 *   %0: @ctxt's WR chain was posted successfully
 *   %-ENOTCONN: The connection was lost
 */
int svc_rdma_post_send(struct svcxprt_rdma *rdma,
		       struct svc_rdma_send_ctxt *ctxt)
{ … }

/**
 * svc_rdma_encode_read_list - Encode RPC Reply's Read chunk list
 * @sctxt: Send context for the RPC Reply
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply Read list
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_read_list(struct svc_rdma_send_ctxt *sctxt)
{ … }

/**
 * svc_rdma_encode_write_segment - Encode one Write segment
 * @sctxt: Send context for the RPC Reply
 * @chunk: Write chunk to push
 * @remaining: remaining bytes of the payload left in the Write chunk
 * @segno: which segment in the chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Write segment, and updates @remaining
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_segment(struct svc_rdma_send_ctxt *sctxt,
					     const struct svc_rdma_chunk *chunk,
					     u32 *remaining, unsigned int segno)
{ … }

/**
 * svc_rdma_encode_write_chunk - Encode one Write chunk
 * @sctxt: Send context for the RPC Reply
 * @chunk: Write chunk to push
 *
 * Copy a Write chunk from the Call transport header to the
 * Reply transport header. Update each segment's length field
 * to reflect the number of bytes written in that segment.
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Write chunk
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_chunk(struct svc_rdma_send_ctxt *sctxt,
					   const struct svc_rdma_chunk *chunk)
{ … }

/**
 * svc_rdma_encode_write_list - Encode RPC Reply's Write chunk list
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply's Write list
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_list(struct svc_rdma_recv_ctxt *rctxt,
					  struct svc_rdma_send_ctxt *sctxt)
{ … }

/**
 * svc_rdma_encode_reply_chunk - Encode RPC Reply's Reply chunk
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 * @length: size in bytes of the payload in the Reply chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply's Reply chunk
 *   %-EMSGSIZE on XDR buffer overflow
 *   %-E2BIG if the RPC message is larger than the Reply chunk
 */
static ssize_t svc_rdma_encode_reply_chunk(struct svc_rdma_recv_ctxt *rctxt,
					   struct svc_rdma_send_ctxt *sctxt,
					   unsigned int length)
{ … }

struct svc_rdma_map_data { … };

/**
 * svc_rdma_page_dma_map - DMA map one page
 * @data: pointer to arguments
 * @page: struct page to DMA map
 * @offset: offset into the page
 * @len: number of bytes to map
 *
 * Returns:
 *   %0 if DMA mapping was successful
 *   %-EIO if the page cannot be DMA mapped
 */
static int svc_rdma_page_dma_map(void *data, struct page *page,
				 unsigned long offset, unsigned int len)
{ … }

/**
 * svc_rdma_iov_dma_map - DMA map an iovec
 * @data: pointer to arguments
 * @iov: kvec to DMA map
 *
 * ib_dma_map_page() is used here because svc_rdma_dma_unmap()
 * handles DMA-unmap and it uses ib_dma_unmap_page() exclusively.
 *
 * Returns:
 *   %0 if DMA mapping was successful
 *   %-EIO if the iovec cannot be DMA mapped
 */
static int svc_rdma_iov_dma_map(void *data, const struct kvec *iov)
{ … }

/**
 * svc_rdma_xb_dma_map - DMA map all segments of an xdr_buf
 * @xdr: xdr_buf containing portion of an RPC message to transmit
 * @data: pointer to arguments
 *
 * Returns:
 *   %0 if DMA mapping was successful
 *   %-EIO if DMA mapping failed
 *
 * On failure, any DMA mappings that have already been done must be
 * unmapped by the caller.
 */
static int svc_rdma_xb_dma_map(const struct xdr_buf *xdr, void *data)
{ … }

struct svc_rdma_pullup_data { … };

/**
 * svc_rdma_xb_count_sges - Count how many SGEs will be needed
 * @xdr: xdr_buf containing portion of an RPC message to transmit
 * @data: pointer to arguments
 *
 * Returns:
 *   Number of SGEs needed to Send the contents of @xdr inline
 */
static int svc_rdma_xb_count_sges(const struct xdr_buf *xdr, void *data)
{ … }

/**
 * svc_rdma_pull_up_needed - Determine whether to use pull-up
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR
 * @write_pcl: Write chunk list provided by client
 * @xdr: xdr_buf containing RPC message to transmit
 *
 * Returns:
 *   %true if pull-up must be used
 *   %false otherwise
 */
static bool svc_rdma_pull_up_needed(const struct svcxprt_rdma *rdma,
				    const struct svc_rdma_send_ctxt *sctxt,
				    const struct svc_rdma_pcl *write_pcl,
				    const struct xdr_buf *xdr)
{ … }

/**
 * svc_rdma_xb_linearize - Copy region of xdr_buf to flat buffer
 * @xdr: xdr_buf containing portion of an RPC message to copy
 * @data: pointer to arguments
 *
 * Returns:
 *   Always zero.
 */
static int svc_rdma_xb_linearize(const struct xdr_buf *xdr, void *data)
{ … }

/**
 * svc_rdma_pull_up_reply_msg - Copy Reply into a single buffer
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR; xprt hdr is already prepared
 * @write_pcl: Write chunk list provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * The device is not capable of sending the reply directly.
 * Assemble the elements of @xdr into the transport header buffer.
 *
 * Assumptions:
 *   pull_up_needed has determined that @xdr will fit in the buffer.
 *
 * Returns:
 *   %0 if pull-up was successful
 *   %-EMSGSIZE if a buffer manipulation problem occurred
 */
static int svc_rdma_pull_up_reply_msg(const struct svcxprt_rdma *rdma,
				      struct svc_rdma_send_ctxt *sctxt,
				      const struct svc_rdma_pcl *write_pcl,
				      const struct xdr_buf *xdr)
{ … }

/**
 * svc_rdma_map_reply_msg - DMA map the buffer holding RPC message
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR
 * @write_pcl: Write chunk list provided by client
 * @reply_pcl: Reply chunk provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * Returns:
 *   %0 if DMA mapping was successful.
 *   %-EMSGSIZE if a buffer manipulation problem occurred
 *   %-EIO if DMA mapping failed
 *
 * The Send WR's num_sge field is set in all cases.
 */
int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
			   struct svc_rdma_send_ctxt *sctxt,
			   const struct svc_rdma_pcl *write_pcl,
			   const struct svc_rdma_pcl *reply_pcl,
			   const struct xdr_buf *xdr)
{ … }

/* The svc_rqst and all resources it owns are released as soon as
 * svc_rdma_sendto returns. Transfer pages under I/O to the ctxt
 * so they are released by the Send completion handler.
 */
static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
				   struct svc_rdma_send_ctxt *ctxt)
{ … }

/* Prepare the portion of the RPC Reply that will be transmitted
 * via RDMA Send.
 * The RPC-over-RDMA transport header is prepared
 * in sc_sges[0], and the RPC xdr_buf is prepared in following sges.
 *
 * Depending on whether a Write list or Reply chunk is present,
 * the server may Send all, a portion of, or none of the xdr_buf.
 * In the latter case, only the transport header (sc_sges[0]) is
 * transmitted.
 *
 * Assumptions:
 * - The Reply's transport header will never be larger than a page.
 */
static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
				   struct svc_rdma_send_ctxt *sctxt,
				   const struct svc_rdma_recv_ctxt *rctxt,
				   struct svc_rqst *rqstp)
{ … }

/**
 * svc_rdma_send_error_msg - Send an RPC/RDMA v1 error response
 * @rdma: controlling transport context
 * @sctxt: Send context for the response
 * @rctxt: Receive context for incoming bad message
 * @status: negative errno indicating error that occurred
 *
 * Given the client-provided Read, Write, and Reply chunks, the
 * server was not able to parse the Call or form a complete Reply.
 * Return an RDMA_ERROR message so the client can retire the RPC
 * transaction.
 *
 * The caller does not have to release @sctxt. It is released by
 * Send completion, or by this function on error.
 */
void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
			     struct svc_rdma_send_ctxt *sctxt,
			     struct svc_rdma_recv_ctxt *rctxt,
			     int status)
{ … }

/**
 * svc_rdma_sendto - Transmit an RPC reply
 * @rqstp: processed RPC request, reply XDR already in ::rq_res
 *
 * Any resources still associated with @rqstp are released upon return.
 * If no reply message was possible, the connection is closed.
 *
 * Returns:
 *   %0 if an RPC reply has been successfully posted,
 *   %-ENOMEM if a resource shortage occurred (connection is lost),
 *   %-ENOTCONN if posting failed (connection is lost).
 */
int svc_rdma_sendto(struct svc_rqst *rqstp)
{ … }

/**
 * svc_rdma_result_payload - special processing for a result payload
 * @rqstp: RPC transaction context
 * @offset: payload's byte offset in @rqstp->rq_res
 * @length: size of payload, in bytes
 *
 * Assign the passed-in result payload to the current Write chunk,
 * and advance cur_result_payload to the next Write chunk, if
 * there is one.
 *
 * Return values:
 *   %0 if successful or nothing needed to be done
 *   %-E2BIG if the payload was larger than the Write chunk
 */
int svc_rdma_result_payload(struct svc_rqst *rqstp, unsigned int offset,
			    unsigned int length)
{ … }
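
/* Illustrative sketch (not part of the upstream source): the Send path
 * described in the "Operation" comment above, reduced to its essential
 * steps. The helper name and its parameter list are hypothetical, and
 * transport header encoding, Write WR posting, and connection-loss
 * handling are omitted for brevity; only the svc_rdma_* calls documented
 * in this file are used. Posting the Send WR last is what provides the
 * completion-ordering guarantee described under "Page Management".
 * Excluded from the build on purpose.
 */
#if 0
static int example_send_reply(struct svcxprt_rdma *rdma,
			      struct svc_rqst *rqstp,
			      const struct svc_rdma_pcl *write_pcl,
			      const struct svc_rdma_pcl *reply_pcl)
{
	struct svc_rdma_send_ctxt *sctxt;
	int ret;

	/* Reserve a Send WR and its transport header buffer. */
	sctxt = svc_rdma_send_ctxt_get(rdma);
	if (!sctxt)
		return -ENOMEM;

	/* DMA-map the Reply's xdr_buf for transmission via RDMA Send. */
	ret = svc_rdma_map_reply_msg(rdma, sctxt, write_pcl, reply_pcl,
				     &rqstp->rq_res);
	if (ret < 0)
		goto put_ctxt;

	/* Transfer page ownership from the svc_rqst to the send_ctxt so
	 * that the Send completion handler can release the pages later.
	 */
	svc_rdma_save_io_pages(rqstp, sctxt);

	/* Post the Send WR last; once posted, Send completion owns sctxt. */
	ret = svc_rdma_post_send(rdma, sctxt);
	if (ret)
		goto put_ctxt;
	return 0;

put_ctxt:
	/* The forward path releases the Reply's pages on failure. */
	svc_rdma_send_ctxt_put(rdma, sctxt);
	return ret;
}
#endif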