/* net/sunrpc/xprtrdma/svc_rdma_pcl.c */

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2020 Oracle. All rights reserved.
 */

#include <linux/sunrpc/svc_rdma.h>
#include <linux/sunrpc/rpc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

/**
 * pcl_free - Release all memory associated with a parsed chunk list
 * @pcl: parsed chunk list
 *
 */
void pcl_free(struct svc_rdma_pcl *pcl)
{}

/* Allocate an empty chunk with room for @segcount segments, placed at
 * XDR @position. Returns NULL on allocation failure.
 */
static struct svc_rdma_chunk *pcl_alloc_chunk(u32 segcount, u32 position)
{
	struct svc_rdma_chunk *chunk;

	/* struct_size() guards the flexible ch_segments[] array
	 * allocation against arithmetic overflow.
	 */
	chunk = kmalloc(struct_size(chunk, ch_segments, segcount), GFP_KERNEL);
	if (!chunk)
		return NULL;

	chunk->ch_position = position;
	chunk->ch_length = 0;
	chunk->ch_payload_length = 0;
	chunk->ch_segcount = 0;
	return chunk;
}

/* Return the chunk in @pcl whose ch_position equals @position,
 * or NULL if no such chunk exists.
 */
static struct svc_rdma_chunk *
pcl_lookup_position(struct svc_rdma_pcl *pcl, u32 position)
{
	struct svc_rdma_chunk *pos;

	pcl_for_each_chunk(pos, pcl) {
		if (pos->ch_position == position)
			return pos;
	}
	return NULL;
}

static void pcl_insert_position(struct svc_rdma_pcl *pcl,
				struct svc_rdma_chunk *chunk)
{}

/* Append one decoded Read segment to @chunk and update the chunk's
 * cumulative byte length and segment count.
 */
static void pcl_set_read_segment(const struct svc_rdma_recv_ctxt *rctxt,
				 struct svc_rdma_chunk *chunk,
				 u32 handle, u32 length, u64 offset)
{
	struct svc_rdma_segment *segment;

	segment = &chunk->ch_segments[chunk->ch_segcount];
	segment->rs_handle = handle;
	segment->rs_length = length;
	segment->rs_offset = offset;

	trace_svcrdma_decode_rseg(&rctxt->rc_cid, chunk, segment);

	chunk->ch_length += length;
	chunk->ch_segcount++;
}

/**
 * pcl_alloc_call - Construct a parsed chunk list for the Call body
 * @rctxt: Ingress receive context
 * @p: Start of an un-decoded Read list
 *
 * Assumptions:
 * - The incoming Read list has already been sanity checked.
 * - cl_count is already set to the number of segments in
 *   the un-decoded list.
 * - The list might not be in order by position.
 *
 * Return values:
 *       %true: Parsed chunk list was successfully constructed, and
 *              cl_count is updated to be the number of chunks (ie.
 *              unique positions) in the Read list.
 *      %false: Memory allocation failed.
 */
bool pcl_alloc_call(struct svc_rdma_recv_ctxt *rctxt, __be32 *p)
{
	struct svc_rdma_pcl *pcl = &rctxt->rc_call_pcl;
	unsigned int i, segcount = pcl->cl_count;

	pcl->cl_count = 0;
	for (i = 0; i < segcount; i++) {
		struct svc_rdma_chunk *chunk;
		u32 handle, length;
		u64 offset;

		p++;	/* skip the list discriminator */
		p++;	/* skip the segment's position field */
		p = xdr_decode_rdma_segment(p, &handle, &length, &offset);

		/* Every Call segment belongs to the single chunk at
		 * position zero, allocated on first use.
		 */
		if (!pcl_is_empty(pcl)) {
			chunk = pcl_first_chunk(pcl);
		} else {
			chunk = pcl_alloc_chunk(segcount, 0);
			if (!chunk)
				return false;
			pcl_insert_position(pcl, chunk);
		}

		pcl_set_read_segment(rctxt, chunk, handle, length, offset);
	}

	return true;
}

/**
 * pcl_alloc_read - Construct a parsed chunk list for normal Read chunks
 * @rctxt: Ingress receive context
 * @p: Start of an un-decoded Read list
 *
 * Assumptions:
 * - The incoming Read list has already been sanity checked.
 * - cl_count is already set to the number of segments in
 *   the un-decoded list.
 * - The list might not be in order by position.
 *
 * Return values:
 *       %true: Parsed chunk list was successfully constructed, and
 *              cl_count is updated to be the number of chunks (ie.
 *              unique position values) in the Read list.
 *      %false: Memory allocation failed.
 *
 * TODO:
 * - Check for chunk range overlaps
 */
bool pcl_alloc_read(struct svc_rdma_recv_ctxt *rctxt, __be32 *p)
{
	struct svc_rdma_pcl *pcl = &rctxt->rc_read_pcl;
	unsigned int i, segcount = pcl->cl_count;

	pcl->cl_count = 0;
	for (i = 0; i < segcount; i++) {
		struct svc_rdma_chunk *chunk;
		u32 position, handle, length;
		u64 offset;

		p++;	/* skip the list discriminator */
		p = xdr_decode_read_segment(p, &position, &handle,
					    &length, &offset);

		/* Segments sharing a position accumulate in one chunk;
		 * a new position gets a chunk inserted in sorted order.
		 */
		chunk = pcl_lookup_position(pcl, position);
		if (!chunk) {
			chunk = pcl_alloc_chunk(segcount, position);
			if (!chunk)
				return false;
			pcl_insert_position(pcl, chunk);
		}

		pcl_set_read_segment(rctxt, chunk, handle, length, offset);
	}

	return true;
}

/**
 * pcl_alloc_write - Construct a parsed chunk list from a Write list
 * @rctxt: Ingress receive context
 * @pcl: Parsed chunk list to populate
 * @p: Start of an un-decoded Write list
 *
 * Assumptions:
 * - The incoming Write list has already been sanity checked, and
 * - cl_count is set to the number of chunks in the un-decoded list.
 *
 * Return values:
 *       %true: Parsed chunk list was successfully constructed.
 *      %false: Memory allocation failed.
 */
bool pcl_alloc_write(struct svc_rdma_recv_ctxt *rctxt,
		     struct svc_rdma_pcl *pcl, __be32 *p)
{
	struct svc_rdma_segment *segment;
	struct svc_rdma_chunk *chunk;
	unsigned int i, j;
	u32 segcount;

	for (i = 0; i < pcl->cl_count; i++) {
		p++;	/* skip the list discriminator */
		segcount = be32_to_cpup(p++);

		/* A Write chunk carries no position field */
		chunk = pcl_alloc_chunk(segcount, 0);
		if (!chunk)
			return false;
		list_add_tail(&chunk->ch_list, &pcl->cl_chunks);

		for (j = 0; j < segcount; j++) {
			segment = &chunk->ch_segments[j];
			p = xdr_decode_rdma_segment(p, &segment->rs_handle,
						    &segment->rs_length,
						    &segment->rs_offset);
			trace_svcrdma_decode_wseg(&rctxt->rc_cid, chunk, j);

			chunk->ch_length += segment->rs_length;
			chunk->ch_segcount++;
		}
	}
	return true;
}

static int pcl_process_region(const struct xdr_buf *xdr,
			      unsigned int offset, unsigned int length,
			      int (*actor)(const struct xdr_buf *, void *),
			      void *data)
{}

/**
 * pcl_process_nonpayloads - Process non-payload regions inside @xdr
 * @pcl: Chunk list to process
 * @xdr: xdr_buf to process
 * @actor: Function to invoke on each non-payload region
 * @data: Arguments for @actor
 *
 * This mechanism must ignore not only result payloads that were already
 * sent via RDMA Write, but also XDR padding for those payloads that
 * the upper layer has added.
 *
 * Assumptions:
 *  The xdr->len and ch_position fields are aligned to 4-byte multiples.
 *
 * Returns:
 *   On success, zero,
 *   %-EMSGSIZE on XDR buffer overflow, or
 *   The return value of @actor
 */
int pcl_process_nonpayloads(const struct svc_rdma_pcl *pcl,
			    const struct xdr_buf *xdr,
			    int (*actor)(const struct xdr_buf *, void *),
			    void *data)
{}