/* linux/fs/nfs/pnfs.c */

/*
 *  pNFS functions to call and manage layout drivers.
 *
 *  Copyright (c) 2002 [year of first publication]
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <[email protected]>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement.  The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include <linux/sort.h>
#include "internal.h"
#include "pnfs.h"
#include "iostat.h"
#include "nfs4trace.h"
#include "delegation.h"
#include "nfs42.h"
#include "nfs4_fs.h"

/* NOTE(review): the values of both macros below appear elided in this view
 * ("#define NAME" with no replacement text). NFSDBG_FACILITY normally selects
 * the NFS debug facility and PNFS_LAYOUTGET_RETRY_TIMEOUT a retry interval —
 * confirm against the full source before relying on them. */
#define NFSDBG_FACILITY
#define PNFS_LAYOUTGET_RETRY_TIMEOUT

/* Locking:
 *
 * pnfs_spinlock:
 *      protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);

/* Forward declarations for helpers defined later in this file. */
static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo);
static void pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo,
		struct list_head *free_me,
		const struct pnfs_layout_range *range,
		u32 seq);
static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
		                struct list_head *tmp_list);
static int pnfs_layout_return_on_reboot(struct pnfs_layout_hdr *lo);

/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{}

/* Like find_pnfs_driver_locked(), but presumably takes pnfs_spinlock itself —
 * body elided in this view, confirm against the full source. */
static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{}

/* Look up a layout driver by @id for external callers; body elided in this
 * view (presumably also takes a module reference — see pnfs_put_layoutdriver). */
const struct pnfs_layoutdriver_type *pnfs_find_layoutdriver(u32 id)
{}

/* Release the reference obtained via pnfs_find_layoutdriver(). */
void pnfs_put_layoutdriver(const struct pnfs_layoutdriver_type *ld)
{}

/* Detach the pNFS layout driver from server @nfss. */
void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{}

/*
 * When the server sends a list of layout types, we choose one in the order
 * given in the list below.
 *
 * FIXME: should this list be configurable in some fashion? module param?
 * 	  mount option? something else?
 */
/* NOTE(review): the initializer of ld_prefs is elided in this view; as
 * written ("=;") this declaration does not compile. */
static const u32 ld_prefs[] =;

/* Comparator (qsort-style, see linux/sort.h) ordering layout type ids by
 * their position in ld_prefs. */
static int
ld_cmp(const void *e1, const void *e2)
{}

/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @ids array of layout types supported by MDS.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
		      struct nfs_fsinfo *fsinfo)
{}

/* Register @ld_type on pnfs_modules_tbl so servers can select it; exported
 * for layout driver modules (body elided in this view). */
int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{}
/* Fix: EXPORT_SYMBOL_GPL() was missing its symbol argument; it must name
 * the immediately preceding definition. */
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);

/* Remove @ld_type from pnfs_modules_tbl; exported for layout driver modules
 * (body elided in this view). */
void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{}
/* Fix: supply the missing EXPORT_SYMBOL_GPL() argument. */
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);

/*
 * pNFS client layout cache
 */

/* NOTE(review): every function body in this section appears elided ("{}")
 * in this view; comments below describe intent inferred from names and
 * signatures — confirm against the full source. */

/* Need to hold i_lock if caller does not already hold reference */
void
pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
{}

/* Allocate a fresh layout header for @ino. */
static struct pnfs_layout_hdr *
pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{}

/* Release the memory backing @lo. */
static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{}

/* Disconnect @lo from its owning inode. */
static void
pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
{}

/* Drop a reference on @lo, tearing it down on the final put. */
void
pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
{}

/* Grab a reference to the inode owning @lo, if still possible. */
static struct inode *
pnfs_grab_inode_layout_hdr(struct pnfs_layout_hdr *lo)
{}

/*
 * Compare 2 layout stateid sequence ids, to see which is newer,
 * taking into account wraparound issues.
 */
static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
{}

/* Advance @lo's stateid barrier to @newseq. */
static void pnfs_barrier_update(struct pnfs_layout_hdr *lo, u32 newseq)
{}

/* Record pending layoutreturn info (@iomode, @seq) on @lo. */
static void
pnfs_set_plh_return_info(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode,
			 u32 seq)
{}

/* Clear any recorded layoutreturn info on @lo. */
static void
pnfs_clear_layoutreturn_info(struct pnfs_layout_hdr *lo)
{}

/* Clear the layoutreturn wait bit and wake any waiters. */
static void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
{}

/* Reset @lseg state, queueing it on @free_me when it is to be freed. */
static void
pnfs_clear_lseg_state(struct pnfs_layout_segment *lseg,
		struct list_head *free_me)
{}

/*
 * Update the seqid of a layout stateid after receiving
 * NFS4ERR_OLD_STATEID
 */
bool nfs4_layout_refresh_old_stateid(nfs4_stateid *dst,
		struct pnfs_layout_range *dst_range,
		struct inode *inode)
{}

/*
 * Mark a pnfs_layout_hdr and all associated layout segments as invalid
 *
 * In order to continue using the pnfs_layout_hdr, a full recovery
 * is required.
 * Note that caller must hold inode->i_lock.
 */
int
pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo,
		struct list_head *lseg_list)
{}

/* Mark matching segments of @lo for return rather than invalidation. */
static int pnfs_mark_layout_stateid_return(struct pnfs_layout_hdr *lo,
					   struct list_head *lseg_list,
					   enum pnfs_iomode iomode, u32 seq)
{}

/* Map an iomode to the corresponding layout fail bit. */
static int
pnfs_iomode_to_fail_bit(u32 iomode)
{}

/* Set @fail_bit on @lo. */
static void
pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{}

/* Clear @fail_bit on @lo. */
static void
pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{}

/* Mark I/O of @iomode as failed on @lo. */
static void
pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{}

/* Has I/O of @iomode been marked failed on @lo? */
static bool
pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{}

/* Initialize @lseg with @range/@stateid and associate it with @lo. */
static void
pnfs_init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg,
		const struct pnfs_layout_range *range,
		const nfs4_stateid *stateid)
{}

/* Free @lseg (presumably via the layout driver's free_lseg op — confirm). */
static void pnfs_free_lseg(struct pnfs_layout_segment *lseg)
{}

/* Unlink @lseg from @lo's segment list. */
static void
pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg)
{}

/* Keep @lseg cached for a later layoutreturn instead of freeing it now. */
static bool
pnfs_cache_lseg_for_layoutreturn(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg)
{}

/* Release a reference on @lseg; the final put unlinks and frees or caches
 * it for layoutreturn (body elided in this view). */
void
pnfs_put_lseg(struct pnfs_layout_segment *lseg)
{}
/* Fix: supply the missing EXPORT_SYMBOL_GPL() argument. */
EXPORT_SYMBOL_GPL(pnfs_put_lseg);

/*
 * is l2 fully contained in l1?
 *   start1                             end1
 *   [----------------------------------)
 *           start2           end2
 *           [----------------)
 */
static bool
pnfs_lseg_range_contained(const struct pnfs_layout_range *l1,
		 const struct pnfs_layout_range *l2)
{}

/* Drop a segment reference; on reaching zero, unlink @lseg and queue it on
 * @tmp_list for later freeing. */
static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
		struct list_head *tmp_list)
{}

/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
			     struct list_head *tmp_list)
{}

/* Should a segment with @lseg_range be freed for a recall of @recall_range? */
static bool
pnfs_should_free_range(const struct pnfs_layout_range *lseg_range,
		 const struct pnfs_layout_range *recall_range)
{}

/* Does @lseg match the recall described by @recall_range and @seq? */
static bool
pnfs_match_lseg_recall(const struct pnfs_layout_segment *lseg,
		const struct pnfs_layout_range *recall_range,
		u32 seq)
{}

/**
 * pnfs_mark_matching_lsegs_invalid - tear down lsegs or mark them for later
 * @lo: layout header containing the lsegs
 * @tmp_list: list head where doomed lsegs should go
 * @recall_range: optional recall range argument to match (may be NULL)
 * @seq: only invalidate lsegs obtained prior to this sequence (may be 0)
 *
 * Walk the list of lsegs in the layout header, and tear down any that should
 * be destroyed. If "recall_range" is specified then the segment must match
 * that range. If "seq" is non-zero, then only match segments that were handed
 * out at or before that sequence.
 *
 * Returns number of matching invalid lsegs remaining in list after scanning
 * it and purging them.
 */
int
pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
			    struct list_head *tmp_list,
			    const struct pnfs_layout_range *recall_range,
			    u32 seq)
{}

/* Free segments whose return (matching @range/@seq) has completed. */
static void
pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo,
		struct list_head *free_me,
		const struct pnfs_layout_range *range,
		u32 seq)
{}

/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{}

/* Tear down @nfsi's layout cache, returning the detached header (if any). */
static struct pnfs_layout_hdr *__pnfs_destroy_layout(struct nfs_inode *nfsi)
{}

/* Destroy the cached layout for @nfsi (body elided in this view; wraps
 * __pnfs_destroy_layout above). */
void pnfs_destroy_layout(struct nfs_inode *nfsi)
{}
/* Fix: supply the missing EXPORT_SYMBOL_GPL() argument. */
EXPORT_SYMBOL_GPL(pnfs_destroy_layout);

/* Has @lo already been removed from @nfsi? */
static bool pnfs_layout_removed(struct nfs_inode *nfsi,
				struct pnfs_layout_hdr *lo)
{}

/* Final teardown of @nfsi's layout state (inode destruction path). */
void pnfs_destroy_layout_final(struct nfs_inode *nfsi)
{}

/* Queue @inode's layout on @layout_list for bulk destruction. */
static bool
pnfs_layout_add_bulk_destroy_list(struct inode *inode,
		struct list_head *layout_list)
{}

/* Caller must hold rcu_read_lock and clp->cl_lock */
static int
pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
		struct nfs_server *server,
		struct list_head *layout_list)
	__must_hold(&clp->cl_lock)
	__must_hold(RCU)
{}

/* Destroy every layout previously collected on @layout_list. */
static int
pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
				   enum pnfs_layout_destroy_mode mode)
{}

/* Destroy all of @clp's layouts belonging to filesystem @fsid. */
int pnfs_layout_destroy_byfsid(struct nfs_client *clp, struct nfs_fsid *fsid,
			       enum pnfs_layout_destroy_mode mode)
{}

/* Collect every layout held under client @clp onto @list. */
static void pnfs_layout_build_destroy_list_byclient(struct nfs_client *clp,
						    struct list_head *list)
{}

/* Destroy the layouts on @list according to @mode. */
static int pnfs_layout_do_destroy_byclid(struct nfs_client *clp,
					 struct list_head *list,
					 enum pnfs_layout_destroy_mode mode)
{}

/* Destroy all layouts held under client @clp. */
int pnfs_layout_destroy_byclid(struct nfs_client *clp,
			       enum pnfs_layout_destroy_mode mode)
{}

/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{}

/* Collect @clp's layouts needing recovery after a server reboot. */
static void pnfs_layout_build_recover_list_byclient(struct nfs_client *clp,
						    struct list_head *list)
{}

/* Run reboot recovery for every layout on @list. */
static int pnfs_layout_bulk_list_reboot(struct list_head *list)
{}

/* State-manager entry point: recover @clp's layouts after a server reboot. */
int pnfs_layout_handle_reboot(struct nfs_client *clp)
{}

/* Record the credential used for layout operations on @lo. */
static void
pnfs_set_layout_cred(struct pnfs_layout_hdr *lo, const struct cred *cred)
{}

/* update lo->plh_stateid with new if is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
			const struct cred *cred, bool update_barrier)
{}

/* Is use of @stateid currently blocked by @lo's stateid barrier? */
static bool
pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
		const nfs4_stateid *stateid)
{}

/* lget is set to 1 if called from inside send_layoutget call chain */
static bool
pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo)
{}

/* Find the nfs_server to send a LAYOUTGET to, from @inode or (when NULL,
 * presumably) from @ctx — confirm against the full source. */
static struct nfs_server *
pnfs_find_server(struct inode *inode, struct nfs_open_context *ctx)
{}

/* Free an array of @size pages allocated by nfs4_alloc_pages(). */
static void nfs4_free_pages(struct page **pages, size_t size)
{}

/* Allocate an array of @size pages (used for layout XDR buffers). */
static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
{}

/* Allocate and initialize LAYOUTGET arguments for @ino and @range. */
static struct nfs4_layoutget *
pnfs_alloc_init_layoutget_args(struct inode *ino,
	   struct nfs_open_context *ctx,
	   const nfs4_stateid *stateid,
	   const struct pnfs_layout_range *range,
	   gfp_t gfp_flags)
{}

/* Free everything allocated by pnfs_alloc_init_layoutget_args(). */
void pnfs_layoutget_free(struct nfs4_layoutget *lgp)
{}

/* Cancel pending layoutcommit state on @inode, collecting lsegs on @head. */
static void pnfs_clear_layoutcommit(struct inode *inode,
		struct list_head *head)
{}

/* Arrange for a layoutreturn matching @arg_stateid/@range to be retried
 * later; lock-held variant. */
static void
pnfs_layoutreturn_retry_later_locked(struct pnfs_layout_hdr *lo,
				     const nfs4_stateid *arg_stateid,
				     const struct pnfs_layout_range *range)
{}

/* Unlocked entry point for pnfs_layoutreturn_retry_later_locked(). */
void pnfs_layoutreturn_retry_later(struct pnfs_layout_hdr *lo,
				   const nfs4_stateid *arg_stateid,
				   const struct pnfs_layout_range *range)
{}

/* Complete a layoutreturn: free the returned lsegs matching
 * @arg_stateid/@range and install the new @stateid. */
void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr *lo,
		const nfs4_stateid *arg_stateid,
		const struct pnfs_layout_range *range,
		const nfs4_stateid *stateid)
{}

/* Decide whether a layoutreturn should be sent for @lo; on success fills
 * in @stateid, @cred and @iomode for the caller. */
static bool
pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo,
		nfs4_stateid *stateid,
		const struct cred **cred,
		enum pnfs_iomode *iomode)
{}

/* Fill in LAYOUTRETURN @args from @lo, @stateid and @iomode. */
static void
pnfs_init_layoutreturn_args(struct nfs4_layoutreturn_args *args,
		struct pnfs_layout_hdr *lo,
		const nfs4_stateid *stateid,
		enum pnfs_iomode iomode)
{}

/* Issue a LAYOUTRETURN for @lo. */
static int
pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo,
		       const nfs4_stateid *stateid,
		       const struct cred **pcred,
		       enum pnfs_iomode iomode,
		       unsigned int flags)
{}

/* Return true if layoutreturn is needed */
static bool
pnfs_layout_need_return(struct pnfs_layout_hdr *lo)
{}

/* Send any required layoutreturn before the final put of @lo. */
static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo)
{}

/*
 * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
 * when the layout segment list is empty.
 *
 * Note that a pnfs_layout_hdr can exist with an empty layout segment
 * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the
 * deviceid is marked invalid.
 */
int
_pnfs_return_layout(struct inode *ino)
{}

/* Commit outstanding data, then return the layout for @inode. */
int
pnfs_commit_and_return_layout(struct inode *inode)
{}

/* Return @lo's layout as part of server-reboot recovery. */
static int pnfs_layout_return_on_reboot(struct pnfs_layout_hdr *lo)
{}

/* Prepare a return-on-close (ROC) layoutreturn for @ino, if applicable. */
bool pnfs_roc(struct inode *ino,
		struct nfs4_layoutreturn_args *args,
		struct nfs4_layoutreturn_res *res,
		const struct cred *cred)
{}

/* Post-process a completed ROC layoutreturn RPC. */
int pnfs_roc_done(struct rpc_task *task, struct nfs4_layoutreturn_args **argpp,
		  struct nfs4_layoutreturn_res **respp, int *ret)
{}

/* Release resources taken by pnfs_roc(). */
void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
		      struct nfs4_layoutreturn_res *res, int ret)
{}

/* Wait (via @task) for an outstanding layoutreturn on @ino to finish. */
bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task)
{}

/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
 */
static s64
pnfs_lseg_range_cmp(const struct pnfs_layout_range *l1,
	   const struct pnfs_layout_range *l2)
{}

/* Cache-ordering predicate: should @l1 sort after @l2? */
static bool
pnfs_lseg_range_is_after(const struct pnfs_layout_range *l1,
		const struct pnfs_layout_range *l2)
{}

/* Merge callback used when segments are never merged (per its name —
 * body elided in this view). */
static bool
pnfs_lseg_no_merge(struct pnfs_layout_segment *lseg,
		struct pnfs_layout_segment *old)
{}

/* Insert @lseg into @lo's cache using the caller-supplied @is_after ordering
 * and @do_merge merge callbacks; displaced segments go on @free_me (body
 * elided in this view). Exported for layout drivers. */
void
pnfs_generic_layout_insert_lseg(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg,
		   bool (*is_after)(const struct pnfs_layout_range *,
			   const struct pnfs_layout_range *),
		   bool (*do_merge)(struct pnfs_layout_segment *,
			   struct pnfs_layout_segment *),
		   struct list_head *free_me)
{}
/* Fix: supply the missing EXPORT_SYMBOL_GPL() argument. */
EXPORT_SYMBOL_GPL(pnfs_generic_layout_insert_lseg);

/* Insert @lseg using the default ordering/merge policy. */
static void
pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg,
		   struct list_head *free_me)
{}

/* Allocate and initialize a layout header for @ino. */
static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino,
		      struct nfs_open_context *ctx,
		      gfp_t gfp_flags)
{}

/* Find @ino's layout header, allocating one if none exists yet.
 * Per the annotations, may drop and re-take ino->i_lock. */
static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino,
		       struct nfs_open_context *ctx,
		       gfp_t gfp_flags)
	__releases(&ino->i_lock)
	__acquires(&ino->i_lock)
{}

/*
 * iomode matching rules:
 * iomode	lseg	strict match
 *                      iomode
 * -----	-----	------ -----
 * ANY		READ	N/A    true
 * ANY		RW	N/A    true
 * RW		READ	N/A    false
 * RW		RW	N/A    true
 * READ		READ	N/A    true
 * READ		RW	true   false
 * READ		RW	false  true
 */
static bool
pnfs_lseg_range_match(const struct pnfs_layout_range *ls_range,
		 const struct pnfs_layout_range *range,
		 bool strict_iomode)
{}

/*
 * lookup range in layout
 */
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_range *range,
		bool strict_iomode)
{}

/*
 * Use mdsthreshold hints set at each OPEN to determine if I/O should go
 * to the MDS or over pNFS
 *
 * The nfs_inode read_io and write_io fields are cumulative counters reset
 * when there are no layout segments. Note that in pnfs_update_layout iomode
 * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
 * WRITE request.
 *
 * A return of true means use MDS I/O.
 *
 * From rfc 5661:
 * If a file's size is smaller than the file size threshold, data accesses
 * SHOULD be sent to the metadata server.  If an I/O request has a length that
 * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
 * server.  If both file size and I/O size are provided, the client SHOULD
 * reach or exceed both thresholds before sending its read or write
 * requests to the data server.
 */
static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
				     struct inode *ino, int iomode)
{}

/* Wait until it is safe to retry a failed LAYOUTGET on @lo. */
static int pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
{}

/* Account the start of an outstanding LAYOUTGET on @lo. */
static void nfs_layoutget_begin(struct pnfs_layout_hdr *lo)
{}

/* Account the completion of an outstanding LAYOUTGET on @lo. */
static void nfs_layoutget_end(struct pnfs_layout_hdr *lo)
{}

/* Is this the first LAYOUTGET for @lo? */
static bool pnfs_is_first_layoutget(struct pnfs_layout_hdr *lo)
{}

/* Clear the first-layoutget marker on @lo. */
static void pnfs_clear_first_layoutget(struct pnfs_layout_hdr *lo)
{}

/* Link @lo onto @server's list of layouts. */
static void _add_to_server_list(struct pnfs_layout_hdr *lo,
				struct nfs_server *server)
{}

/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 */
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
		   struct nfs_open_context *ctx,
		   loff_t pos,
		   u64 count,
		   enum pnfs_iomode iomode,
		   bool strict_iomode,
		   gfp_t gfp_flags)
{}
/* Fix: comment typo ("retreived") and the missing EXPORT_SYMBOL_GPL()
 * argument. */
EXPORT_SYMBOL_GPL(pnfs_update_layout);

/* Validate that @range describes a sane offset/length pair. */
static bool
pnfs_sanity_check_layout_range(struct pnfs_layout_range *range)
{}

/* Obtain (allocating if needed) an empty layout header for @ino. */
static struct pnfs_layout_hdr *
_pnfs_grab_empty_layout(struct inode *ino, struct nfs_open_context *ctx)
{}

/* Prepare an open-time LAYOUTGET when the inode is already known/attached. */
static void _lgopen_prepare_attached(struct nfs4_opendata *data,
				     struct nfs_open_context *ctx)
{}

/* Prepare an open-time LAYOUTGET before the inode is known ("floating"). */
static void _lgopen_prepare_floating(struct nfs4_opendata *data,
				     struct nfs_open_context *ctx)
{}

/* Entry point: attach a LAYOUTGET to the compound sent at OPEN time. */
void pnfs_lgopen_prepare(struct nfs4_opendata *data,
			 struct nfs_open_context *ctx)
{}

/* Process the LAYOUTGET result piggybacked on an OPEN reply. */
void pnfs_parse_lgopen(struct inode *ino, struct nfs4_layoutget *lgp,
		       struct nfs_open_context *ctx)
{}

/* Release a LAYOUTGET-on-OPEN that will not be processed. */
void nfs4_lgopen_release(struct nfs4_layoutget *lgp)
{}

/* Turn a successful LAYOUTGET reply into a cached layout segment. */
struct pnfs_layout_segment *
pnfs_layout_process(struct nfs4_layoutget *lgp)
{}

/**
 * pnfs_mark_matching_lsegs_return - Free or return matching layout segments
 * @lo: pointer to layout header
 * @tmp_list: list header to be used with pnfs_free_lseg_list()
 * @return_range: describe layout segment ranges to be returned
 * @seq: stateid seqid to match
 *
 * This function is mainly intended for use by layoutrecall. It attempts
 * to free the layout segment immediately, or else to mark it for return
 * as soon as its reference count drops to zero.
 *
 * Returns
 * - 0: a layoutreturn needs to be scheduled.
 * - EBUSY: there are layout segment that are still in use.
 * - ENOENT: there are no layout segments that need to be returned.
 */
int
pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
				struct list_head *tmp_list,
				const struct pnfs_layout_range *return_range,
				u32 seq)
{}

/* Schedule a layoutreturn of @range on @inode's layout. */
static void
pnfs_mark_layout_for_return(struct inode *inode,
			    const struct pnfs_layout_range *range)
{}

/* Error path: mark @lseg's layout range on @inode for return (body elided
 * in this view). Exported for layout drivers. */
void pnfs_error_mark_layout_for_return(struct inode *inode,
				       struct pnfs_layout_segment *lseg)
{}
/* Fix: supply the missing EXPORT_SYMBOL_GPL() argument. */
EXPORT_SYMBOL_GPL(pnfs_error_mark_layout_for_return);

/* Can @lo be returned to the server right now? */
static bool
pnfs_layout_can_be_returned(struct pnfs_layout_hdr *lo)
{}

/* Find the first cached segment of @lo matching @range and @iomode. */
static struct pnfs_layout_segment *
pnfs_find_first_lseg(struct pnfs_layout_hdr *lo,
		     const struct pnfs_layout_range *range,
		     enum pnfs_iomode iomode)
{}

/* Find open file states whose mode matches that of the range */
static bool
pnfs_should_return_unused_layout(struct pnfs_layout_hdr *lo,
				 const struct pnfs_layout_range *range)
{}

/* Per-server worker used by pnfs_layout_return_unused_byclid(). */
static int pnfs_layout_return_unused_byserver(struct nfs_server *server,
					      void *data)
{}

/* Return all unused layouts of the given @iomode held under client @clp. */
void
pnfs_layout_return_unused_byclid(struct nfs_client *clp,
				 enum pnfs_iomode iomode)
{}

/* Check if we have a valid layout but if there isn't an intersection
 * between the request and the pgio->pg_lseg, put this pgio->pg_lseg away.
 */
void
pnfs_generic_pg_check_layout(struct nfs_pageio_descriptor *pgio,
			     struct nfs_page *req)
{}
/* Fix: duplicated words in the comment ("we have we have") and the missing
 * EXPORT_SYMBOL_GPL() argument. */
EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_layout);

/* pageio init hook for reads: select a layout segment for @req (body elided
 * in this view). */
void
pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{}
/* Fix: supply the missing EXPORT_SYMBOL_GPL() argument. */
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);

/* pageio init hook for writes: select a layout segment for @req given the
 * expected write size @wb_size (body elided in this view). */
void
pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
			   struct nfs_page *req, u64 wb_size)
{}
/* Fix: supply the missing EXPORT_SYMBOL_GPL() argument. */
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);

/* pageio cleanup hook: release the descriptor's layout segment state (body
 * elided in this view). */
void
pnfs_generic_pg_cleanup(struct nfs_pageio_descriptor *desc)
{}
/* Fix: supply the missing EXPORT_SYMBOL_GPL() argument. */
EXPORT_SYMBOL_GPL(pnfs_generic_pg_cleanup);

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
size_t
pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio,
		     struct nfs_page *prev, struct nfs_page *req)
{}
/* Fix: supply the missing EXPORT_SYMBOL_GPL() argument. */
EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);

/* Resend a failed pNFS write described by @hdr through the MDS (body elided
 * in this view). */
int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *hdr)
{}
/* Fix: supply the missing EXPORT_SYMBOL_GPL() argument. */
EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);

/* Handle a layout-driver write error reported in @hdr. */
static void pnfs_ld_handle_write_error(struct nfs_pgio_header *hdr)
{}

/*
 * Called by non rpc-based layout drivers
 */
void pnfs_ld_write_done(struct nfs_pgio_header *hdr)
{}
/* Fix: supply the missing EXPORT_SYMBOL_GPL() argument. */
EXPORT_SYMBOL_GPL(pnfs_ld_write_done);

/* Fall back to writing @hdr's pages through the MDS. */
static void
pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
		struct nfs_pgio_header *hdr)
{}

/* Attempt the write via the layout driver; returns a pnfs_try_status. */
static enum pnfs_try_status
pnfs_try_to_write_data(struct nfs_pgio_header *hdr,
			const struct rpc_call_ops *call_ops,
			struct pnfs_layout_segment *lseg,
			int how)
{}

/* Dispatch a write either through pNFS or through the MDS. */
static void
pnfs_do_write(struct nfs_pageio_descriptor *desc,
	      struct nfs_pgio_header *hdr, int how)
{}

/* Free a write pgio header. */
static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
{}

/* pageio "doio" hook for writes (body elided in this view). */
int
pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{}
/* Fix: supply the missing EXPORT_SYMBOL_GPL() argument. */
EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);

/* Resend a failed pNFS read described by @hdr through the MDS (body elided
 * in this view). */
int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *hdr)
{}
/* Fix: supply the missing EXPORT_SYMBOL_GPL() argument. */
EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);

/* Handle a layout-driver read error reported in @hdr. */
static void pnfs_ld_handle_read_error(struct nfs_pgio_header *hdr)
{}

/*
 * Called by non rpc-based layout drivers
 */
void pnfs_ld_read_done(struct nfs_pgio_header *hdr)
{}
/* Fix: supply the missing EXPORT_SYMBOL_GPL() argument. */
EXPORT_SYMBOL_GPL(pnfs_ld_read_done);

/* Fall back to reading @hdr's pages through the MDS. */
static void
pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
		struct nfs_pgio_header *hdr)
{}

/*
 * Call the appropriate parallel I/O subsystem read function.
 */
static enum pnfs_try_status
pnfs_try_to_read_data(struct nfs_pgio_header *hdr,
		       const struct rpc_call_ops *call_ops,
		       struct pnfs_layout_segment *lseg)
{}

/* Resend all requests through pnfs. */
void pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr,
			   unsigned int mirror_idx)
{}
/* Fix: supply the missing EXPORT_SYMBOL_GPL() argument. */
EXPORT_SYMBOL_GPL(pnfs_read_resend_pnfs);

/* Dispatch a read either through pNFS or through the MDS. */
static void
pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr)
{}

/* Free a read pgio header. */
static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
{}

/* pageio "doio" hook for reads (body elided in this view). */
int
pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{}
/* Fix: supply the missing EXPORT_SYMBOL_GPL() argument. */
EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);

/* Clear layoutcommit-in-progress state on @inode. */
static void pnfs_clear_layoutcommitting(struct inode *inode)
{}

/*
 * There can be multiple RW segments.
 */
static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
{}

/* Undo pnfs_list_write_lseg() once the layoutcommit has completed. */
static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
{}

/* Mark @lseg's layout as failed for its iomode (body elided in this view).
 * Exported for layout drivers. */
void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
{}
/* Fix: supply the missing EXPORT_SYMBOL_GPL() argument. */
EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);

/* Record that written data on @inode (through @end_pos, via @lseg) needs a
 * LAYOUTCOMMIT (body elided in this view). */
void
pnfs_set_layoutcommit(struct inode *inode, struct pnfs_layout_segment *lseg,
		loff_t end_pos)
{}
/* Fix: supply the missing EXPORT_SYMBOL_GPL() argument. */
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);

/* Clean up after a LAYOUTCOMMIT RPC completes. */
void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
{}

/*
 * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
 * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
 * data to disk to allow the server to recover the data if it crashes.
 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
 * is off, and a COMMIT is sent to a data server, or
 * if WRITEs to a data server return NFS_DATA_SYNC.
 */
int
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{}
/* Fix: supply the missing EXPORT_SYMBOL_GPL() argument. */
EXPORT_SYMBOL_GPL(pnfs_layoutcommit_inode);

/* fsync/datasync entry point for pNFS files (body elided in this view). */
int
pnfs_generic_sync(struct inode *inode, bool datasync)
{}
/* Fix: supply the missing EXPORT_SYMBOL_GPL() argument. */
EXPORT_SYMBOL_GPL(pnfs_generic_sync);

/* Allocate an mdsthreshold hint structure (see pnfs_within_mdsthreshold). */
struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
{}

#if IS_ENABLED(CONFIG_NFS_V4_2)
/* Report NFSv4.2 layout statistics for @inode (body elided in this view). */
int
pnfs_report_layoutstat(struct inode *inode, gfp_t gfp_flags)
{}
/* Fix: supply the missing EXPORT_SYMBOL_GPL() argument. */
EXPORT_SYMBOL_GPL(pnfs_report_layoutstat);
#endif

/* Interval for periodic layoutstats reporting, tunable at runtime via
 * module parameter (presumably seconds; 0 selects a default — confirm
 * against the full source). */
unsigned int layoutstats_timer;
module_param(layoutstats_timer, uint, 0644);
/* Fix: supply the missing EXPORT_SYMBOL_GPL() argument. */
EXPORT_SYMBOL_GPL(layoutstats_timer);