/* linux/fs/nfs/write.c — stray path line from extraction, converted to a comment */

// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/write.c
 *
 * Write file data over NFS.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <[email protected]>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/migrate.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/backing-dev.h>
#include <linux/export.h>
#include <linux/freezer.h>
#include <linux/wait.h>
#include <linux/iversion.h>
#include <linux/filelock.h>

#include <linux/uaccess.h>
#include <linux/sched/mm.h>

#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "nfs4_fs.h"
#include "fscache.h"
#include "pnfs.h"

#include "nfstrace.h"

/* Debug facility tag for dprintk(); value was elided in this snapshot.
 * NOTE(review): restored to the upstream value — confirm against the tree
 * this skeleton was generated from. */
#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

/* Minimum mempool reserves for write/commit headers; values were elided in
 * this snapshot and restored from upstream — confirm. */
#define MIN_POOL_WRITE		(32)
#define MIN_POOL_COMMIT		(4)

/* Completion tracker for a batch of async writes; members elided in this
 * skeleton snapshot (the kref-based get/put helpers below operate on it). */
struct nfs_io_completion {};

/*
 * Local function declarations
 */
static void nfs_redirty_request(struct nfs_page *req);
static const struct rpc_call_ops nfs_commit_ops;
static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
static const struct nfs_rw_ops nfs_rw_write_ops;
static void nfs_inode_remove_request(struct nfs_page *req);
static void nfs_clear_request_commit(struct nfs_commit_info *cinfo,
				     struct nfs_page *req);
static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
				      struct inode *inode);

/* Slab caches and mempools for write (wdata) and commit (cdata) structures;
 * presumably set up in nfs_init_writepagecache() below — body elided there. */
static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static struct kmem_cache *nfs_cdata_cachep;
static mempool_t *nfs_commit_mempool;

/* Allocate a struct nfs_commit_data (presumably from nfs_commit_mempool);
 * body elided in this skeleton snapshot. */
struct nfs_commit_data *nfs_commitdata_alloc(void)
{}
/* NOTE(review): restored the symbol argument dropped during extraction —
 * an argument-less EXPORT_SYMBOL_GPL() does not compile. */
EXPORT_SYMBOL_GPL(nfs_commitdata_alloc);

/* Free a struct nfs_commit_data obtained from nfs_commitdata_alloc();
 * body elided in this skeleton snapshot. */
void nfs_commit_free(struct nfs_commit_data *p)
{}
/* NOTE(review): restored the symbol argument dropped during extraction. */
EXPORT_SYMBOL_GPL(nfs_commit_free);

/* Allocate a write pgio header, presumably from nfs_wdata_mempool;
 * body elided in this skeleton snapshot. */
static struct nfs_pgio_header *nfs_writehdr_alloc(void)
{}

/* Release a write pgio header back to its pool; body elided in this snapshot. */
static void nfs_writehdr_free(struct nfs_pgio_header *hdr)
{}

/* Allocate an nfs_io_completion with @gfp_flags; body elided in this snapshot. */
static struct nfs_io_completion *nfs_io_completion_alloc(gfp_t gfp_flags)
{}

/* Initialise @ioc with a completion callback and its opaque @data;
 * body elided in this skeleton snapshot. */
static void nfs_io_completion_init(struct nfs_io_completion *ioc,
		void (*complete)(void *), void *data)
{}

/* kref release callback for nfs_io_completion; body elided in this snapshot. */
static void nfs_io_completion_release(struct kref *kref)
{}

/* Take a reference on @ioc; body elided in this skeleton snapshot. */
static void nfs_io_completion_get(struct nfs_io_completion *ioc)
{}

/* Drop a reference on @ioc (releases via nfs_io_completion_release when it
 * hits zero — confirm, body elided in this snapshot). */
static void nfs_io_completion_put(struct nfs_io_completion *ioc)
{}

/**
 * nfs_folio_find_head_request - find head request associated with a folio
 * @folio: pointer to folio
 *
 * must be called while holding the inode lock.
 *
 * returns matching head request with reference held, or NULL if not found.
 */
/* Implementation elided in this skeleton snapshot; contract documented above. */
static struct nfs_page *nfs_folio_find_head_request(struct folio *folio)
{}

/* Adjust the file length if we're writing beyond the end */
/* Implementation elided in this skeleton snapshot. */
static void nfs_grow_file(struct folio *folio, unsigned int offset,
			  unsigned int count)
{}

/* A writeback failed: mark the page as bad, and invalidate the page cache */
/* Implementation elided in this skeleton snapshot; see comment above. */
static void nfs_set_pageerror(struct address_space *mapping)
{}

/* Record a writeback @error against @folio's mapping; body elided in this
 * skeleton snapshot. */
static void nfs_mapping_set_error(struct folio *folio, int error)
{}

/*
 * nfs_page_group_search_locked
 * @head - head request of page group
 * @page_offset - offset into page
 *
 * Search page group with head @head to find a request that contains the
 * page offset @page_offset.
 *
 * Returns a pointer to the first matching nfs request, or NULL if no
 * match is found.
 *
 * Must be called with the page group lock held
 */
/* Implementation elided in this skeleton snapshot; contract documented above. */
static struct nfs_page *
nfs_page_group_search_locked(struct nfs_page *head, unsigned int page_offset)
{}

/*
 * nfs_page_group_covers_page
 * @head - head request of page group
 *
 * Return true if the page group with head @head covers the whole page,
 * returns false otherwise
 */
/* Implementation elided in this skeleton snapshot; contract documented above. */
static bool nfs_page_group_covers_page(struct nfs_page *req)
{}

/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
/* Implementation elided in this skeleton snapshot; see comment above. */
static void nfs_mark_uptodate(struct nfs_page *req)
{}

/* Map writeback-control state to an RPC flush priority; body elided in this
 * skeleton snapshot — confirm semantics against upstream. */
static int wb_priority(struct writeback_control *wbc)
{}

/*
 * NFS congestion control
 */

/* Congestion limit, presumably in kilobytes (per the name) — confirm. */
int nfs_congestion_kb;

/* NOTE(review): threshold expressions were elided in this snapshot and
 * restored from upstream — confirm against the source tree. */
#define NFS_CONGESTION_ON_THRESH	(nfs_congestion_kb >> (PAGE_SHIFT-10))
#define NFS_CONGESTION_OFF_THRESH	\
	(NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))

/* Mark @folio under writeback (congestion accounting presumably happens here
 * — confirm); body elided in this skeleton snapshot. */
static void nfs_folio_set_writeback(struct folio *folio)
{}

/* Clear the writeback state of @folio; body elided in this skeleton snapshot. */
static void nfs_folio_end_writeback(struct folio *folio)
{}

/* End writeback for the folio backing @req; body elided in this snapshot. */
static void nfs_page_end_writeback(struct nfs_page *req)
{}

/*
 * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests
 *
 * @destroy_list - request list (using wb_this_page) terminated by @old_head
 * @old_head - the old head of the list
 *
 * All subrequests must be locked and removed from all lists, so at this point
 * they are only "active" in this function, and possibly in nfs_wait_on_request
 * with a reference held by some other context.
 */
/* Implementation elided in this skeleton snapshot; contract documented above. */
static void
nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
				 struct nfs_page *old_head,
				 struct inode *inode)
{}

/*
 * nfs_join_page_group - destroy subrequests of the head req
 * @head: the page used to lookup the "page group" of nfs_page structures
 * @inode: Inode to which the request belongs.
 *
 * This function joins all sub requests to the head request by first
 * locking all requests in the group, cancelling any pending operations
 * and finally updating the head request to cover the whole range covered by
 * the (former) group.  All subrequests are removed from any write or commit
 * lists, unlinked from the group and destroyed.
 */
/* Implementation elided in this skeleton snapshot; contract documented above. */
void nfs_join_page_group(struct nfs_page *head, struct nfs_commit_info *cinfo,
			 struct inode *inode)
{}

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * Interruptible by fatal signals only.
 * The user is responsible for holding a count on the request.
 */
/* Implementation elided in this skeleton snapshot; contract documented above. */
static int nfs_wait_on_request(struct nfs_page *req)
{}

/*
 * nfs_unroll_locks -  unlock all newly locked reqs and wait on @req
 * @head: head request of page group, must be holding head lock
 * @req: request that couldn't lock and needs to wait on the req bit lock
 *
 * This is a helper function for nfs_lock_and_join_requests
 * returns 0 on success, < 0 on error.
 */
/* Implementation elided in this skeleton snapshot; contract documented above.
 * NOTE(review): the comment above says "returns 0 on success, < 0 on error"
 * but the function is void — stale doc upstream, or signature drift; confirm. */
static void
nfs_unroll_locks(struct nfs_page *head, struct nfs_page *req)
{}

/*
 * nfs_page_group_lock_subreq -  try to lock a subrequest
 * @head: head request of page group
 * @subreq: request to lock
 *
 * This is a helper function for nfs_lock_and_join_requests which
 * must be called with the head request and page group both locked.
 * On error, it returns with the page group unlocked.
 */
/* Implementation elided in this skeleton snapshot; contract documented above. */
static int
nfs_page_group_lock_subreq(struct nfs_page *head, struct nfs_page *subreq)
{}

/*
 * nfs_lock_and_join_requests - join all subreqs to the head req
 * @folio: the folio used to lookup the "page group" of nfs_page structures
 *
 * This function joins all sub requests to the head request by first
 * locking all requests in the group, cancelling any pending operations
 * and finally updating the head request to cover the whole range covered by
 * the (former) group.  All subrequests are removed from any write or commit
 * lists, unlinked from the group and destroyed.
 *
 * Returns a locked, referenced pointer to the head request - which after
 * this call is guaranteed to be the only request associated with the page.
 * Returns NULL if no requests are found for @folio, or a ERR_PTR if an
 * error was encountered.
 */
/* Implementation elided in this skeleton snapshot; contract documented above. */
static struct nfs_page *nfs_lock_and_join_requests(struct folio *folio)
{}

/* Handle a write failure for @req with @error; body elided in this snapshot. */
static void nfs_write_error(struct nfs_page *req, int error)
{}

/*
 * Find an associated nfs write request, and prepare to flush it out
 * May return an error if the user signalled nfs_wait_on_request().
 */
/* Implementation elided in this skeleton snapshot; contract documented above. */
static int nfs_page_async_flush(struct folio *folio,
				struct writeback_control *wbc,
				struct nfs_pageio_descriptor *pgio)
{}

/* Write one folio via @pgio (presumably wraps nfs_page_async_flush — confirm);
 * body elided in this skeleton snapshot. */
static int nfs_do_writepage(struct folio *folio, struct writeback_control *wbc,
			    struct nfs_pageio_descriptor *pgio)
{}

/*
 * Write an mmapped page to the server.
 */
/* Implementation elided in this skeleton snapshot; see comment above. */
static int nfs_writepage_locked(struct folio *folio,
				struct writeback_control *wbc)
{}

/* Per-folio callback used by nfs_writepages(); body elided in this snapshot. */
static int nfs_writepages_callback(struct folio *folio,
				   struct writeback_control *wbc, void *data)
{}

/* io_completion callback (presumably kicks off a commit for @inode — confirm);
 * body elided in this skeleton snapshot. */
static void nfs_io_completion_commit(void *inode)
{}

/* address_space writeback entry point for NFS; body elided in this snapshot. */
int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{}

/*
 * Insert a write request into an inode
 */
/* Implementation elided in this skeleton snapshot; see comment above. */
static void nfs_inode_add_request(struct nfs_page *req)
{}

/*
 * Remove a write request from an inode
 */
/* Implementation elided in this skeleton snapshot; see comment above. */
static void nfs_inode_remove_request(struct nfs_page *req)
{}

/* Re-mark the folio behind @req dirty so it is written again; body elided in
 * this skeleton snapshot — confirm against upstream. */
static void nfs_mark_request_dirty(struct nfs_page *req)
{}

/**
 * nfs_request_add_commit_list_locked - add request to a commit list
 * @req: pointer to a struct nfs_page
 * @dst: commit list head
 * @cinfo: holds list lock and accounting info
 *
 * This sets the PG_CLEAN bit, updates the cinfo count of
 * number of outstanding requests requiring a commit as well as
 * the MM page stats.
 *
 * The caller must hold NFS_I(cinfo->inode)->commit_mutex, and the
 * nfs_page lock.
 */
/* Implementation elided in this skeleton snapshot; contract documented above. */
void
nfs_request_add_commit_list_locked(struct nfs_page *req, struct list_head *dst,
			    struct nfs_commit_info *cinfo)
{}
/* NOTE(review): restored the symbol argument dropped during extraction. */
EXPORT_SYMBOL_GPL(nfs_request_add_commit_list_locked);

/**
 * nfs_request_add_commit_list - add request to a commit list
 * @req: pointer to a struct nfs_page
 * @cinfo: holds list lock and accounting info
 *
 * This sets the PG_CLEAN bit, updates the cinfo count of
 * number of outstanding requests requiring a commit as well as
 * the MM page stats.
 *
 * The caller must _not_ hold the cinfo->lock, but must be
 * holding the nfs_page lock.
 */
/* Implementation elided in this skeleton snapshot; contract documented above. */
void
nfs_request_add_commit_list(struct nfs_page *req, struct nfs_commit_info *cinfo)
{}
/* NOTE(review): restored the symbol argument dropped during extraction. */
EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);

/**
 * nfs_request_remove_commit_list - Remove request from a commit list
 * @req: pointer to a nfs_page
 * @cinfo: holds list lock and accounting info
 *
 * This clears the PG_CLEAN bit, and updates the cinfo's count of
 * number of outstanding requests requiring a commit
 * It does not update the MM page stats.
 *
 * The caller _must_ hold the cinfo->lock and the nfs_page lock.
 */
/* Implementation elided in this skeleton snapshot; contract documented above. */
void
nfs_request_remove_commit_list(struct nfs_page *req,
			       struct nfs_commit_info *cinfo)
{}
/* NOTE(review): restored the symbol argument dropped during extraction. */
EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list);

/* Initialise @cinfo from per-inode commit state; body elided in this snapshot. */
static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
				      struct inode *inode)
{}

/* Initialise @cinfo from either the direct-IO request @dreq or the @inode
 * (presumably falls back to nfs_init_cinfo_from_inode when @dreq is NULL —
 * confirm); body elided in this skeleton snapshot. */
void nfs_init_cinfo(struct nfs_commit_info *cinfo,
		    struct inode *inode,
		    struct nfs_direct_req *dreq)
{}
/* NOTE(review): restored the symbol argument dropped during extraction. */
EXPORT_SYMBOL_GPL(nfs_init_cinfo);

/*
 * Add a request to the inode's commit list.
 */
/* Implementation elided in this skeleton snapshot; see comment above. */
void
nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
			struct nfs_commit_info *cinfo, u32 ds_commit_idx)
{}

/* Clear per-folio commit accounting; body elided in this skeleton snapshot. */
static void nfs_folio_clear_commit(struct folio *folio)
{}

/* Called holding the request lock on @req */
/* Implementation elided in this skeleton snapshot; see comment above. */
static void nfs_clear_request_commit(struct nfs_commit_info *cinfo,
				     struct nfs_page *req)
{}

/* Report whether the completed write described by @hdr still needs a COMMIT;
 * body elided in this skeleton snapshot — confirm semantics upstream. */
int nfs_write_need_commit(struct nfs_pgio_header *hdr)
{}

/* init_hdr hook of nfs_async_write_completion_ops; body elided in snapshot. */
static void nfs_async_write_init(struct nfs_pgio_header *hdr)
{}

/* Completion hook for async writes; body elided in this skeleton snapshot. */
static void nfs_write_completion(struct nfs_pgio_header *hdr)
{}

/* Number of requests awaiting commit in @cinfo; body elided in this snapshot. */
unsigned long
nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
{}

/* NFS_I(cinfo->inode)->commit_mutex held by caller */
/* Move up to @max commit-ready requests from @src to @dst; body elided in
 * this skeleton snapshot (caller holds commit_mutex, per the note above). */
int
nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
		     struct nfs_commit_info *cinfo, int max)
{}
/* NOTE(review): restored the symbol argument dropped during extraction. */
EXPORT_SYMBOL_GPL(nfs_scan_commit_list);

/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: mds destination list
 * @cinfo: mds and ds lists of reqs ready to commit
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
/* Implementation elided in this skeleton snapshot; see comment above. */
int
nfs_scan_commit(struct inode *inode, struct list_head *dst,
		struct nfs_commit_info *cinfo)
{}

/*
 * Search for an existing write request, and attempt to update
 * it to reflect a new dirty region on a given page.
 *
 * If the attempt fails, then the existing request is flushed out
 * to disk.
 */
/* Implementation elided in this skeleton snapshot; see comment above. */
static struct nfs_page *nfs_try_to_update_request(struct folio *folio,
						  unsigned int offset,
						  unsigned int bytes)
{}

/*
 * Try to update an existing write request, or create one if there is none.
 *
 * Note: Should always be called with the Page Lock held to prevent races
 * if we have to add a new request. Also assumes that the caller has
 * already called nfs_flush_incompatible() if necessary.
 */
/* Implementation elided in this skeleton snapshot; see comment above. */
static struct nfs_page *nfs_setup_write_request(struct nfs_open_context *ctx,
						struct folio *folio,
						unsigned int offset,
						unsigned int bytes)
{}

/* Set up a write request for @count bytes at @offset in @folio (presumably
 * wraps nfs_setup_write_request — confirm); body elided in this snapshot. */
static int nfs_writepage_setup(struct nfs_open_context *ctx,
			       struct folio *folio, unsigned int offset,
			       unsigned int count)
{}

/* Flush any pending request on @folio that is incompatible with @file's open
 * context; body elided in this skeleton snapshot — confirm upstream. */
int nfs_flush_incompatible(struct file *file, struct folio *folio)
{}

/*
 * Avoid buffered writes when a open context credential's key would
 * expire soon.
 *
 * Returns -EACCES if the key will expire within RPC_KEY_EXPIRE_FAIL.
 *
 * Return 0 and set a credential flag which triggers the inode to flush
 * and performs  NFS_FILE_SYNC writes if the key will expired within
 * RPC_KEY_EXPIRE_TIMEO.
 */
/* Implementation elided in this skeleton snapshot; contract documented above. */
int
nfs_key_timeout_notify(struct file *filp, struct inode *inode)
{}

/*
 * Test if the open context credential key is marked to expire soon.
 */
/* Implementation elided in this skeleton snapshot; see comment above. */
bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx, struct inode *inode)
{}

/*
 * If the page cache is marked as unsafe or invalid, then we can't rely on
 * the PageUptodate() flag. In this case, we will need to turn off
 * write optimisations that depend on the page contents being correct.
 */
/* Implementation elided in this skeleton snapshot; see comment above. */
static bool nfs_folio_write_uptodate(struct folio *folio, unsigned int pagelen)
{}

/* True if @fl is a write lock covering the whole file (per the name — body
 * elided in this skeleton snapshot, confirm). */
static bool
is_whole_file_wrlock(struct file_lock *fl)
{}

/* If we know the page is up to date, and we're not using byte range locks (or
 * if we have the whole file locked for writing), it may be more efficient to
 * extend the write to cover the entire page in order to avoid fragmentation
 * inefficiencies.
 *
 * If the file is opened for synchronous writes then we can just skip the rest
 * of the checks.
 */
/* Implementation elided in this skeleton snapshot; see comment above. */
static int nfs_can_extend_write(struct file *file, struct folio *folio,
				unsigned int pagelen)
{}

/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
/* Implementation elided in this skeleton snapshot; see comment above. */
int nfs_update_folio(struct file *file, struct folio *folio,
		     unsigned int offset, unsigned int count)
{}

/* Map a flush mode @how to an RPC task priority; body elided in snapshot. */
static int flush_task_priority(int how)
{}

/* Fill in the RPC message/task setup for a WRITE described by @hdr;
 * body elided in this skeleton snapshot. */
static void nfs_initiate_write(struct nfs_pgio_header *hdr,
			       struct rpc_message *msg,
			       const struct nfs_rpc_ops *rpc_ops,
			       struct rpc_task_setup *task_setup_data, int how)
{}

/* If a nfs_flush_* function fails, it should remove reqs from @head and
 * call this on each, which will prepare them to be retried on next
 * writeback using standard nfs.
 */
/* Implementation elided in this skeleton snapshot; see comment above. */
static void nfs_redirty_request(struct nfs_page *req)
{}

/* Fail every request on @head with @error; body elided in this snapshot. */
static void nfs_async_write_error(struct list_head *head, int error)
{}

/* Reschedule the I/O described by @hdr for later writeback; body elided in
 * this skeleton snapshot. */
static void nfs_async_write_reschedule_io(struct nfs_pgio_header *hdr)
{}

static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops =;

/* Initialise @pgio for writes against @inode; body elided in this snapshot. */
void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
			       struct inode *inode, int ioflags, bool force_mds,
			       const struct nfs_pgio_completion_ops *compl_ops)
{}
/* NOTE(review): restored the symbol argument dropped during extraction. */
EXPORT_SYMBOL_GPL(nfs_pageio_init_write);

/* Reset @pgio to write through the MDS (bypassing pNFS — confirm); body
 * elided in this skeleton snapshot. */
void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
{}
/* NOTE(review): restored the symbol argument dropped during extraction. */
EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);


/* rpc_call_prepare callback for COMMIT tasks; body elided in this snapshot. */
void nfs_commit_prepare(struct rpc_task *task, void *calldata)
{}

/* Check whether the write described by @hdr extended the file, using server
 * attributes in @fattr (per the name — confirm); body elided in snapshot. */
static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
		struct nfs_fattr *fattr)
{}

/* Update inode attributes after a completed write described by @hdr;
 * body elided in this skeleton snapshot. */
void nfs_writeback_update_inode(struct nfs_pgio_header *hdr)
{}
/* NOTE(review): restored the symbol argument dropped during extraction. */
EXPORT_SYMBOL_GPL(nfs_writeback_update_inode);

/*
 * This function is called when the WRITE call is complete.
 */
/* Implementation elided in this skeleton snapshot; see comment above. */
static int nfs_writeback_done(struct rpc_task *task,
			      struct nfs_pgio_header *hdr,
			      struct inode *inode)
{}

/*
 * This function is called when the WRITE call is complete.
 */
/* Implementation elided in this skeleton snapshot; see comment above. */
static void nfs_writeback_result(struct rpc_task *task,
				 struct nfs_pgio_header *hdr)
{}

/* Wait for outstanding commits tracked in @cinfo to drain; body elided in
 * this skeleton snapshot — confirm upstream. */
static int wait_on_commit(struct nfs_mds_commit_info *cinfo)
{}

/* Account the start of a commit in @cinfo; body elided in this snapshot. */
void nfs_commit_begin(struct nfs_mds_commit_info *cinfo)
{}

/* Account the end of a commit in @cinfo; return value semantics elided with
 * the body in this skeleton snapshot — confirm upstream. */
bool nfs_commit_end(struct nfs_mds_commit_info *cinfo)
{}

/* Release @data and any resources it holds; body elided in this snapshot. */
void nfs_commitdata_release(struct nfs_commit_data *data)
{}
/* NOTE(review): restored the symbol argument dropped during extraction. */
EXPORT_SYMBOL_GPL(nfs_commitdata_release);

/* Launch a COMMIT RPC for @data on @clnt; body elided in this snapshot. */
int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
			const struct nfs_rpc_ops *nfs_ops,
			const struct rpc_call_ops *call_ops,
			int how, int flags)
{}
/* NOTE(review): restored the symbol argument dropped during extraction. */
EXPORT_SYMBOL_GPL(nfs_initiate_commit);

/* Presumably returns the last written byte offset across the requests on
 * @head ("lwb") — confirm; body elided in this skeleton snapshot. */
static loff_t nfs_get_lwb(struct list_head *head)
{}

/*
 * Set up the argument/result storage required for the RPC call.
 */
/* Implementation elided in this skeleton snapshot; see comment above. */
void nfs_init_commit(struct nfs_commit_data *data,
		     struct list_head *head,
		     struct pnfs_layout_segment *lseg,
		     struct nfs_commit_info *cinfo)
{}
/* NOTE(review): restored the symbol argument dropped during extraction. */
EXPORT_SYMBOL_GPL(nfs_init_commit);

/* Re-queue the requests on @page_list for another commit attempt;
 * body elided in this skeleton snapshot. */
void nfs_retry_commit(struct list_head *page_list,
		      struct pnfs_layout_segment *lseg,
		      struct nfs_commit_info *cinfo,
		      u32 ds_commit_idx)
{}
/* NOTE(review): restored the symbol argument dropped during extraction. */
EXPORT_SYMBOL_GPL(nfs_retry_commit);

/* resched_write hook of nfs_commit_completion_ops; body elided in snapshot. */
static void nfs_commit_resched_write(struct nfs_commit_info *cinfo,
				     struct nfs_page *req)
{}

/*
 * Commit dirty pages
 */
/* Implementation elided in this skeleton snapshot; see comment above. */
static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how,
		struct nfs_commit_info *cinfo)
{}

/*
 * COMMIT call returned
 */
/* Implementation elided in this skeleton snapshot; see comment above. */
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{}

/* Process requests attached to @data after a COMMIT completes; body elided in
 * this skeleton snapshot. */
static void nfs_commit_release_pages(struct nfs_commit_data *data)
{}

/* rpc_release callback for COMMIT tasks; body elided in this snapshot. */
static void nfs_commit_release(void *calldata)
{}

static const struct rpc_call_ops nfs_commit_ops =;

static const struct nfs_commit_completion_ops nfs_commit_completion_ops =;

/* Commit the requests on @head for @inode; body elided in this snapshot. */
int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
			    int how, struct nfs_commit_info *cinfo)
{}

/* Internal worker for nfs_commit_inode()/nfs_write_inode(); body elided in
 * this skeleton snapshot — confirm relationship upstream. */
static int __nfs_commit_inode(struct inode *inode, int how,
		struct writeback_control *wbc)
{}

/* Commit all outstanding unstable writes on @inode; body elided in snapshot. */
int nfs_commit_inode(struct inode *inode, int how)
{}
/* NOTE(review): restored the symbol argument dropped during extraction. */
EXPORT_SYMBOL_GPL(nfs_commit_inode);

/* super_operations-style write_inode entry point for NFS; body elided in
 * this skeleton snapshot. */
int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{}
/* NOTE(review): restored the symbol argument dropped during extraction. */
EXPORT_SYMBOL_GPL(nfs_write_inode);

/*
 * Wrapper for filemap_write_and_wait_range()
 *
 * Needed for pNFS in order to ensure data becomes visible to the
 * client.
 */
/* Implementation elided in this skeleton snapshot; see comment above. */
int nfs_filemap_write_and_wait_range(struct address_space *mapping,
		loff_t lstart, loff_t lend)
{}
/* NOTE(review): restored the symbol argument dropped during extraction. */
EXPORT_SYMBOL_GPL(nfs_filemap_write_and_wait_range);

/*
 * flush the inode to disk.
 */
/* Flush all dirty data and commits for @inode; body elided in this snapshot. */
int nfs_wb_all(struct inode *inode)
{}
/* NOTE(review): restored the symbol argument dropped during extraction. */
EXPORT_SYMBOL_GPL(nfs_wb_all);

/* Cancel pending writeback on @folio (invalidation path — confirm);
 * body elided in this skeleton snapshot. */
int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio)
{}

/**
 * nfs_wb_folio - Write back all requests on one page
 * @inode: pointer to page
 * @folio: pointer to folio
 *
 * Assumes that the folio has been locked by the caller, and will
 * not unlock it.
 */
/* Implementation elided in this skeleton snapshot; contract documented above.
 * NOTE(review): the kernel-doc above says "@inode: pointer to page" —
 * should read "pointer to inode"; fix when restoring the body. */
int nfs_wb_folio(struct inode *inode, struct folio *folio)
{}

#ifdef CONFIG_MIGRATION
/* Folio-migration hook (CONFIG_MIGRATION only); body elided in snapshot. */
int nfs_migrate_folio(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode)
{}
#endif

/* Module-init: presumably creates the wdata/cdata caches and mempools
 * declared above — confirm; body elided in this skeleton snapshot. */
int __init nfs_init_writepagecache(void)
{}

/* Module-exit counterpart of nfs_init_writepagecache(); body elided in
 * this skeleton snapshot. */
void nfs_destroy_writepagecache(void)
{}

static const struct nfs_rw_ops nfs_rw_write_ops =;