// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/signal.h>
#include <linux/iversion.h>
#include <linux/ktime.h>
#include <linux/netfs.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include "metric.h"
#include "crypto.h"
#include <linux/ceph/osd_client.h>
#include <linux/ceph/striper.h>

/*
 * Ceph address space ops.
 *
 * There are a few funny things going on here.
 *
 * The page->private field is used to reference a struct
 * ceph_snap_context for _every_ dirty page.  This indicates which
 * snapshot the page was logically dirtied in, and thus which snap
 * context needs to be associated with the osd write during writeback.
 *
 * Similarly, struct ceph_inode_info maintains a set of counters to
 * count dirty pages on the inode.  In the absence of snapshots,
 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
 *
 * When a snapshot is taken (that is, when the client receives
 * notification that a snapshot was taken), each inode with caps and
 * with dirty pages (dirty pages imply there is a cap) gets a new
 * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending
 * order, new snaps go to the tail).  The i_wrbuffer_ref_head count is
 * moved to capsnap->dirty_pages.  (Unless a sync write is currently in
 * progress.  In that case, the capsnap is said to be "pending", new
 * writes cannot start, and the capsnap isn't "finalized" until the
 * write completes (or fails) and a final size/mtime for the inode for
 * that snap can be settled upon.)  i_wrbuffer_ref_head is reset to 0.
 *
 * On writeback, we must submit writes to the osd IN SNAP ORDER.  So,
 * we look for the first capsnap in i_cap_snaps and write out pages in
 * that snap context _only_.  Then we move on to the next capsnap,
 * eventually reaching the "live" or "head" context (i.e., pages that
 * are not yet snapped), where we write out the most recently dirtied
 * pages.
 *
 * Invalidate and so forth must take care to ensure the dirty page
 * accounting is preserved.
 */
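
/*
 * Illustrative only: a sketch of the accounting invariant described
 * above, using the counters declared in super.h.  The helper name and
 * the WARN_ON are hypothetical; no such check exists in this file.
 */
static inline void
example_check_wrbuffer_accounting(struct ceph_inode_info *ci)
{
	struct ceph_cap_snap *capsnap;
	int snapped = 0;

	lockdep_assert_held(&ci->i_ceph_lock);

	/* Each capsnap owns the pages that were dirtied in its context. */
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item)
		snapped += capsnap->dirty_pages;

	/* Every dirty page is counted either as "head" or as snapped. */
	WARN_ON(ci->i_wrbuffer_ref != ci->i_wrbuffer_ref_head + snapped);
}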

#define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10))
#define CONGESTION_OFF_THRESH(congestion_kb)				\
	(CONGESTION_ON_THRESH(congestion_kb) -				\
	 (CONGESTION_ON_THRESH(congestion_kb) >> 2))

static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
					struct folio **foliop, void **_fsdata);

static inline struct ceph_snap_context *page_snap_context(struct page *page)
{}

/*
 * Dirty a page.  Optimistically adjust accounting, on the assumption
 * that we won't race with invalidate.  If we do, readjust.
 */
static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
{}
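
/*
 * Illustrative only: the optimistic scheme above in miniature, using
 * the fields and helpers declared in super.h.  The real
 * ceph_dirty_folio must also handle a pending capsnap and the fscache
 * hook; the example_* name is hypothetical.
 */
static bool example_dirty_folio(struct address_space *mapping,
				struct folio *folio)
{
	struct ceph_inode_info *ci = ceph_inode(mapping->host);
	struct ceph_snap_context *snapc;

	/* Account the folio against the live "head" context first... */
	spin_lock(&ci->i_ceph_lock);
	snapc = ceph_get_snap_context(ci->i_head_snapc);
	++ci->i_wrbuffer_ref;
	++ci->i_wrbuffer_ref_head;
	spin_unlock(&ci->i_ceph_lock);

	/* ...stash the snapc ref where writeback will look for it... */
	folio_attach_private(folio, snapc);

	/* ...and only then mark it dirty; invalidate readjusts on race. */
	return filemap_dirty_folio(mapping, folio);
}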

/*
 * If we are truncating the full folio (i.e., offset == 0), adjust the
 * dirty folio counters appropriately.  Only called if there is private
 * data on the folio.
 */
static void ceph_invalidate_folio(struct folio *folio, size_t offset,
				size_t length)
{}
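
/*
 * Illustrative only: what "preserving the accounting" means when a
 * whole dirty folio goes away, assuming the helpers declared in
 * super.h; example_drop_dirty_folio() is hypothetical.
 */
static void example_drop_dirty_folio(struct ceph_inode_info *ci,
				     struct folio *folio)
{
	struct ceph_snap_context *snapc = folio_detach_private(folio);

	/* Give back the wrbuffer ref taken when the folio was dirtied. */
	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
	ceph_put_snap_context(snapc);
}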

static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq)
{}

static bool ceph_netfs_clamp_length(struct netfs_io_subrequest *subreq)
{}

static void finish_netfs_read(struct ceph_osd_request *req)
{}

static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq)
{}

static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
{}

static int ceph_init_request(struct netfs_io_request *rreq, struct file *file)
{}

static void ceph_netfs_free_request(struct netfs_io_request *rreq)
{}

const struct netfs_request_ops ceph_netfs_ops =;

#ifdef CONFIG_CEPH_FSCACHE
static void ceph_fscache_write_terminated(void *priv, ssize_t error, bool was_async)
{}

static void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
{}
#else
static inline void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
{
}
#endif /* CONFIG_CEPH_FSCACHE */

struct ceph_writeback_ctl
{};

/*
 * Get ref for the oldest snapc for an inode with dirty data... that is, the
 * only snap context we are allowed to write back.
 */
static struct ceph_snap_context *
get_oldest_context(struct inode *inode, struct ceph_writeback_ctl *ctl,
		   struct ceph_snap_context *page_snapc)
{}
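
/*
 * Illustrative only: the core of the lookup described above.  The real
 * get_oldest_context() also fills in the ceph_writeback_ctl and
 * cross-checks the page's snapc; example_oldest_snapc() is hypothetical.
 */
static struct ceph_snap_context *
example_oldest_snapc(struct ceph_inode_info *ci)
{
	struct ceph_cap_snap *capsnap;

	lockdep_assert_held(&ci->i_ceph_lock);

	/* i_cap_snaps is ordered oldest to newest; the first capsnap
	 * with dirty pages holds the only snapc we may write back. */
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		if (capsnap->dirty_pages || capsnap->writing)
			return ceph_get_snap_context(capsnap->context);
	}

	/* No snapped dirty data left: the live "head" is writeable. */
	return ci->i_head_snapc ?
		ceph_get_snap_context(ci->i_head_snapc) : NULL;
}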

static u64 get_writepages_data_length(struct inode *inode,
				      struct page *page, u64 start)
{}

/*
 * Write a single page, but leave the page locked.
 *
 * If we get a write error, mark the mapping for error, but still adjust the
 * dirty page accounting (i.e., page is no longer dirty).
 */
static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
{}
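
/*
 * Illustrative only: the error convention above in miniature.  A failed
 * write is recorded once per mapping via mapping_set_error(), and the
 * folio is cleaned either way so the dirty accounting stays balanced;
 * example_write_done() is hypothetical.
 */
static void example_write_done(struct folio *folio, int err)
{
	if (err < 0)
		mapping_set_error(folio->mapping, err);

	/* Success or failure, the folio is no longer dirty. */
	folio_end_writeback(folio);
}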

static int ceph_writepage(struct page *page, struct writeback_control *wbc)
{}

/*
 * async writeback completion handler.
 *
 * If we get an error, set the mapping error bit, but not the individual
 * page error bits.
 */
static void writepages_finish(struct ceph_osd_request *req)
{}

/*
 * initiate async writeback
 */
static int ceph_writepages_start(struct address_space *mapping,
				 struct writeback_control *wbc)
{}

/*
 * See if a given @snapc is either writeable, or already written.
 */
static int context_is_writeable_or_written(struct inode *inode,
					   struct ceph_snap_context *snapc)
{}

/**
 * ceph_find_incompatible - find an incompatible context and return it
 * @page: page being dirtied
 *
 * We are only allowed to write into/dirty a page if the page is
 * clean, or already dirty within the same snap context. Returns a
 * conflicting context if there is one, NULL if there isn't, or an
 * ERR_PTR-encoded negative error code on other errors.
 *
 * Must be called with page lock held.
 */
static struct ceph_snap_context *
ceph_find_incompatible(struct page *page)
{}
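
/*
 * Illustrative only: the compatibility rule from the kerneldoc above,
 * reduced to its central comparison.  The real ceph_find_incompatible()
 * loops, since the oldest context can change while writeback is waited
 * on; example_find_conflict() is hypothetical.
 */
static struct ceph_snap_context *
example_find_conflict(struct page *page, struct ceph_snap_context *oldest)
{
	struct ceph_snap_context *snapc = page_snap_context(page);

	if (!snapc || snapc == oldest)
		return NULL;	/* clean, or dirty in the same context */

	return snapc;		/* conflicting: must be written back first */
}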

static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
					struct folio **foliop, void **_fsdata)
{}

/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
 */
static int ceph_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len,
			    struct page **pagep, void **fsdata)
{}

/*
 * We don't do anything in here that simple_write_end doesn't do,
 * except adjust dirty page accounting.
 */
static int ceph_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *subpage, void *fsdata)
{}

const struct address_space_operations ceph_aops =;

static void ceph_block_sigs(sigset_t *oldset)
{}

static void ceph_restore_sigs(sigset_t *oldset)
{}

/*
 * vm ops
 */
static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
{}

static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
{}

void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
			   char	*data, size_t len)
{}

int ceph_uninline_data(struct file *file)
{}

static const struct vm_operations_struct ceph_vmops =;

int ceph_mmap(struct file *file, struct vm_area_struct *vma)
{}

enum {};

static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
				s64 pool, struct ceph_string *pool_ns)
{}

int ceph_pool_perm_check(struct inode *inode, int need)
{}

void ceph_pool_perm_destroy(struct ceph_mds_client *mdsc)
{}