linux/fs/bcachefs/fs-io-buffered.c

// SPDX-License-Identifier: GPL-2.0
#ifndef NO_BCACHEFS_FS

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "fs-io.h"
#include "fs-io-buffered.h"
#include "fs-io-direct.h"
#include "fs-io-pagecache.h"
#include "io_read.h"
#include "io_write.h"

#include <linux/backing-dev.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

static inline bool bio_full(struct bio *bio, unsigned len)
{
	/* Same check as the block layer's bio_full(): the bio is full when
	 * it has no bvec slots left, or appending @len more bytes would
	 * overflow bi_size. */
	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return true;
	if (bio->bi_iter.bi_size > UINT_MAX - len)
		return true;
	return false;
}

/* readpage(s): */

static void bch2_readpages_end_io(struct bio *bio)
{
	/* Completion for readahead bios: end the read on every folio the
	 * bio covers, marking each uptodate iff the whole bio succeeded. */
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio)
		folio_end_read(fi.folio, bio->bi_status == BLK_STS_OK);

	bio_put(bio);
}

/* Folios handed to us by the readahead code (field layout assumed): */
struct readpages_iter {
	struct address_space	*mapping;
	unsigned		idx;
	folios			folios;
};

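/*
 * Initialize @iter with the folios in the readahead window. A rough sketch
 * of the expected shape: pull each folio with __readahead_folio(ractl),
 * attach the bcachefs per-folio state (bch2_folio_create()), and stash it
 * in iter->folios for the peek/advance helpers below.
 */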
static int readpages_iter_init(struct readpages_iter *iter,
			       struct readahead_control *ractl)
{}

static inline struct folio *readpage_iter_peek(struct readpages_iter *iter)
{
	return iter->idx < iter->folios.nr
		? iter->folios.data[iter->idx]
		: NULL;
}

static inline void readpage_iter_advance(struct readpages_iter *iter)
{
	iter->idx++;
}

static bool extent_partial_reads_expensive(struct bkey_s_c k)
{
	/* Reading part of a checksummed or compressed extent means reading
	 * (and bouncing) the whole thing anyway: */
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct bch_extent_crc_unpacked crc;
	const union bch_extent_entry *i;

	bkey_for_each_crc(k.k, ptrs, crc, i)
		if (crc.csum_type || crc.compression_type)
			return true;
	return false;
}

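/*
 * Try to widen @bio beyond the folios readahead handed us when the current
 * extent keeps going, so extents that are expensive to read partially
 * (checksummed or compressed, per extent_partial_reads_expensive()) can be
 * read with a single bio. When @get_more is set this may allocate fresh
 * folios and insert them into the pagecache.
 */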
static int readpage_bio_extend(struct btree_trans *trans,
			       struct readpages_iter *iter,
			       struct bio *bio,
			       unsigned sectors_this_extent,
			       bool get_more)
{}

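/*
 * The core buffered read loop: iterate over extents for @inum starting at
 * the bio's current sector, optionally widen the bio
 * (readpage_bio_extend()), then hand the part of the bio covered by each
 * extent to the io_read path, retrying on btree transaction restarts.
 */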
static void bchfs_read(struct btree_trans *trans,
		       struct bch_read_bio *rbio,
		       subvol_inum inum,
		       struct readpages_iter *readpages_iter)
{}

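/*
 * ->readahead: set up a readpages_iter over @ractl, then issue one or more
 * read bios covering the batched folios; completions land in
 * bch2_readpages_end_io().
 */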
void bch2_readahead(struct readahead_control *ractl)
{}

static void bch2_read_single_folio_end_io(struct bio *bio)
{
	complete(bio->bi_private);
}

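/*
 * Read one folio synchronously: issue a single read bio and wait on a
 * completion, signalled by bch2_read_single_folio_end_io() above.
 */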
int bch2_read_single_folio(struct folio *folio, struct address_space *mapping)
{}

int bch2_read_folio(struct file *file, struct folio *folio)
{
	int ret = bch2_read_single_folio(folio, folio->mapping);

	folio_unlock(folio);
	return bch2_err_class(ret);
}

/* writepages: */

struct bch_writepage_io {
	struct bch_inode_info		*inode;

	/* must be last: the bio embedded in op is allocated from the
	 * writepage bioset (see bch2_fs_fs_io_buffered_init()) */
	struct bch_write_op		op;
};

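/*
 * State carried across __bch2_writepage() calls within one writepages
 * invocation: the bch_writepage_io currently being filled, if any, plus the
 * io options to apply to new writes.
 */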
struct bch_writepage_state {};

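/*
 * Presumably just zeroes the state and loads the io options (replication,
 * compression, etc.) from @inode, so one writepages pass sees a consistent
 * snapshot of them.
 */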
static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
								  struct bch_inode_info *inode)
{}

/*
 * Determine when a writepage io is full. We have to limit writepage bios to a
 * single page per bvec (i.e. 1MB with 4k pages) because that is the limit to
 * what the bounce path in bch2_write_extent() can handle. In theory we could
 * loosen this restriction for non-bounce I/O, but we don't have that context
 * here. Ideally, we can up this limit and make it configurable in the future
 * when the bounce path can be enhanced to accommodate larger source bios.
 */
static inline bool bch_io_full(struct bch_writepage_io *io, unsigned len)
{
	struct bio *bio = &io->op.wbio.bio;

	return bio_full(bio, len) ||
		(bio->bi_iter.bi_size + len > BIO_MAX_VECS * PAGE_SIZE);
}

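/*
 * Write completion, run from the bch_write_op's completion path. Expected
 * to propagate any error to the mapping (mapping_set_error()), clear
 * per-folio writeback state, and account the change in allocated sectors
 * made by this write.
 */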
static void bch2_writepage_io_done(struct bch_write_op *op)
{}

static void bch2_writepage_do_io(struct bch_writepage_state *w)
{
	struct bch_writepage_io *io = w->io;

	w->io = NULL;
	closure_call(&io->op.cl, bch2_write, NULL, NULL);
}

/*
 * Get a bch_writepage_io to add the current folio to - appending to an
 * existing one if possible, else allocating a new one:
 */
static void bch2_writepage_io_alloc(struct bch_fs *c,
				    struct writeback_control *wbc,
				    struct bch_writepage_state *w,
				    struct bch_inode_info *inode,
				    u64 sector,
				    unsigned nr_replicas)
{}

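/*
 * Per-folio writeback callback, invoked via write_cache_pages() from
 * bch2_writepages() with @data pointing at the bch_writepage_state: decide
 * which sectors of @folio are dirty, set writeback, and append them to
 * w->io, opening a new io with bch2_writepage_io_alloc() when the current
 * one is full (bch_io_full()) or not contiguous.
 */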
static int __bch2_writepage(struct folio *folio,
			    struct writeback_control *wbc,
			    void *data)
{}

int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	/* Sketch of the ->writepages entry point: walk dirty folios with
	 * write_cache_pages(), then flush any partially built io. */
	struct bch_fs *c = mapping->host->i_sb->s_fs_info;
	struct bch_writepage_state w =
		bch_writepage_state_init(c, to_bch_ei(mapping->host));
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = write_cache_pages(mapping, wbc, __bch2_writepage, &w);
	if (w.io)
		bch2_writepage_do_io(&w);
	blk_finish_plug(&plug);
	return bch2_err_class(ret);
}

/* buffered writes: */

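/*
 * .write_begin: pin and lock the folio at @pos, read it in
 * (bch2_read_single_folio()) if the write is partial and it isn't
 * uptodate, and reserve space for the sectors being dirtied; the folio is
 * returned locked in *pagep for bch2_write_end() to finish.
 */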
int bch2_write_begin(struct file *file, struct address_space *mapping,
		     loff_t pos, unsigned len,
		     struct page **pagep, void **fsdata)
{}

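/*
 * .write_end: mark the @copied bytes uptodate and dirty, then unlock and
 * release the folio pinned by bch2_write_begin().
 */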
int bch2_write_end(struct file *file, struct address_space *mapping,
		   loff_t pos, unsigned len, unsigned copied,
		   struct page *page, void *fsdata)
{}

static noinline void folios_trunc(folios *fs, struct folio **fi)
{
	/* Drop folios from the tail of @fs back down to @fi: */
	while (fs->data + fs->nr > fi) {
		struct folio *f = darray_pop(fs);

		folio_unlock(f);
		folio_put(f);
	}
}

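/*
 * One pass of the buffered write loop: grab and prepare the folios covering
 * [pos, pos + len), get disk reservations for the sectors being dirtied,
 * copy the data in from @iter, and mark the folios dirty. On a short copy
 * or a failed reservation the folio array is trimmed back with
 * folios_trunc(). Returns the number of bytes actually written.
 */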
static int __bch2_buffered_write(struct bch_inode_info *inode,
				 struct address_space *mapping,
				 struct iov_iter *iter,
				 loff_t pos, unsigned len,
				 bool inode_locked)
{}

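/*
 * Loop around __bch2_buffered_write() until @iter is drained, calling
 * balance_dirty_pages_ratelimited() between chunks.
 */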
static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
{}

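/*
 * ->write_iter: after the generic checks (write limits, frozen fs), this
 * dispatches O_DIRECT writes to the fs-io-direct path and everything else
 * to bch2_buffered_write(), flushing at the end for O_SYNC/IOCB_DSYNC.
 */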
ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *iter)
{}

void bch2_fs_fs_io_buffered_exit(struct bch_fs *c)
{
	bioset_exit(&c->writepage_bioset);
}

int bch2_fs_fs_io_buffered_init(struct bch_fs *c)
{
	/* The writepage bioset reserves front padding for the bch_write_op
	 * that precedes the bio embedded in struct bch_writepage_io: */
	if (bioset_init(&c->writepage_bioset,
			4, offsetof(struct bch_writepage_io, op.wbio.bio),
			BIOSET_NEED_BVECS))
		return -BCH_ERR_ENOMEM_writepage_bioset_init;

	return 0;
}

#endif /* NO_BCACHEFS_FS */