// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2001  Andrea Arcangeli <[email protected]> SuSE
 * Copyright (C) 2016 - 2020 Christoph Hellwig
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/suspend.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/module.h>
#include "blk.h"

static inline struct inode *bdev_file_inode(struct file *file)
{
	return file->f_mapping->host;
}

static blk_opf_t dio_bio_write_op(struct kiocb *iocb)
{
	blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

	/* avoid the need for an I/O completion work item */
	if (iocb_is_dsync(iocb))
		opf |= REQ_FUA;
	return opf;
}

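/*
 * Direct I/O to a block device must be aligned to the device's logical
 * block size; atomic writes additionally have to respect the device's
 * atomic write limits.
 */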
static bool blkdev_dio_invalid(struct block_device *bdev, loff_t pos,
				struct iov_iter *iter, bool is_atomic)
{}

#define DIO_INLINE_BIO_VECS 4

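/*
 * Fast path for synchronous direct I/O that fits in a single bio: use
 * on-stack bio_vecs (up to DIO_INLINE_BIO_VECS of them) where possible
 * and wait for completion inline.
 */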
static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
		struct iov_iter *iter, struct block_device *bdev,
		unsigned int nr_pages)
{}

enum {
	DIO_SHOULD_DIRTY	= 1,
	DIO_IS_SYNC		= 2,
};

struct blkdev_dio {
	union {
		struct kiocb		*iocb;	/* for async completion */
		struct task_struct	*waiter; /* for sync completion */
	};
	size_t			size;
	atomic_t		ref;
	unsigned int		flags;
	struct bio		bio ____cacheline_aligned_in_smp;
};

static struct bio_set blkdev_dio_pool;

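/*
 * Completion handler for the multi-bio path: each bio drops one reference
 * on the containing blkdev_dio, and the last one to complete finishes the
 * iocb (or wakes the waiter for synchronous I/O).
 */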
static void blkdev_bio_end_io(struct bio *bio)
{}

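/*
 * General direct I/O path: split the iov_iter across as many bios as
 * needed, tracked by a reference-counted blkdev_dio allocated from
 * blkdev_dio_pool.
 */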
static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
		struct block_device *bdev, unsigned int nr_pages)
{}

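/* Completion handler for the single-bio async path below. */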
static void blkdev_bio_end_io_async(struct bio *bio)
{}

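/*
 * Single-bio path for asynchronous direct I/O: the blkdev_dio is embedded
 * in the bio itself, so no separate reference counting is needed.
 */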
static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
					struct iov_iter *iter,
					struct block_device *bdev,
					unsigned int nr_pages)
{}

/* Pick the cheapest direct I/O path that can handle this request. */
static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	bool is_atomic = iocb->ki_flags & IOCB_ATOMIC;
	unsigned int nr_pages;

	if (!iov_iter_count(iter))
		return 0;

	if (blkdev_dio_invalid(bdev, iocb->ki_pos, iter, is_atomic))
		return -EINVAL;

	nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
	if (likely(nr_pages <= BIO_MAX_VECS)) {
		if (is_sync_kiocb(iocb))
			return __blkdev_direct_IO_simple(iocb, iter, bdev,
							nr_pages);
		return __blkdev_direct_IO_async(iocb, iter, bdev, nr_pages);
	} else if (is_atomic) {
		return -EINVAL;
	}
	return __blkdev_direct_IO(iocb, iter, bdev, bio_max_segs(nr_pages));
}

static int blkdev_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	struct block_device *bdev = I_BDEV(inode);
	loff_t isize = i_size_read(inode);

	iomap->bdev = bdev;
	iomap->offset = ALIGN_DOWN(offset, bdev_logical_block_size(bdev));
	if (iomap->offset >= isize)
		return -EIO;
	iomap->type = IOMAP_MAPPED;
	iomap->addr = iomap->offset;
	iomap->length = isize - iomap->offset;
	iomap->flags |= IOMAP_F_BUFFER_HEAD; /* noop for !CONFIG_BUFFER_HEAD */
	return 0;
}

static const struct iomap_ops blkdev_iomap_ops = {
	.iomap_begin		= blkdev_iomap_begin,
};

#ifdef CONFIG_BUFFER_HEAD
static int blkdev_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	/* The block device is its own backing store, so blocks map 1:1. */
	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	set_buffer_mapped(bh);
	return 0;
}

/*
 * We cannot call mpage_writepages() as it does not take the buffer lock.
 * We must use block_write_full_folio() directly which holds the buffer
 * lock.  The buffer lock provides the synchronisation with writeback
 * that filesystems rely on when they use the blockdev's mapping.
 */
static int blkdev_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct blk_plug plug;
	int err;

	blk_start_plug(&plug);
	err = write_cache_pages(mapping, wbc, block_write_full_folio,
			blkdev_get_block);
	blk_finish_plug(&plug);

	return err;
}

static int blkdev_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, blkdev_get_block);
}

static void blkdev_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, blkdev_get_block);
}

static int blkdev_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, struct page **pagep, void **fsdata)
{
	return block_write_begin(mapping, pos, len, pagep, blkdev_get_block);
}

static int blkdev_write_end(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned copied, struct page *page,
		void *fsdata)
{
	int ret;

	ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	unlock_page(page);
	put_page(page);

	return ret;
}

const struct address_space_operations def_blk_aops = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio	= blkdev_read_folio,
	.readahead	= blkdev_readahead,
	.writepages	= blkdev_writepages,
	.write_begin	= blkdev_write_begin,
	.write_end	= blkdev_write_end,
	.migrate_folio	= buffer_migrate_folio_norefs,
	.is_dirty_writeback = buffer_check_dirty_writeback,
};
#else /* CONFIG_BUFFER_HEAD */
static int blkdev_read_folio(struct file *file, struct folio *folio)
{
	return iomap_read_folio(folio, &blkdev_iomap_ops);
}

static void blkdev_readahead(struct readahead_control *rac)
{
	iomap_readahead(rac, &blkdev_iomap_ops);
}

static int blkdev_map_blocks(struct iomap_writepage_ctx *wpc,
		struct inode *inode, loff_t offset, unsigned int len)
{
	loff_t isize = i_size_read(inode);

	if (WARN_ON_ONCE(offset >= isize))
		return -EIO;
	if (offset >= wpc->iomap.offset &&
	    offset < wpc->iomap.offset + wpc->iomap.length)
		return 0;
	return blkdev_iomap_begin(inode, offset, isize - offset,
				  IOMAP_WRITE, &wpc->iomap, NULL);
}

static const struct iomap_writeback_ops blkdev_writeback_ops = {
	.map_blocks		= blkdev_map_blocks,
};

static int blkdev_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct iomap_writepage_ctx wpc = { };

	return iomap_writepages(mapping, wbc, &wpc, &blkdev_writeback_ops);
}

const struct address_space_operations def_blk_aops = {
	.dirty_folio	= filemap_dirty_folio,
	.release_folio		= iomap_release_folio,
	.invalidate_folio	= iomap_invalidate_folio,
	.read_folio		= blkdev_read_folio,
	.readahead		= blkdev_readahead,
	.writepages		= blkdev_writepages,
	.is_partially_uptodate  = iomap_is_partially_uptodate,
	.error_remove_folio	= generic_error_remove_folio,
	.migrate_folio		= filemap_migrate_folio,
};
#endif /* CONFIG_BUFFER_HEAD */

/*
 * For a block special file, file_inode(file)->i_size is zero, so the size
 * has to be computed by hand from the block device's backing inode.
 */
static loff_t blkdev_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t retval;

	inode_lock(bd_inode);
	retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
	inode_unlock(bd_inode);
	return retval;
}

static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
		int datasync)
{
	struct block_device *bdev = I_BDEV(filp->f_mapping->host);
	int error;

	error = file_write_and_wait_range(filp, start, end);
	if (error)
		return error;

	/*
	 * There is no need to serialise calls to blkdev_issue_flush with
	 * i_mutex and doing so causes performance issues with concurrent
	 * O_SYNC writers to a block device.
	 */
	error = blkdev_issue_flush(bdev);
	if (error == -EOPNOTSUPP)
		error = 0;

	return error;
}

/**
 * file_to_blk_mode - get block open flags from file flags
 * @file: file whose open flags should be converted
 *
 * Look at the file open flags and generate the corresponding block open
 * flags from them. The function works both for a file that is just being
 * opened (e.g. during the ->open callback) and for a file that is already
 * open. This is actually non-trivial (see the comment in the function).
 */
blk_mode_t file_to_blk_mode(struct file *file)
{
	blk_mode_t mode = 0;

	if (file->f_mode & FMODE_READ)
		mode |= BLK_OPEN_READ;
	if (file->f_mode & FMODE_WRITE)
		mode |= BLK_OPEN_WRITE;
	/*
	 * do_dentry_open() clears O_EXCL from f_flags, so use
	 * file->private_data to determine whether the open was exclusive:
	 * blkdev_open() stores the file itself there as the holder.
	 */
	if (file->private_data)
		mode |= BLK_OPEN_EXCL;
	if (file->f_flags & O_NDELAY)
		mode |= BLK_OPEN_NDELAY;

	/*
	 * If all bits in O_ACCMODE are set (aka O_RDWR | O_WRONLY), the
	 * floppy driver has historically allowed ioctls as if the file was
	 * opened for writing, but does not allow actual reads or writes.
	 */
	if ((file->f_flags & O_ACCMODE) == (O_RDWR | O_WRONLY))
		mode |= BLK_OPEN_WRITE_IOCTL;

	return mode;
}

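/*
 * Note that for exclusive opens the file itself is used as the holder and
 * is stashed in filp->private_data, which is what file_to_blk_mode() keys
 * BLK_OPEN_EXCL off.
 */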
static int blkdev_open(struct inode *inode, struct file *filp)
{}

static int blkdev_release(struct inode *inode, struct file *filp)
{}

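/*
 * O_DIRECT writes bypass the page cache, so cached pages covering the
 * written range are invalidated to keep buffered readers coherent.
 */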
static ssize_t
blkdev_direct_write(struct kiocb *iocb, struct iov_iter *from)
{}

static ssize_t blkdev_buffered_write(struct kiocb *iocb, struct iov_iter *from)
{}

/*
 * Write data to the block device.  Only intended for the block device itself
 * and the raw driver, which is basically a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general purpose
 * use.
 */
static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{}

static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{}

#define BLKDEV_FALLOC_FL_SUPPORTED					\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)

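/*
 * fallocate() never changes the size of a block device; the supported
 * modes zero out or discard the requested sector range instead.
 */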
static long blkdev_fallocate(struct file *file, int mode, loff_t start,
			     loff_t len)
{}

static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *bd_inode = bdev_file_inode(file);

	if (bdev_read_only(I_BDEV(bd_inode)))
		return generic_file_readonly_mmap(file, vma);

	return generic_file_mmap(file, vma);
}

const struct file_operations def_blk_fops = {
	.open		= blkdev_open,
	.release	= blkdev_release,
	.llseek		= blkdev_llseek,
	.read_iter	= blkdev_read_iter,
	.write_iter	= blkdev_write_iter,
	.iopoll		= iocb_bio_iopoll,
	.mmap		= blkdev_mmap,
	.fsync		= blkdev_fsync,
	.unlocked_ioctl	= blkdev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_blkdev_ioctl,
#endif
	.splice_read	= filemap_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= blkdev_fallocate,
	.uring_cmd	= blkdev_uring_cmd,
	.fop_flags	= FOP_BUFFER_RASYNC,
};

static __init int blkdev_init(void)
{
	return bioset_init(&blkdev_dio_pool, 4,
				offsetof(struct blkdev_dio, bio),
				BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE);
}
module_init(blkdev_init);