/* linux/block/bdev.c */

// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2001  Andrea Arcangeli <[email protected]> SuSE
 *  Copyright (C) 2016 - 2020 Christoph Hellwig
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/device_cgroup.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/buffer_head.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/part_stat.h>
#include <linux/uaccess.h>
#include <linux/stat.h>
#include "../fs/internal.h"
#include "blk.h"

/* Should we allow writing to mounted block devices? */
static bool bdev_allow_write_mounted = IS_ENABLED();

/*
 * Backing object for a block-device inode.  Members elided in this
 * snapshot — presumably pairs a struct block_device with its VFS inode
 * (BDEV_I()/BD_INODE() below convert between the two); confirm upstream.
 */
struct bdev_inode {};

/* Map a VFS inode to its containing bdev_inode (body elided in this snapshot). */
static inline struct bdev_inode *BDEV_I(struct inode *inode)
{}

/* Map a block_device back to its backing VFS inode (body elided in this snapshot). */
static inline struct inode *BD_INODE(struct block_device *bdev)
{}

/* Return the block device behind @inode (body elided in this snapshot). */
struct block_device *I_BDEV(struct inode *inode)
{}
/* EXPORT_SYMBOL() must name the exported symbol: the function above. */
EXPORT_SYMBOL(I_BDEV);

/* Return the block device behind an open @bdev_file (body elided in this snapshot). */
struct block_device *file_bdev(struct file *bdev_file)
{}
EXPORT_SYMBOL(file_bdev);

/* Presumably writes back the bdev's backing inode — body elided in this snapshot. */
static void bdev_write_inode(struct block_device *bdev)
{}

/* Kill _all_ buffers and pagecache, dirty or not. (Body elided in this snapshot.) */
static void kill_bdev(struct block_device *bdev)
{}

/* Invalidate clean unused buffers and pagecache. (Body elided in this snapshot.) */
void invalidate_bdev(struct block_device *bdev)
{}
EXPORT_SYMBOL(invalidate_bdev);

/*
 * Drop all buffers & page cache for given bdev range. This function bails
 * with error if bdev has other exclusive owner (such as filesystem).
 * Presumably returns 0 on success and a negative errno otherwise; the
 * implementation is elided in this snapshot.
 */
int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
			loff_t lstart, loff_t lend)
{}

/* Presumably chooses the initial block size for @bdev — body elided in this snapshot. */
static void set_init_blocksize(struct block_device *bdev)
{}

/* Set the block size used for I/O on the device behind @file (body elided). */
int set_blocksize(struct file *file, int size)
{}

EXPORT_SYMBOL(set_blocksize);

/* Set the block size for @sb's backing device (body elided in this snapshot). */
int sb_set_blocksize(struct super_block *sb, int size)
{}

EXPORT_SYMBOL(sb_set_blocksize);

/* Set @sb's block size to at least @size — presumably clamped to the device
 * minimum; body elided in this snapshot. */
int sb_min_blocksize(struct super_block *sb, int size)
{}

EXPORT_SYMBOL(sb_min_blocksize);

/* Kick off writeback for the bdev's mapping without waiting (body elided). */
int sync_blockdev_nowait(struct block_device *bdev)
{}
EXPORT_SYMBOL_GPL(sync_blockdev_nowait);

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 * (Implementation elided in this snapshot.)
 */
int sync_blockdev(struct block_device *bdev)
{}
EXPORT_SYMBOL(sync_blockdev);

/* Like sync_blockdev(), restricted to the byte range [@lstart, @lend] (body elided). */
int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend)
{}
EXPORT_SYMBOL(sync_blockdev_range);

/**
 * bdev_freeze - lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 * The reference counter (bd_fsfreeze_count) guarantees that only the last
 * unfreeze process can unfreeze the frozen filesystem actually when multiple
 * freeze requests arrive simultaneously. It counts up in bdev_freeze() and
 * count down in bdev_thaw(). When it becomes 0, bdev_thaw() will unfreeze
 * actually.
 *
 * Return: On success zero is returned, negative error code on failure.
 */
int bdev_freeze(struct block_device *bdev)
{}
EXPORT_SYMBOL(bdev_freeze);

/**
 * bdev_thaw - unlock filesystem
 * @bdev:	blockdevice to unlock
 *
 * Unlocks the filesystem and marks it writeable again after bdev_freeze().
 *
 * Return: On success zero is returned, negative error code on failure.
 */
int bdev_thaw(struct block_device *bdev)
{}
EXPORT_SYMBOL(bdev_thaw);

/*
 * pseudo-fs
 */

/* NOTE(review): presumably serializes bd_claiming/bd_holder handoff — confirm. */
static  __cacheline_aligned_in_smp DEFINE_MUTEX(bdev_lock);
/* Slab cache for bdev inodes; presumably populated in bdev_cache_init(). */
static struct kmem_cache *bdev_cachep __ro_after_init;

/* Likely the pseudo-fs ->alloc_inode hook — body elided in this snapshot. */
static struct inode *bdev_alloc_inode(struct super_block *sb)
{}

/* Likely the pseudo-fs ->free_inode hook — body elided in this snapshot. */
static void bdev_free_inode(struct inode *inode)
{}

/* Slab constructor, run once per object — presumably for bdev_cachep; body elided. */
static void init_once(void *data)
{}

/* Likely the pseudo-fs ->evict_inode hook — body elided in this snapshot. */
static void bdev_evict_inode(struct inode *inode)
{}

static const struct super_operations bdev_sops =;

/* fs_context initializer for the bdev pseudo-fs — body elided in this snapshot. */
static int bd_init_fs_context(struct fs_context *fc)
{}

static struct file_system_type bd_type =;

struct super_block *blockdev_superblock __ro_after_init;
static struct vfsmount *blockdev_mnt __ro_after_init;
/* Only the non-static superblock pointer can be exported; name it explicitly. */
EXPORT_SYMBOL_GPL(blockdev_superblock);

/* Boot-time setup — presumably creates bdev_cachep and mounts the pseudo-fs;
 * body elided in this snapshot. */
void __init bdev_cache_init(void)
{}

/* Allocate a block_device for partition @partno of @disk (body elided). */
struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
{}

/* Update @bdev's capacity to @sectors (body elided in this snapshot). */
void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors)
{}

/* Register @bdev under device number @dev (body elided in this snapshot). */
void bdev_add(struct block_device *bdev, dev_t dev)
{}

/* Presumably removes @bdev's inode from the lookup hash (body elided). */
void bdev_unhash(struct block_device *bdev)
{}

/* Presumably drops the final reference on @bdev's backing inode (body elided). */
void bdev_drop(struct block_device *bdev)
{}

/* Presumably totals page-cache pages across all block devices (body elided). */
long nr_blockdev_pages(void)
{}

/**
 * bd_may_claim - test whether a block device can be claimed
 * @bdev: block device of interest
 * @holder: holder trying to claim @bdev
 * @hops: holder ops
 *
 * Test whether @bdev can be claimed by @holder.
 *
 * RETURNS:
 * %true if @bdev can be claimed, %false otherwise.
 *
 * (Implementation elided in this snapshot.)
 */
static bool bd_may_claim(struct block_device *bdev, void *holder,
		const struct blk_holder_ops *hops)
{}

/**
 * bd_prepare_to_claim - claim a block device
 * @bdev: block device of interest
 * @holder: holder trying to claim @bdev
 * @hops: holder ops.
 *
 * Claim @bdev.  This function fails if @bdev is already claimed by another
 * holder and waits if another claiming is in progress. On successful return,
 * the caller has ownership of bd_claiming and bd_holder[s].
 *
 * RETURNS:
 * 0 if @bdev can be claimed, -EBUSY otherwise.
 */
int bd_prepare_to_claim(struct block_device *bdev, void *holder,
		const struct blk_holder_ops *hops)
{}
EXPORT_SYMBOL_GPL(bd_prepare_to_claim); /* only for the loop driver */

/* Presumably clears the in-progress claim on @whole for @holder (body elided). */
static void bd_clear_claiming(struct block_device *whole, void *holder)
{}

/**
 * bd_finish_claiming - finish claiming of a block device
 * @bdev: block device of interest
 * @holder: holder that has claimed @bdev
 * @hops: block device holder operations
 *
 * Finish exclusive open of a block device. Mark the device as exclusively
 * open by the holder and wake up all waiters for exclusive open to finish.
 *
 * (Implementation elided in this snapshot.)
 */
static void bd_finish_claiming(struct block_device *bdev, void *holder,
		const struct blk_holder_ops *hops)
{}

/**
 * bd_abort_claiming - abort claiming of a block device
 * @bdev: block device of interest
 * @holder: holder that has claimed @bdev
 *
 * Abort claiming of a block device when the exclusive open failed. This can be
 * also used when exclusive open is not actually desired and we just needed
 * to block other exclusive openers for a while.
 */
void bd_abort_claiming(struct block_device *bdev, void *holder)
{}
EXPORT_SYMBOL(bd_abort_claiming);

/* Presumably releases holder state when an exclusive open ends (body elided). */
static void bd_end_claim(struct block_device *bdev, void *holder)
{}

/* Presumably syncs and invalidates @bdev's page-cache mapping (body elided). */
static void blkdev_flush_mapping(struct block_device *bdev)
{}

/* Close-path helper for a whole device (body elided in this snapshot). */
static void blkdev_put_whole(struct block_device *bdev)
{}

/* Open-path helper for a whole device; returns 0 or a negative errno —
 * presumably; body elided in this snapshot. */
static int blkdev_get_whole(struct block_device *bdev, blk_mode_t mode)
{}

/* Open-path helper for a partition (body elided in this snapshot). */
static int blkdev_get_part(struct block_device *part, blk_mode_t mode)
{}

/* Permission check for opening device @dev with @mode — presumably the
 * device-cgroup gate; body elided in this snapshot. */
int bdev_permission(dev_t dev, blk_mode_t mode, void *holder)
{}

/* Close-path helper for a partition (body elided in this snapshot). */
static void blkdev_put_part(struct block_device *part)
{}

/* Look up and reference the block_device for @dev without opening it —
 * presumably; body elided in this snapshot. */
struct block_device *blkdev_get_no_open(dev_t dev)
{}

/* Drop the reference taken by blkdev_get_no_open() — presumably; body elided. */
void blkdev_put_no_open(struct block_device *bdev)
{}

/* Predicate: are writes to @bdev currently blocked? (body elided) */
static bool bdev_writes_blocked(struct block_device *bdev)
{}

/* Mark @bdev so further writes are rejected — presumably part of the
 * bdev_allow_write_mounted policy; body elided in this snapshot. */
static void bdev_block_writes(struct block_device *bdev)
{}

/* Undo bdev_block_writes() (body elided in this snapshot). */
static void bdev_unblock_writes(struct block_device *bdev)
{}

/* Policy check: may @bdev be opened with @mode? (body elided) */
static bool bdev_may_open(struct block_device *bdev, blk_mode_t mode)
{}

/* Record that this opener holds write access when @mode allows writes —
 * presumably; body elided in this snapshot. */
static void bdev_claim_write_access(struct block_device *bdev, blk_mode_t mode)
{}

/* Predicate: has @bdev_file already yielded its exclusive claim? —
 * presumably; body elided in this snapshot. */
static inline bool bdev_unclaimed(const struct file *bdev_file)
{}

/* Release the write access recorded for @bdev_file — presumably the inverse
 * of bdev_claim_write_access(); body elided in this snapshot. */
static void bdev_yield_write_access(struct file *bdev_file)
{}

/**
 * bdev_open - open a block device
 * @bdev: block device to open
 * @mode: open mode (BLK_OPEN_*)
 * @holder: exclusive holder identifier
 * @hops: holder operations
 * @bdev_file: file for the block device
 *
 * Open the block device. If @holder is not %NULL, the block device is opened
 * with exclusive access.  Exclusive opens may nest for the same @holder.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * zero on success, -errno on failure.
 *
 * (Implementation elided in this snapshot.)
 */
int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
	      const struct blk_holder_ops *hops, struct file *bdev_file)
{}

/*
 * If BLK_OPEN_WRITE_IOCTL is set then this is a historical quirk
 * associated with the floppy driver where it has allowed ioctls if the
 * file was opened for writing, but does not allow reads or writes.
 * Make sure that this quirk is reflected in @f_flags.
 *
 * It can also happen if a block device is opened as O_RDWR | O_WRONLY.
 *
 * (Implementation elided in this snapshot.)
 */
static unsigned blk_to_file_flags(blk_mode_t mode)
{}

/* Open the block device numbered @dev and return its file — or an ERR_PTR,
 * presumably; body elided in this snapshot. */
struct file *bdev_file_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
				   const struct blk_holder_ops *hops)
{}
EXPORT_SYMBOL(bdev_file_open_by_dev);

/* Like bdev_file_open_by_dev(), but resolves the device from @path —
 * presumably; body elided in this snapshot. */
struct file *bdev_file_open_by_path(const char *path, blk_mode_t mode,
				    void *holder,
				    const struct blk_holder_ops *hops)
{}
EXPORT_SYMBOL(bdev_file_open_by_path);

/* Presumably gives up @bdev_file's exclusive claim (body elided). */
static inline void bd_yield_claim(struct file *bdev_file)
{}

/* Release path for an open block-device file (body elided in this snapshot). */
void bdev_release(struct file *bdev_file)
{}

/**
 * bdev_fput - yield claim to the block device and put the file
 * @bdev_file: open block device
 *
 * Yield claim on the block device and put the file. Ensure that the
 * block device can be reclaimed before the file is closed which is a
 * deferred operation.
 */
void bdev_fput(struct file *bdev_file)
{}
EXPORT_SYMBOL(bdev_fput);

/**
 * lookup_bdev() - Look up a struct block_device by name.
 * @pathname: Name of the block device in the filesystem.
 * @dev: Pointer to the block device's dev_t, if found.
 *
 * Lookup the block device's dev_t at @pathname in the current
 * namespace if possible and return it in @dev.
 *
 * Context: May sleep.
 * Return: 0 if succeeded, negative errno otherwise.
 */
int lookup_bdev(const char *pathname, dev_t *dev)
{}
EXPORT_SYMBOL(lookup_bdev);

/**
 * bdev_mark_dead - mark a block device as dead
 * @bdev: block device to operate on
 * @surprise: indicate a surprise removal
 *
 * Tell the file system that this device or media is dead.  If @surprise is set
 * to %true the device or media is already gone, if not we are preparing for an
 * orderly removal.
 *
 * This calls into the file system, which then typically syncs out all dirty data
 * and writes back inodes and then invalidates any cached data in the inodes on
 * the file system.  In addition we also invalidate the block device mapping.
 */
void bdev_mark_dead(struct block_device *bdev, bool surprise)
{}
/*
 * New drivers should not use this directly.  There are some drivers however
 * that needs this for historical reasons. For example, the DASD driver has
 * historically had a shutdown to offline mode that doesn't actually remove the
 * gendisk that otherwise looks a lot like a safe device removal.
 */
EXPORT_SYMBOL_GPL(bdev_mark_dead);

/* Sync all block devices; @wait presumably selects waiting vs fire-and-forget
 * writeback — body elided in this snapshot. */
void sync_bdevs(bool wait)
{}

/*
 * Handle STATX_{DIOALIGN, WRITE_ATOMIC} for block devices.
 * Fills @stat for the fields requested in @request_mask — presumably;
 * implementation elided in this snapshot.
 */
void bdev_statx(struct path *path, struct kstat *stat,
		u32 request_mask)
{}

/* Predicate: is @disk still alive (not removed)? Body elided in this snapshot. */
bool disk_live(struct gendisk *disk)
{}
EXPORT_SYMBOL_GPL(disk_live);

/* Return @bdev's current block size (body elided in this snapshot). */
unsigned int block_size(struct block_device *bdev)
{}
EXPORT_SYMBOL_GPL(block_size);

static int __init setup_bdev_allow_write_mounted(char *str)
{}
__setup();