linux/include/linux/blkdev.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Portions Copyright (C) 1992 Drew Eckhardt
 */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/types.h>
#include <linux/blk_types.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/minmax.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/gfp.h>
#include <linux/kdev_t.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/blkzoned.h>
#include <linux/sched.h>
#include <linux/sbitmap.h>
#include <linux/uuid.h>
#include <linux/xarray.h>
#include <linux/file.h>

struct module;
struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct blkcg_gq;
struct blk_flush_queue;
struct kiocb;
struct pr_ops;
struct rq_qos;
struct blk_queue_stats;
struct blk_stat_callback;
struct blk_crypto_profile;

extern const struct device_type disk_type;
extern const struct device_type part_type;
extern const struct class block_class;

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS

#define DISK_MAX_PARTS
#define DISK_NAME_LEN

#define PARTITION_META_INFO_VOLNAMELTH
/*
 * Enough for the string representation of any kind of UUID plus the terminating NUL.
 * EFI UUID is 36 characters. MSDOS UUID is 11 characters.
 */
#define PARTITION_META_INFO_UUIDLTH

struct partition_meta_info {};

/**
 * DOC: genhd capability flags
 *
 * ``GENHD_FL_REMOVABLE``: indicates that the block device gives access to
 * removable media.  When set, the device remains present even when media is not
 * inserted.  Shall not be set for devices which are removed entirely when the
 * media is removed.
 *
 * ``GENHD_FL_HIDDEN``: the block device is hidden; it doesn't produce events,
 * doesn't appear in sysfs, and can't be opened from userspace or using
 * blkdev_get*. Used for the underlying components of multipath devices.
 *
 * ``GENHD_FL_NO_PART``: partition support is disabled.  The kernel will not
 * scan for partitions from add_disk, and users can't add partitions manually.
 *
 */
enum {};
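
/*
 * Illustrative sketch (not taken from this header): a driver that wants to
 * suppress partition scanning would typically set the flag before registering
 * the disk; the gendisk ->flags field is assumed here, as the structure body
 * is not spelled out above.
 *
 *	disk->flags |= GENHD_FL_NO_PART;
 *	err = add_disk(disk);
 */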

enum {};

enum {};

struct disk_events;
struct badblocks;

enum blk_integrity_checksum {} __packed;

struct blk_integrity {};

blk_mode_t;

/* open for reading */
#define BLK_OPEN_READ
/* open for writing */
#define BLK_OPEN_WRITE
/* open exclusively (vs other exclusive openers) */
#define BLK_OPEN_EXCL
/* opened with O_NDELAY */
#define BLK_OPEN_NDELAY
/* open for "writes" only for ioctls (special hack for floppy.c) */
#define BLK_OPEN_WRITE_IOCTL
/* open is exclusive wrt all other BLK_OPEN_WRITE opens to the device */
#define BLK_OPEN_RESTRICT_WRITES
/* return partition scanning errors */
#define BLK_OPEN_STRICT_SCAN

struct gendisk {};

/**
 * disk_openers - return the number of openers for a disk
 * @disk: disk to check
 *
 * This returns the number of openers for a disk.  Note that this value is only
 * stable if disk->open_mutex is held.
 *
 * Note: Due to a quirk in the block layer open code, each open partition is
 * only counted once even if there are multiple openers.
 */
static inline unsigned int disk_openers(struct gendisk *disk)
{}
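
/*
 * Illustrative sketch: callers that need a stable count hold the disk's
 * open_mutex around the check (field name assumed, since the gendisk body is
 * not spelled out above).
 *
 *	mutex_lock(&disk->open_mutex);
 *	busy = disk_openers(disk) > 0;
 *	mutex_unlock(&disk->open_mutex);
 */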

/**
 * disk_has_partscan - return %true if partition scanning is enabled on a disk
 * @disk: disk to check
 *
 * Returns %true if partition scanning is enabled for @disk, or %false if
 * partition scanning is disabled either permanently or temporarily.
 */
static inline bool disk_has_partscan(struct gendisk *disk)
{}

/*
 * The gendisk is refcounted by the part0 block_device, and the bd_device
 * therein is also used for device model presentation in sysfs.
 */
#define dev_to_disk(device)
#define disk_to_dev(disk)

#if IS_REACHABLE(CONFIG_CDROM)
#define disk_to_cdi(disk)
#else
#define disk_to_cdi
#endif

static inline dev_t disk_devt(struct gendisk *disk)
{}

/* blk_validate_limits() validates bsize, so drivers don't usually need to */
static inline int blk_validate_block_size(unsigned long bsize)
{}

static inline bool blk_op_is_passthrough(blk_opf_t op)
{}

/* flags set by the driver in queue_limits.features */
blk_features_t;

/* supports a volatile write cache */
#define BLK_FEAT_WRITE_CACHE

/* supports passing on the FUA bit */
#define BLK_FEAT_FUA

/* rotational device (hard drive or floppy) */
#define BLK_FEAT_ROTATIONAL

/* contributes to the random number pool */
#define BLK_FEAT_ADD_RANDOM

/* do disk/partitions IO accounting */
#define BLK_FEAT_IO_STAT

/* don't modify data until writeback is done */
#define BLK_FEAT_STABLE_WRITES

/* always completes in submit context */
#define BLK_FEAT_SYNCHRONOUS

/* supports REQ_NOWAIT */
#define BLK_FEAT_NOWAIT

/* supports DAX */
#define BLK_FEAT_DAX

/* supports I/O polling */
#define BLK_FEAT_POLL

/* is a zoned device */
#define BLK_FEAT_ZONED

/* supports PCI(e) p2p requests */
#define BLK_FEAT_PCI_P2PDMA

/* skip this queue in blk_mq_(un)quiesce_tagset */
#define BLK_FEAT_SKIP_TAGSET_QUIESCE

/* bounce all highmem pages */
#define BLK_FEAT_BOUNCE_HIGH

/* undocumented magic for bcache */
#define BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE

/*
 * Flags automatically inherited when stacking limits.
 */
#define BLK_FEAT_INHERIT_MASK
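
/*
 * Illustrative sketch: a driver advertises features through the limits used
 * to allocate the disk, e.g. for a device with a volatile write cache that
 * also honours FUA:
 *
 *	lim.features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA;
 */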

/* internal flags in queue_limits.flags */
blk_flags_t;

/* do not send FLUSH/FUA commands despite advertising a write cache */
#define BLK_FLAG_WRITE_CACHE_DISABLED

/* I/O topology is misaligned */
#define BLK_FLAG_MISALIGNED

struct queue_limits {};

report_zones_cb;

#define BLK_ALL_ZONES
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data);
int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
		sector_t sectors, sector_t nr_sectors);
int blk_revalidate_disk_zones(struct gendisk *disk);
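
/*
 * Illustrative sketch: blkdev_report_zones() invokes the callback once per
 * reported zone.  The callback signature shown here is an assumption, since
 * the report_zones_cb typedef is not spelled out above.
 *
 *	static int copy_zone_cb(struct blk_zone *zone, unsigned int idx,
 *				void *data)
 *	{
 *		struct blk_zone *zones = data;
 *
 *		zones[idx] = *zone;
 *		return 0;
 *	}
 *
 *	nr = blkdev_report_zones(bdev, 0, nr_zones, copy_zone_cb, zones);
 *
 * A negative return value indicates an error; otherwise it is the number of
 * zones actually reported.
 */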

/*
 * Independent access ranges: struct blk_independent_access_range describes
 * a range of contiguous sectors that can be accessed using device command
 * execution resources that are independent from the resources used for
 * other access ranges. This is typically found with single-LUN multi-actuator
 * HDDs where each access range is served by a different set of heads.
 * The set of independent ranges supported by the device is defined using
 * struct blk_independent_access_ranges. The independent ranges must not overlap
 * and must include all sectors within the disk capacity (no sector holes
 * allowed).
 * For a device with multiple ranges, requests targeting sectors in different
 * ranges can be executed in parallel. A request can straddle an access range
 * boundary.
 */
struct blk_independent_access_range {};

struct blk_independent_access_ranges {};
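
/*
 * Illustrative sketch: a driver exposing two equally sized access ranges.
 * The ia_range[] member name is an assumption, as the structure bodies are
 * not spelled out above.
 *
 *	struct blk_independent_access_ranges *iars;
 *
 *	iars = disk_alloc_independent_access_ranges(disk, 2);
 *	if (iars) {
 *		iars->ia_range[0].sector = 0;
 *		iars->ia_range[0].nr_sectors = capacity / 2;
 *		iars->ia_range[1].sector = capacity / 2;
 *		iars->ia_range[1].nr_sectors = capacity - capacity / 2;
 *		disk_set_independent_access_ranges(disk, iars);
 *	}
 */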

struct request_queue {};

/* Keep blk_queue_flag_name[] in sync with the definitions below */
enum {};

#define QUEUE_FLAG_MQ_DEFAULT

void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);

#define blk_queue_dying(q)
#define blk_queue_init_done(q)
#define blk_queue_nomerges(q)
#define blk_queue_noxmerges(q)
#define blk_queue_nonrot(q)
#define blk_queue_io_stat(q)
#define blk_queue_dax(q)
#define blk_queue_pci_p2pdma(q)
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
#define blk_queue_rq_alloc_time(q)
#else
#define blk_queue_rq_alloc_time
#endif

#define blk_noretry_request(rq)
#define blk_queue_quiesced(q)
#define blk_queue_pm_only(q)
#define blk_queue_registered(q)
#define blk_queue_sq_sched(q)
#define blk_queue_skip_tagset_quiesce(q)

extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);

#define list_entry_rq(ptr)

#define dma_map_bvec(dev, bv, dir, attrs)

static inline bool queue_is_mq(struct request_queue *q)
{}

#ifdef CONFIG_PM
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{}
#else
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return RPM_ACTIVE;
}
#endif

static inline bool blk_queue_is_zoned(struct request_queue *q)
{}

#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int disk_nr_zones(struct gendisk *disk)
{}
bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs);
#else /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
	return 0;
}
static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
{
	return false;
}
#endif /* CONFIG_BLK_DEV_ZONED */

static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{}

static inline unsigned int bdev_nr_zones(struct block_device *bdev)
{}

static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{}

static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{}

static inline unsigned int blk_queue_depth(struct request_queue *q)
{}

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT
#define BLK_MIN_SG_TIMEOUT

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)

int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
				 const struct attribute_group **groups);
static inline int __must_check add_disk(struct gendisk *disk)
{}
void del_gendisk(struct gendisk *gp);
void invalidate_disk(struct gendisk *disk);
void set_disk_ro(struct gendisk *disk, bool read_only);
void disk_uevent(struct gendisk *disk, enum kobject_action action);

static inline u8 bdev_partno(const struct block_device *bdev)
{}

static inline bool bdev_test_flag(const struct block_device *bdev, unsigned flag)
{}

static inline void bdev_set_flag(struct block_device *bdev, unsigned flag)
{}

static inline void bdev_clear_flag(struct block_device *bdev, unsigned flag)
{}

static inline int get_disk_ro(struct gendisk *disk)
{}

static inline int bdev_read_only(struct block_device *bdev)
{}

bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
void disk_force_media_change(struct gendisk *disk);
void bdev_mark_dead(struct block_device *bdev, bool surprise);

void add_disk_randomness(struct gendisk *disk) __latent_entropy;
void rand_initialize_disk(struct gendisk *disk);

static inline sector_t get_start_sect(struct block_device *bdev)
{}

static inline sector_t bdev_nr_sectors(struct block_device *bdev)
{}

static inline loff_t bdev_nr_bytes(struct block_device *bdev)
{}

static inline sector_t get_capacity(struct gendisk *disk)
{}

static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
{}

int bdev_disk_changed(struct gendisk *disk, bool invalidate);

void put_disk(struct gendisk *disk);
struct gendisk *__blk_alloc_disk(struct queue_limits *lim, int node,
		struct lock_class_key *lkclass);

/**
 * blk_alloc_disk - allocate a gendisk structure
 * @lim: queue limits to be used for this disk.
 * @node_id: numa node to allocate on
 *
 * Allocate and pre-initialize a gendisk structure for use with BIO based
 * drivers.
 *
 * Returns an ERR_PTR on error, else the allocated disk.
 *
 * Context: can sleep
 */
#define blk_alloc_disk(lim, node_id)
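
/*
 * Illustrative sketch of a BIO based driver bringing up a disk.  The limits
 * shown, my_fops and nr_sectors are assumptions, not definitions from this
 * header.
 *
 *	struct queue_limits lim = {
 *		.logical_block_size	= 4096,
 *	};
 *	struct gendisk *disk;
 *
 *	disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
 *	if (IS_ERR(disk))
 *		return PTR_ERR(disk);
 *	disk->fops = &my_fops;
 *	set_capacity(disk, nr_sectors);
 *	err = add_disk(disk);
 *	if (err)
 *		put_disk(disk);
 */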

int __register_blkdev(unsigned int major, const char *name,
		void (*probe)(dev_t devt));
#define register_blkdev(major, name)
void unregister_blkdev(unsigned int major, const char *name);
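
/*
 * Illustrative sketch: passing 0 as the major number asks for a dynamically
 * allocated major, which is returned on success ("mydrv" is a placeholder
 * name).
 *
 *	major = register_blkdev(0, "mydrv");
 *	if (major < 0)
 *		return major;
 *	...
 *	unregister_blkdev(major, "mydrv");
 */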

bool disk_check_media_change(struct gendisk *disk);
void set_capacity(struct gendisk *disk, sector_t size);

#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
#else
static inline int bd_link_disk_holder(struct block_device *bdev,
				      struct gendisk *disk)
{
	return 0;
}
static inline void bd_unlink_disk_holder(struct block_device *bdev,
					 struct gendisk *disk)
{
}
#endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */

dev_t part_devt(struct gendisk *disk, u8 partno);
void inc_diskseq(struct gendisk *disk);
void blk_request_module(dev_t devt);

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
void submit_bio_noacct(struct bio *bio);
struct bio *bio_split_to_limits(struct bio *bio);

extern int blk_lld_busy(struct request_queue *q);
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);

/* Helper to convert REQ_OP_XXX to its string format XXX */
extern const char *blk_op_str(enum req_op op);

int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);
const char *blk_status_to_str(blk_status_t status);

/* only poll the hardware once, don't continue until a completion was found */
#define BLK_POLL_ONESHOT
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags);
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
			unsigned int flags);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{}

/* Helper to convert BLK_ZONE_COND_XXX to its string format XXX */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);

static inline unsigned int bio_zone_no(struct bio *bio)
{}

static inline bool bio_straddles_zones(struct bio *bio)
{}

/*
 * Return how much within the boundary is left to be used for I/O at a given
 * offset.
 */
static inline unsigned int blk_boundary_sectors_left(sector_t offset,
		unsigned int boundary_sectors)
{}
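
/*
 * Worked example: with boundary_sectors = 128 and offset = 200, the offset is
 * 200 % 128 = 72 sectors into the current boundary window, so 128 - 72 = 56
 * sectors are left before the boundary is crossed.
 */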

/**
 * queue_limits_start_update - start an atomic update of queue limits
 * @q:		queue to update
 *
 * This function starts an atomic update of the queue limits.  It takes a lock
 * to prevent other updates and returns a snapshot of the current limits that
 * the caller can modify.  The caller must call queue_limits_commit_update()
 * to finish the update.
 *
 * Context: process context.  The caller must have frozen the queue or ensured
 * that there is no outstanding I/O by other means.
 */
static inline struct queue_limits
queue_limits_start_update(struct request_queue *q)
{}
int queue_limits_commit_update(struct request_queue *q,
		struct queue_limits *lim);
int queue_limits_set(struct request_queue *q, struct queue_limits *lim);
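
/*
 * Illustrative sketch of the update pattern; max_hw_discard_sectors is used
 * as an example field since the queue_limits body is not spelled out above.
 *
 *	struct queue_limits lim;
 *	int err;
 *
 *	lim = queue_limits_start_update(q);
 *	lim.max_hw_discard_sectors = 0;
 *	err = queue_limits_commit_update(q, &lim);
 *	if (err)
 *		return err;
 *
 * If an error occurs after queue_limits_start_update() but before the commit,
 * queue_limits_cancel_update() must be called to release the lock.
 */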

/**
 * queue_limits_cancel_update - cancel an atomic update of queue limits
 * @q:		queue to update
 *
 * This function cancels an atomic update of the queue limits started by
 * queue_limits_start_update() and should be used when an error occurs after
 * starting the update.
 */
static inline void queue_limits_cancel_update(struct request_queue *q)
{}

/*
 * These helpers are for drivers that have sloppy feature negotiation and might
 * have to disable DISCARD, WRITE_ZEROES or SECURE_ERASE from the I/O
 * completion handler when the device returned an indicator that the respective
 * feature is not actually supported.  They are racy and the driver needs to
 * cope with that.  Try to avoid this scheme if you can.
 */
static inline void blk_queue_disable_discard(struct request_queue *q)
{}

static inline void blk_queue_disable_secure_erase(struct request_queue *q)
{}

static inline void blk_queue_disable_write_zeroes(struct request_queue *q)
{}
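
/*
 * Illustrative sketch: in a driver's completion path (details assumed), a
 * "not supported" status for a discard request can be handled by disabling
 * the feature so that no further discards are issued to the device.
 *
 *	if (status == BLK_STS_NOTSUPP)
 *		blk_queue_disable_discard(q);
 */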

/*
 * Access functions for manipulating queue properties
 */
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
		sector_t offset, const char *pfx);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);

struct blk_independent_access_ranges *
disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges);
void disk_set_independent_access_ranges(struct gendisk *disk,
				struct blk_independent_access_ranges *iars);

bool __must_check blk_get_queue(struct request_queue *);
extern void blk_put_queue(struct request_queue *);

void blk_mark_disk_dead(struct gendisk *disk);

#ifdef CONFIG_BLOCK
/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period. This allows sequential requests to be merged
 * into a single larger request. Because the requests are moved from a per-task
 * list to the device's request_queue in a batch, scalability improves as
 * contention on the request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge. For details, please see schedule() where
 * blk_flush_plug() is called.
 */
struct blk_plug {};

struct blk_plug_cb;
blk_plug_cb_fn;
struct blk_plug_cb {};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
					     void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short);
extern void blk_finish_plug(struct blk_plug *);
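
/*
 * Illustrative sketch of plugging around a batch of submissions; bio_a and
 * bio_b stand in for bios prepared by the caller.
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	submit_bio(bio_a);
 *	submit_bio(bio_b);
 *	blk_finish_plug(&plug);
 *
 * blk_finish_plug() flushes any requests still held on the plug list to the
 * device.
 */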

void __blk_flush_plug(struct blk_plug *plug, bool from_schedule);
static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{}

/*
 * tsk == current here
 */
static inline void blk_plug_invalidate_ts(struct task_struct *tsk)
{}

int blkdev_issue_flush(struct block_device *bdev);
long nr_blockdev_pages(void);
#else /* CONFIG_BLOCK */
struct blk_plug {
};

static inline void blk_start_plug_nr_ios(struct blk_plug *plug,
					 unsigned short nr_ios)
{
}

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
}

static inline void blk_plug_invalidate_ts(struct task_struct *tsk)
{
}

static inline int blkdev_issue_flush(struct block_device *bdev)
{
	return 0;
}

static inline long nr_blockdev_pages(void)
{
	return 0;
}
#endif /* CONFIG_BLOCK */

extern void blk_io_schedule(void);

int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask);
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop);
int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp);

#define BLKDEV_ZERO_NOUNMAP
#define BLKDEV_ZERO_NOFALLBACK
#define BLKDEV_ZERO_KILLABLE

extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags);
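
/*
 * Illustrative sketch: zero a range but fail instead of falling back to
 * writing zero pages when the device has no efficient zeroing mechanism;
 * the caller decides what to do on -EOPNOTSUPP.
 *
 *	err = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
 *				   BLKDEV_ZERO_NOFALLBACK);
 */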

static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{}

static inline bool bdev_is_partition(struct block_device *bdev)
{}

enum blk_default_limits {};

/*
 * Default upper limit for the software max_sectors limit used for
 * regular file system I/O.  This can be increased through sysfs.
 *
 * Not to be confused with the max_hw_sectors limit that is entirely
 * controlled by the driver, usually based on hardware limits.
 */
#define BLK_DEF_MAX_SECTORS_CAP

static inline unsigned long queue_segment_boundary(const struct request_queue *q)
{}

static inline unsigned long queue_virt_boundary(const struct request_queue *q)
{}

static inline unsigned int queue_max_sectors(const struct request_queue *q)
{}

static inline unsigned int queue_max_bytes(struct request_queue *q)
{}

static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
{}

static inline unsigned short queue_max_segments(const struct request_queue *q)
{}

static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
{}

static inline unsigned int queue_max_segment_size(const struct request_queue *q)
{}

static inline unsigned int queue_limits_max_zone_append_sectors(struct queue_limits *l)
{}

static inline unsigned int queue_max_zone_append_sectors(struct request_queue *q)
{}

static inline bool queue_emulates_zone_append(struct request_queue *q)
{}

static inline bool bdev_emulates_zone_append(struct block_device *bdev)
{}

static inline unsigned int
bdev_max_zone_append_sectors(struct block_device *bdev)
{}

static inline unsigned int bdev_max_segments(struct block_device *bdev)
{}

static inline unsigned queue_logical_block_size(const struct request_queue *q)
{}

static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
{}

static inline unsigned int queue_physical_block_size(const struct request_queue *q)
{}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{}

static inline unsigned int queue_io_min(const struct request_queue *q)
{}

static inline int bdev_io_min(struct block_device *bdev)
{}

static inline unsigned int queue_io_opt(const struct request_queue *q)
{}

static inline int bdev_io_opt(struct block_device *bdev)
{}

static inline unsigned int
queue_zone_write_granularity(const struct request_queue *q)
{}

static inline unsigned int
bdev_zone_write_granularity(struct block_device *bdev)
{}

int bdev_alignment_offset(struct block_device *bdev);
unsigned int bdev_discard_alignment(struct block_device *bdev);

static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
{}

static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
{}

static inline unsigned int
bdev_max_secure_erase_sectors(struct block_device *bdev)
{}

static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
{}

static inline bool bdev_nonrot(struct block_device *bdev)
{}

static inline bool bdev_synchronous(struct block_device *bdev)
{}

static inline bool bdev_stable_writes(struct block_device *bdev)
{}

static inline bool blk_queue_write_cache(struct request_queue *q)
{}

static inline bool bdev_write_cache(struct block_device *bdev)
{}

static inline bool bdev_fua(struct block_device *bdev)
{}

static inline bool bdev_nowait(struct block_device *bdev)
{}

static inline bool bdev_is_zoned(struct block_device *bdev)
{}

static inline unsigned int bdev_zone_no(struct block_device *bdev, sector_t sec)
{}

static inline sector_t bdev_zone_sectors(struct block_device *bdev)
{}

static inline sector_t bdev_offset_from_zone_start(struct block_device *bdev,
						   sector_t sector)
{}

static inline sector_t bio_offset_from_zone_start(struct bio *bio)
{}

static inline bool bdev_is_zone_start(struct block_device *bdev,
				      sector_t sector)
{}

static inline int queue_dma_alignment(const struct request_queue *q)
{}

static inline unsigned int
queue_atomic_write_unit_max_bytes(const struct request_queue *q)
{}

static inline unsigned int
queue_atomic_write_unit_min_bytes(const struct request_queue *q)
{}

static inline unsigned int
queue_atomic_write_boundary_bytes(const struct request_queue *q)
{}

static inline unsigned int
queue_atomic_write_max_bytes(const struct request_queue *q)
{}

static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
{}

static inline bool bdev_iter_is_aligned(struct block_device *bdev,
					struct iov_iter *iter)
{}

static inline int blk_lim_dma_alignment_and_pad(struct queue_limits *lim)
{}

static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
				 unsigned int len)
{}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{}

int kblockd_schedule_work(struct work_struct *work);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);

#define MODULE_ALIAS_BLOCKDEV(major,minor)
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major)

#ifdef CONFIG_BLK_INLINE_ENCRYPTION

bool blk_crypto_register(struct blk_crypto_profile *profile,
			 struct request_queue *q);

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline bool blk_crypto_register(struct blk_crypto_profile *profile,
				       struct request_queue *q)
{
	return true;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */

enum blk_unique_id {};

struct block_device_operations {};

#ifdef CONFIG_COMPAT
extern int blkdev_compat_ptr_ioctl(struct block_device *, blk_mode_t,
				      unsigned int, unsigned long);
#else
#define blkdev_compat_ptr_ioctl
#endif

static inline void blk_wake_io_task(struct task_struct *waiter)
{}

unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
				 unsigned long start_time);
void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
		      unsigned int sectors, unsigned long start_time);

unsigned long bio_start_io_acct(struct bio *bio);
void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
		struct block_device *orig_bdev);

/**
 * bio_end_io_acct - end I/O accounting for bio based drivers
 * @bio:	bio to end account for
 * @start_time:	start time returned by bio_start_io_acct()
 */
static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
{}
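
/*
 * Illustrative sketch for a BIO based driver:
 *
 *	unsigned long start_time = bio_start_io_acct(bio);
 *
 *	... process the bio ...
 *
 *	bio_end_io_acct(bio, start_time);
 */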

int bdev_read_only(struct block_device *bdev);
int set_blocksize(struct file *file, int size);

int lookup_bdev(const char *pathname, dev_t *dev);

void blkdev_show(struct seq_file *seqf, off_t offset);

#define BDEVNAME_SIZE
#define BDEVT_SIZE
#ifdef CONFIG_BLOCK
#define BLKDEV_MAJOR_MAX
#else
#define BLKDEV_MAJOR_MAX
#endif

struct blk_holder_ops {};

/*
 * For filesystems using @fs_holder_ops, the @holder argument passed to the
 * helpers used to open and claim block devices, such as bd_prepare_to_claim(),
 * must point to a superblock.
 */
extern const struct blk_holder_ops fs_holder_ops;

/*
 * Return the correct open flags for blkdev_get_by_* for super block flags
 * as stored in sb->s_flags.
 */
#define sb_open_mode(flags)

struct file *bdev_file_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
		const struct blk_holder_ops *hops);
struct file *bdev_file_open_by_path(const char *path, blk_mode_t mode,
		void *holder, const struct blk_holder_ops *hops);
int bd_prepare_to_claim(struct block_device *bdev, void *holder,
		const struct blk_holder_ops *hops);
void bd_abort_claiming(struct block_device *bdev, void *holder);
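
/*
 * Illustrative sketch: a filesystem opening its backing device with
 * fs_holder_ops passes its super_block as the holder; the path variable and
 * error handling are left to the caller.
 *
 *	struct file *bdev_file;
 *
 *	bdev_file = bdev_file_open_by_path(path, sb_open_mode(sb->s_flags),
 *					   sb, &fs_holder_ops);
 *	if (IS_ERR(bdev_file))
 *		return PTR_ERR(bdev_file);
 *	...
 *	bdev_fput(bdev_file);
 */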

/* just for blk-cgroup, don't use elsewhere */
struct block_device *blkdev_get_no_open(dev_t dev);
void blkdev_put_no_open(struct block_device *bdev);

struct block_device *I_BDEV(struct inode *inode);
struct block_device *file_bdev(struct file *bdev_file);
bool disk_live(struct gendisk *disk);
unsigned int block_size(struct block_device *bdev);

#ifdef CONFIG_BLOCK
void invalidate_bdev(struct block_device *bdev);
int sync_blockdev(struct block_device *bdev);
int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend);
int sync_blockdev_nowait(struct block_device *bdev);
void sync_bdevs(bool wait);
void bdev_statx(struct path *, struct kstat *, u32);
void printk_all_partitions(void);
int __init early_lookup_bdev(const char *pathname, dev_t *dev);
#else
static inline void invalidate_bdev(struct block_device *bdev)
{
}
static inline int sync_blockdev(struct block_device *bdev)
{
	return 0;
}
static inline int sync_blockdev_nowait(struct block_device *bdev)
{
	return 0;
}
static inline void sync_bdevs(bool wait)
{
}
static inline void bdev_statx(struct path *path, struct kstat *stat,
				u32 request_mask)
{
}
static inline void printk_all_partitions(void)
{
}
static inline int early_lookup_bdev(const char *pathname, dev_t *dev)
{
	return -EINVAL;
}
#endif /* CONFIG_BLOCK */

int bdev_freeze(struct block_device *bdev);
int bdev_thaw(struct block_device *bdev);
void bdev_fput(struct file *bdev_file);

struct io_comp_batch {};

static inline bool bdev_can_atomic_write(struct block_device *bdev)
{}

#define DEFINE_IO_COMP_BATCH(name)

#endif /* _LINUX_BLKDEV_H */