#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H
#include <linux/types.h>
#include <linux/blk_types.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/minmax.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/gfp.h>
#include <linux/kdev_t.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/blkzoned.h>
#include <linux/sched.h>
#include <linux/sbitmap.h>
#include <linux/uuid.h>
#include <linux/xarray.h>
#include <linux/file.h>
struct module;
struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct blkcg_gq;
struct blk_flush_queue;
struct kiocb;
struct pr_ops;
struct rq_qos;
struct blk_queue_stats;
struct blk_stat_callback;
struct blk_crypto_profile;
extern const struct device_type disk_type;
extern const struct device_type part_type;
extern const struct class block_class;
#define BLKCG_MAX_POLS …
#define DISK_MAX_PARTS …
#define DISK_NAME_LEN …
#define PARTITION_META_INFO_VOLNAMELTH …
#define PARTITION_META_INFO_UUIDLTH …
struct partition_meta_info { … };
enum { … };
enum { … };
enum { … };
struct disk_events;
struct badblocks;
enum blk_integrity_checksum { … } __packed;
struct blk_integrity { … };
typedef unsigned int __bitwise blk_mode_t;
#define BLK_OPEN_READ …
#define BLK_OPEN_WRITE …
#define BLK_OPEN_EXCL …
#define BLK_OPEN_NDELAY …
#define BLK_OPEN_WRITE_IOCTL …
#define BLK_OPEN_RESTRICT_WRITES …
#define BLK_OPEN_STRICT_SCAN …
struct gendisk { … };
static inline unsigned int disk_openers(struct gendisk *disk)
{ … }
static inline bool disk_has_partscan(struct gendisk *disk)
{ … }
#define dev_to_disk(device) …
#define disk_to_dev(disk) …
#if IS_REACHABLE(CONFIG_CDROM)
#define disk_to_cdi(disk) …
#else
#define disk_to_cdi(disk) …
#endif
static inline dev_t disk_devt(struct gendisk *disk)
{ … }
static inline int blk_validate_block_size(unsigned long bsize)
{ … }
static inline bool blk_op_is_passthrough(blk_opf_t op)
{ … }
typedef unsigned int __bitwise blk_features_t;
#define BLK_FEAT_WRITE_CACHE …
#define BLK_FEAT_FUA …
#define BLK_FEAT_ROTATIONAL …
#define BLK_FEAT_ADD_RANDOM …
#define BLK_FEAT_IO_STAT …
#define BLK_FEAT_STABLE_WRITES …
#define BLK_FEAT_SYNCHRONOUS …
#define BLK_FEAT_NOWAIT …
#define BLK_FEAT_DAX …
#define BLK_FEAT_POLL …
#define BLK_FEAT_ZONED …
#define BLK_FEAT_PCI_P2PDMA …
#define BLK_FEAT_SKIP_TAGSET_QUIESCE …
#define BLK_FEAT_BOUNCE_HIGH …
#define BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE …
#define BLK_FEAT_INHERIT_MASK …
typedef unsigned int __bitwise blk_flags_t;
#define BLK_FLAG_WRITE_CACHE_DISABLED …
#define BLK_FLAG_MISALIGNED …
struct queue_limits { … };
typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx, void *data);
#define BLK_ALL_ZONES …
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data);
int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
sector_t sectors, sector_t nr_sectors);
int blk_revalidate_disk_zones(struct gendisk *disk);
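/*
 * Usage sketch (not part of this header): reset one zone and then report it
 * back.  "zone_sector" and "my_cb" are illustrative caller-side names; my_cb
 * must be a report_zones_cb supplied by the caller.
 *
 *	ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET, zone_sector,
 *			       bdev_zone_sectors(bdev));
 *	if (!ret)
 *		ret = blkdev_report_zones(bdev, zone_sector, 1, my_cb, NULL);
 */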
struct blk_independent_access_range { … };
struct blk_independent_access_ranges { … };
struct request_queue { … };
enum { … };
#define QUEUE_FLAG_MQ_DEFAULT …
void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
#define blk_queue_dying(q) …
#define blk_queue_init_done(q) …
#define blk_queue_nomerges(q) …
#define blk_queue_noxmerges(q) …
#define blk_queue_nonrot(q) …
#define blk_queue_io_stat(q) …
#define blk_queue_dax(q) …
#define blk_queue_pci_p2pdma(q) …
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
#define blk_queue_rq_alloc_time(q) …
#else
#define blk_queue_rq_alloc_time(q) …
#endif
#define blk_noretry_request(rq) …
#define blk_queue_quiesced(q) …
#define blk_queue_pm_only(q) …
#define blk_queue_registered(q) …
#define blk_queue_sq_sched(q) …
#define blk_queue_skip_tagset_quiesce(q) …
extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);
#define list_entry_rq(ptr) …
#define dma_map_bvec(dev, bv, dir, attrs) …
static inline bool queue_is_mq(struct request_queue *q)
{ … }
#ifdef CONFIG_PM
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{ … }
#else
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
return RPM_ACTIVE;
}
#endif
static inline bool blk_queue_is_zoned(struct request_queue *q)
{ … }
#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int disk_nr_zones(struct gendisk *disk)
{ … }
bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs);
#else
static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
return 0;
}
static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
{
return false;
}
#endif
static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{ … }
static inline unsigned int bdev_nr_zones(struct block_device *bdev)
{ … }
static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{ … }
static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{ … }
static inline unsigned int blk_queue_depth(struct request_queue *q)
{ … }
#define BLK_DEFAULT_SG_TIMEOUT …
#define BLK_MIN_SG_TIMEOUT …
#define for_each_bio(_bio) …
int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
const struct attribute_group **groups);
static inline int __must_check add_disk(struct gendisk *disk)
{ … }
void del_gendisk(struct gendisk *gp);
void invalidate_disk(struct gendisk *disk);
void set_disk_ro(struct gendisk *disk, bool read_only);
void disk_uevent(struct gendisk *disk, enum kobject_action action);
static inline u8 bdev_partno(const struct block_device *bdev)
{ … }
static inline bool bdev_test_flag(const struct block_device *bdev, unsigned flag)
{ … }
static inline void bdev_set_flag(struct block_device *bdev, unsigned flag)
{ … }
static inline void bdev_clear_flag(struct block_device *bdev, unsigned flag)
{ … }
static inline int get_disk_ro(struct gendisk *disk)
{ … }
static inline int bdev_read_only(struct block_device *bdev)
{ … }
bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
void disk_force_media_change(struct gendisk *disk);
void bdev_mark_dead(struct block_device *bdev, bool surprise);
void add_disk_randomness(struct gendisk *disk) __latent_entropy;
void rand_initialize_disk(struct gendisk *disk);
static inline sector_t get_start_sect(struct block_device *bdev)
{ … }
static inline sector_t bdev_nr_sectors(struct block_device *bdev)
{ … }
static inline loff_t bdev_nr_bytes(struct block_device *bdev)
{ … }
static inline sector_t get_capacity(struct gendisk *disk)
{ … }
static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
{ … }
int bdev_disk_changed(struct gendisk *disk, bool invalidate);
void put_disk(struct gendisk *disk);
struct gendisk *__blk_alloc_disk(struct queue_limits *lim, int node,
struct lock_class_key *lkclass);
#define blk_alloc_disk(lim, node_id) …
int __register_blkdev(unsigned int major, const char *name,
void (*probe)(dev_t devt));
#define register_blkdev(major, name) …
void unregister_blkdev(unsigned int major, const char *name);
bool disk_check_media_change(struct gendisk *disk);
void set_capacity(struct gendisk *disk, sector_t size);
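/*
 * Usage sketch (not part of this header): a minimal bio-based driver bringing
 * up and tearing down a gendisk.  MY_MAJOR, my_fops and nr_sectors are
 * placeholders and error handling is abbreviated.
 *
 *	struct queue_limits lim = { };
 *	struct gendisk *disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
 *
 *	if (IS_ERR(disk))
 *		return PTR_ERR(disk);
 *	disk->major = MY_MAJOR;
 *	disk->first_minor = 0;
 *	disk->minors = 1;
 *	disk->fops = &my_fops;
 *	set_capacity(disk, nr_sectors);
 *	err = add_disk(disk);
 *	...
 *	del_gendisk(disk);
 *	put_disk(disk);
 */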
#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
#else
static inline int bd_link_disk_holder(struct block_device *bdev,
struct gendisk *disk)
{
return 0;
}
static inline void bd_unlink_disk_holder(struct block_device *bdev,
struct gendisk *disk)
{
}
#endif
dev_t part_devt(struct gendisk *disk, u8 partno);
void inc_diskseq(struct gendisk *disk);
void blk_request_module(dev_t devt);
extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
void submit_bio_noacct(struct bio *bio);
struct bio *bio_split_to_limits(struct bio *bio);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern const char *blk_op_str(enum req_op op);
int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);
const char *blk_status_to_str(blk_status_t status);
#define BLK_POLL_ONESHOT …
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags);
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
unsigned int flags);
static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{ … }
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);
static inline unsigned int bio_zone_no(struct bio *bio)
{ … }
static inline bool bio_straddles_zones(struct bio *bio)
{ … }
static inline unsigned int blk_boundary_sectors_left(sector_t offset,
unsigned int boundary_sectors)
{ … }
static inline struct queue_limits
queue_limits_start_update(struct request_queue *q)
{ … }
int queue_limits_commit_update(struct request_queue *q,
struct queue_limits *lim);
int queue_limits_set(struct request_queue *q, struct queue_limits *lim);
static inline void queue_limits_cancel_update(struct request_queue *q)
{ … }
static inline void blk_queue_disable_discard(struct request_queue *q)
{ … }
static inline void blk_queue_disable_secure_erase(struct request_queue *q)
{ … }
static inline void blk_queue_disable_write_zeroes(struct request_queue *q)
{ … }
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
sector_t offset);
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
sector_t offset, const char *pfx);
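/*
 * Usage sketch (not part of this header): how a stacking driver might fold a
 * component device's limits into its own.  "component_bdev" and "data_offset"
 * are illustrative.
 *
 *	struct queue_limits lim;
 *
 *	blk_set_stacking_limits(&lim);
 *	queue_limits_stack_bdev(&lim, component_bdev, data_offset,
 *				disk->disk_name);
 */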
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
struct blk_independent_access_ranges *
disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges);
void disk_set_independent_access_ranges(struct gendisk *disk,
struct blk_independent_access_ranges *iars);
bool __must_check blk_get_queue(struct request_queue *);
extern void blk_put_queue(struct request_queue *);
void blk_mark_disk_dead(struct gendisk *disk);
#ifdef CONFIG_BLOCK
struct blk_plug { … };
struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb { … };
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short);
extern void blk_finish_plug(struct blk_plug *);
void __blk_flush_plug(struct blk_plug *plug, bool from_schedule);
static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{ … }
static inline void blk_plug_invalidate_ts(struct task_struct *tsk)
{ … }
int blkdev_issue_flush(struct block_device *bdev);
long nr_blockdev_pages(void);
#else
struct blk_plug {
};
static inline void blk_start_plug_nr_ios(struct blk_plug *plug,
unsigned short nr_ios)
{
}
static inline void blk_start_plug(struct blk_plug *plug)
{
}
static inline void blk_finish_plug(struct blk_plug *plug)
{
}
static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
}
static inline void blk_plug_invalidate_ts(struct task_struct *tsk)
{
}
static inline int blkdev_issue_flush(struct block_device *bdev)
{
return 0;
}
static inline long nr_blockdev_pages(void)
{
return 0;
}
#endif
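/*
 * Usage sketch (not part of this header): batching submissions under a plug
 * so the block layer can merge and dispatch them together.
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	... submit a batch of bios with submit_bio() ...
 *	blk_finish_plug(&plug);
 */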
extern void blk_io_schedule(void);
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask);
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, struct bio **biop);
int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp);
#define BLKDEV_ZERO_NOUNMAP …
#define BLKDEV_ZERO_NOFALLBACK …
#define BLKDEV_ZERO_KILLABLE …
extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
unsigned flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, unsigned flags);
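/*
 * Usage sketch (not part of this header): discarding and zeroing a range.
 * "start" and "nr_sects" are illustrative sector values.
 *
 *	err = blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL);
 *	...
 *	err = blkdev_issue_zeroout(bdev, start, nr_sects, GFP_KERNEL,
 *				   BLKDEV_ZERO_NOFALLBACK);
 */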
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{ … }
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
sector_t nr_blocks, gfp_t gfp_mask)
{ … }
static inline bool bdev_is_partition(struct block_device *bdev)
{ … }
enum blk_default_limits { … };
#define BLK_DEF_MAX_SECTORS_CAP …
static inline unsigned long queue_segment_boundary(const struct request_queue *q)
{ … }
static inline unsigned long queue_virt_boundary(const struct request_queue *q)
{ … }
static inline unsigned int queue_max_sectors(const struct request_queue *q)
{ … }
static inline unsigned int queue_max_bytes(struct request_queue *q)
{ … }
static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
{ … }
static inline unsigned short queue_max_segments(const struct request_queue *q)
{ … }
static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
{ … }
static inline unsigned int queue_max_segment_size(const struct request_queue *q)
{ … }
static inline unsigned int queue_limits_max_zone_append_sectors(struct queue_limits *l)
{ … }
static inline unsigned int queue_max_zone_append_sectors(struct request_queue *q)
{ … }
static inline bool queue_emulates_zone_append(struct request_queue *q)
{ … }
static inline bool bdev_emulates_zone_append(struct block_device *bdev)
{ … }
static inline unsigned int
bdev_max_zone_append_sectors(struct block_device *bdev)
{ … }
static inline unsigned int bdev_max_segments(struct block_device *bdev)
{ … }
static inline unsigned queue_logical_block_size(const struct request_queue *q)
{ … }
static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
{ … }
static inline unsigned int queue_physical_block_size(const struct request_queue *q)
{ … }
static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{ … }
static inline unsigned int queue_io_min(const struct request_queue *q)
{ … }
static inline int bdev_io_min(struct block_device *bdev)
{ … }
static inline unsigned int queue_io_opt(const struct request_queue *q)
{ … }
static inline int bdev_io_opt(struct block_device *bdev)
{ … }
static inline unsigned int
queue_zone_write_granularity(const struct request_queue *q)
{ … }
static inline unsigned int
bdev_zone_write_granularity(struct block_device *bdev)
{ … }
int bdev_alignment_offset(struct block_device *bdev);
unsigned int bdev_discard_alignment(struct block_device *bdev);
static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
{ … }
static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
{ … }
static inline unsigned int
bdev_max_secure_erase_sectors(struct block_device *bdev)
{ … }
static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
{ … }
static inline bool bdev_nonrot(struct block_device *bdev)
{ … }
static inline bool bdev_synchronous(struct block_device *bdev)
{ … }
static inline bool bdev_stable_writes(struct block_device *bdev)
{ … }
static inline bool blk_queue_write_cache(struct request_queue *q)
{ … }
static inline bool bdev_write_cache(struct block_device *bdev)
{ … }
static inline bool bdev_fua(struct block_device *bdev)
{ … }
static inline bool bdev_nowait(struct block_device *bdev)
{ … }
static inline bool bdev_is_zoned(struct block_device *bdev)
{ … }
static inline unsigned int bdev_zone_no(struct block_device *bdev, sector_t sec)
{ … }
static inline sector_t bdev_zone_sectors(struct block_device *bdev)
{ … }
static inline sector_t bdev_offset_from_zone_start(struct block_device *bdev,
sector_t sector)
{ … }
static inline sector_t bio_offset_from_zone_start(struct bio *bio)
{ … }
static inline bool bdev_is_zone_start(struct block_device *bdev,
sector_t sector)
{ … }
static inline int queue_dma_alignment(const struct request_queue *q)
{ … }
static inline unsigned int
queue_atomic_write_unit_max_bytes(const struct request_queue *q)
{ … }
static inline unsigned int
queue_atomic_write_unit_min_bytes(const struct request_queue *q)
{ … }
static inline unsigned int
queue_atomic_write_boundary_bytes(const struct request_queue *q)
{ … }
static inline unsigned int
queue_atomic_write_max_bytes(const struct request_queue *q)
{ … }
static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
{ … }
static inline bool bdev_iter_is_aligned(struct block_device *bdev,
struct iov_iter *iter)
{ … }
static inline int blk_lim_dma_alignment_and_pad(struct queue_limits *lim)
{ … }
static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
unsigned int len)
{ … }
static inline unsigned int blksize_bits(unsigned int size)
{ … }
int kblockd_schedule_work(struct work_struct *work);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
#define MODULE_ALIAS_BLOCKDEV(major,minor) …
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) …
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
bool blk_crypto_register(struct blk_crypto_profile *profile,
struct request_queue *q);
#else
static inline bool blk_crypto_register(struct blk_crypto_profile *profile,
struct request_queue *q)
{
return true;
}
#endif
enum blk_unique_id { … };
struct block_device_operations { … };
#ifdef CONFIG_COMPAT
extern int blkdev_compat_ptr_ioctl(struct block_device *, blk_mode_t,
unsigned int, unsigned long);
#else
#define blkdev_compat_ptr_ioctl …
#endif
static inline void blk_wake_io_task(struct task_struct *waiter)
{ … }
unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
unsigned long start_time);
void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
unsigned int sectors, unsigned long start_time);
unsigned long bio_start_io_acct(struct bio *bio);
void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
struct block_device *orig_bdev);
static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
{ … }
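/*
 * Usage sketch (not part of this header): I/O accounting in a bio-based
 * driver that does not remap the bio.
 *
 *	unsigned long start_time = bio_start_io_acct(bio);
 *
 *	... process the bio ...
 *	bio_end_io_acct(bio, start_time);
 */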
int set_blocksize(struct file *file, int size);
int lookup_bdev(const char *pathname, dev_t *dev);
void blkdev_show(struct seq_file *seqf, off_t offset);
#define BDEVNAME_SIZE …
#define BDEVT_SIZE …
#ifdef CONFIG_BLOCK
#define BLKDEV_MAJOR_MAX …
#else
#define BLKDEV_MAJOR_MAX …
#endif
struct blk_holder_ops { … };
extern const struct blk_holder_ops fs_holder_ops;
#define sb_open_mode(flags) …
struct file *bdev_file_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
const struct blk_holder_ops *hops);
struct file *bdev_file_open_by_path(const char *path, blk_mode_t mode,
void *holder, const struct blk_holder_ops *hops);
int bd_prepare_to_claim(struct block_device *bdev, void *holder,
const struct blk_holder_ops *hops);
void bd_abort_claiming(struct block_device *bdev, void *holder);
struct block_device *blkdev_get_no_open(dev_t dev);
void blkdev_put_no_open(struct block_device *bdev);
struct block_device *I_BDEV(struct inode *inode);
struct block_device *file_bdev(struct file *bdev_file);
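/*
 * Usage sketch (not part of this header): opening a block device by path and
 * reaching the underlying block_device.  "path" and "holder" are illustrative;
 * a NULL blk_holder_ops pointer is used for brevity.
 *
 *	struct file *bdev_file = bdev_file_open_by_path(path,
 *			BLK_OPEN_READ | BLK_OPEN_WRITE, holder, NULL);
 *
 *	if (IS_ERR(bdev_file))
 *		return PTR_ERR(bdev_file);
 *	bdev = file_bdev(bdev_file);
 *	...
 *	bdev_fput(bdev_file);
 */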
bool disk_live(struct gendisk *disk);
unsigned int block_size(struct block_device *bdev);
#ifdef CONFIG_BLOCK
void invalidate_bdev(struct block_device *bdev);
int sync_blockdev(struct block_device *bdev);
int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend);
int sync_blockdev_nowait(struct block_device *bdev);
void sync_bdevs(bool wait);
void bdev_statx(struct path *, struct kstat *, u32);
void printk_all_partitions(void);
int __init early_lookup_bdev(const char *pathname, dev_t *dev);
#else
static inline void invalidate_bdev(struct block_device *bdev)
{
}
static inline int sync_blockdev(struct block_device *bdev)
{
return 0;
}
static inline int sync_blockdev_nowait(struct block_device *bdev)
{
return 0;
}
static inline void sync_bdevs(bool wait)
{
}
static inline void bdev_statx(struct path *path, struct kstat *stat,
u32 request_mask)
{
}
static inline void printk_all_partitions(void)
{
}
static inline int early_lookup_bdev(const char *pathname, dev_t *dev)
{
return -EINVAL;
}
#endif
int bdev_freeze(struct block_device *bdev);
int bdev_thaw(struct block_device *bdev);
void bdev_fput(struct file *bdev_file);
struct io_comp_batch { … };
static inline bool bdev_can_atomic_write(struct block_device *bdev)
{ … }
#define DEFINE_IO_COMP_BATCH(name) …
#endif