/* linux/include/linux/blk_types.h */

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants.  Directly include this file only to
 * break include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/device.h>
#include <linux/ktime.h>
#include <linux/rw_hint.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct io_context;
struct cgroup_subsys_state;
/* Completion callback invoked by bio_endio() when a bio finishes. */
typedef void (bio_end_io_t) (struct bio *);
struct bio_crypt_ctx;

/*
 * The basic unit of block I/O is a sector. It is used in a number of contexts
 * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
 * bytes. Variables of type sector_t represent an offset or size that is a
 * multiple of 512 bytes. Hence these two constants.
 */
#ifndef SECTOR_SHIFT
#define SECTOR_SHIFT 9
#endif
#ifndef SECTOR_SIZE
#define SECTOR_SIZE (1 << SECTOR_SHIFT)
#endif

/* Number of sectors per page, and mask of the in-page sector offset. */
#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK		(PAGE_SECTORS - 1)

struct block_device {} __randomize_layout;

/* Whole-disk block device a partition belongs to. */
#define bdev_whole(_bdev) \
	((_bdev)->bd_disk->part0)

/* Map an embedded struct device back to its block_device. */
#define dev_to_bdev(device) \
	container_of((device), struct block_device, bd_device)

/* sysfs kobject of a block device. */
#define bdev_kobj(_bdev) \
	(&((_bdev)->bd_device.kobj))

/*
 * Block error status values.  See block/blk-core:blk_errors for the details.
 */
blk_status_t;
blk_short_t;
#define BLK_STS_OK
#define BLK_STS_NOTSUPP
#define BLK_STS_TIMEOUT
#define BLK_STS_NOSPC
#define BLK_STS_TRANSPORT
#define BLK_STS_TARGET
#define BLK_STS_RESV_CONFLICT
#define BLK_STS_MEDIUM
#define BLK_STS_PROTECTION
#define BLK_STS_RESOURCE
#define BLK_STS_IOERR

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE

/*
 * BLK_STS_AGAIN should only be returned if RQF_NOWAIT is set
 * and the bio would block (cf bio_wouldblock_error())
 */
#define BLK_STS_AGAIN

/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device specific
 * resources that are consumed for IO. If the driver fails allocating these
 * resources, we know that inflight (or pending) IO will free these
 * resource upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system wide resources.
 */
#define BLK_STS_DEV_RESOURCE

/*
 * BLK_STS_ZONE_OPEN_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently open. The same command should be successful if resubmitted
 * after the number of open zones decreases below the device's limits, which is
 * reported in the request_queue's max_open_zones.
 */
#define BLK_STS_ZONE_OPEN_RESOURCE

/*
 * BLK_STS_ZONE_ACTIVE_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently active. The same command should be successful if resubmitted
 * after the number of active zones decreases below the device's limits, which
 * is reported in the request_queue's max_active_zones.
 */
#define BLK_STS_ZONE_ACTIVE_RESOURCE

/*
 * BLK_STS_OFFLINE is returned from the driver when the target device is offline
 * or is being taken offline. This could help differentiate the case where a
 * device is intentionally being shut down from a real I/O error.
 */
#define BLK_STS_OFFLINE

/*
 * BLK_STS_DURATION_LIMIT is returned from the driver when the target device
 * aborted the command because it exceeded one of its Command Duration Limits.
 */
#define BLK_STS_DURATION_LIMIT

/*
 * Invalid size or alignment.
 */
#define BLK_STS_INVAL

/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
	/* These reflect the state of the device/medium itself, so retrying
	 * the same request on another path cannot succeed. */
	switch (error) {
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_RESV_CONFLICT:
	case BLK_STS_MEDIUM:
	case BLK_STS_PROTECTION:
		return false;
	}

	/* Anything else could be a path failure, so should be retried */
	return true;
}

struct bio_issue {};

blk_opf_t;

blk_qc_t;
#define BLK_QC_T_NONE

/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)
 */
struct bio {};

/* bio_reset() preserves everything from bi_max_vecs onwards. */
#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)
#define BIO_MAX_SECTORS		(UINT_MAX >> SECTOR_SHIFT)

/*
 * bio flags
 */
enum {
	BIO_PAGE_PINNED,	/* Unpin pages in bio_release_pages() */
	BIO_CLONED,		/* doesn't own data */
	BIO_BOUNCED,		/* bio is a bounce bio */
	BIO_QUIET,		/* Make BIO Quiet */
	BIO_CHAIN,		/* chained bio, ->bi_remaining in effect */
	BIO_REFFED,		/* bio has elevated ->bi_cnt */
	BIO_BPS_THROTTLED,	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
	BIO_TRACE_COMPLETION,	/* bio_endio() should trace the final completion
				 * of this bio. */
	BIO_CGROUP_ACCT,	/* has been accounted to a cgroup */
	BIO_QOS_THROTTLED,	/* bio went through rq_qos throttle path */
	BIO_QOS_MERGED,		/* but went through rq_qos merge path */
	BIO_REMAPPED,
	BIO_ZONE_WRITE_PLUGGING, /* bio handled through zone write plugging */
	BIO_EMULATES_ZONE_APPEND, /* bio emulates a zone append operation */
	BIO_FLAG_LAST
};

blk_mq_req_flags_t;

#define REQ_OP_BITS
#define REQ_OP_MASK
#define REQ_FLAG_BITS

/**
 * enum req_op - Operations common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If a operation does not transfer data the least significant bit has no
 * meaning.
 */
enum req_op {};

/* Keep cmd_flag_name[] in sync with the definitions below */
enum req_flag_bits {};

#define REQ_FAILFAST_DEV
#define REQ_FAILFAST_TRANSPORT
#define REQ_FAILFAST_DRIVER
#define REQ_SYNC
#define REQ_META
#define REQ_PRIO
#define REQ_NOMERGE
#define REQ_IDLE
#define REQ_INTEGRITY
#define REQ_FUA
#define REQ_PREFLUSH
#define REQ_RAHEAD
#define REQ_BACKGROUND
#define REQ_NOWAIT
#define REQ_POLLED
#define REQ_ALLOC_CACHE
#define REQ_SWAP
#define REQ_DRV
#define REQ_FS_PRIVATE
#define REQ_ATOMIC

#define REQ_NOUNMAP

#define REQ_FAILFAST_MASK

#define REQ_NOMERGE_FLAGS

/* Buckets for per-device I/O accounting (ordering matches op_stat_group()). */
enum stat_group {
	STAT_READ,
	STAT_WRITE,
	STAT_DISCARD,
	STAT_FLUSH,

	NR_STAT_GROUPS
};

static inline enum req_op bio_op(const struct bio *bio)
{}

/* Odd op numbers transfer data TO the device (see enum req_op comment). */
static inline bool op_is_write(blk_opf_t op)
{
	return !!(op & (__force blk_opf_t)1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(blk_opf_t op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag.  Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(blk_opf_t op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}

static inline bool op_is_discard(blk_opf_t op)
{
	return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}

/*
 * Check if a bio or request operation is a zone management operation, with
 * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
 * due to its different handling in the block layer and device response in
 * case of command failure.
 */
static inline bool op_is_zone_mgmt(enum req_op op)
{}

static inline int op_stat_group(enum req_op op)
{}

struct blk_rq_stat {};

#endif /* __LINUX_BLK_TYPES_H */