#ifndef __XEN_BLKIF__BACKEND__COMMON_H__
#define __XEN_BLKIF__BACKEND__COMMON_H__
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/rbtree.h>
#include <asm/setup.h>
#include <asm/hypervisor.h>
#include <xen/grant_table.h>
#include <xen/page.h>
#include <xen/xenbus.h>
#include <xen/interface/io/ring.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>
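/*
 * Module parameters (descriptive note): the maximum number of shared-ring
 * pages expressed as a power-of-two order, and the maximum number of rings
 * (hardware queues) per backend device negotiated with the frontend.
 */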
extern unsigned int xen_blkif_max_ring_order;
extern unsigned int xenblk_max_queues;
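/*
 * Indirect descriptor limits: how many segments an indirect request may
 * carry, and how many grant pages are needed to hold the descriptors for a
 * given number of segments.
 */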
#define MAX_INDIRECT_SEGMENTS …
#define XEN_PAGES_PER_SEGMENT …
#define XEN_PAGES_PER_INDIRECT_FRAME …
#define SEGS_PER_INDIRECT_FRAME …
#define MAX_INDIRECT_PAGES …
#define INDIRECT_PAGES(_segs) …
struct blkif_common_request { … };
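/* i386 protocol version */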
struct blkif_x86_32_request_rw { … } __attribute__((packed));
struct blkif_x86_32_request_discard { … } __attribute__((packed));
struct blkif_x86_32_request_other { … } __attribute__((packed));
struct blkif_x86_32_request_indirect { … } __attribute__((packed));
struct blkif_x86_32_request { … } __attribute__((packed));
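/* x86_64 protocol version */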
struct blkif_x86_64_request_rw { … } __attribute__((packed));
struct blkif_x86_64_request_discard { … } __attribute__((packed));
struct blkif_x86_64_request_other { … } __attribute__((packed));
struct blkif_x86_64_request_indirect { … } __attribute__((packed));
struct blkif_x86_64_request { … } __attribute__((packed));
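/*
 * Ring types for each guest ABI.  DEFINE_RING_TYPES() generates the shared
 * ring and the front/back ring structures; the backend keeps whichever
 * back-ring view matches the negotiated protocol in union blkif_back_rings
 * below.
 */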
DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
		  struct blkif_response);
DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
		  struct blkif_response __packed);
DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
		  struct blkif_response);
union blkif_back_rings { … };
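/*
 * Which request/response ABI the frontend speaks; advertised by the frontend
 * through its "protocol" xenbus node and used to select the back-ring view
 * above.  The default applies when no protocol is advertised and depends on
 * the architecture.
 */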
enum blkif_protocol { … };
#ifdef CONFIG_X86
#define BLKIF_PROTOCOL_DEFAULT …
#else
#define BLKIF_PROTOCOL_DEFAULT …
#endif
struct xen_vbd { … };
struct backend_info;
#define XEN_BLKIF_REQS_PER_PAGE …
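/*
 * A persistently mapped grant.  Persistent grants are kept in a per-ring
 * red-black tree (hence the linux/rbtree.h include above) so frequently used
 * frontend pages do not have to be remapped for every request.
 */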
struct persistent_gnt { … };
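/*
 * Per-ring (per hardware queue) state, and the per-device backend state that
 * ties the rings to a virtual block device and its xenbus entry.
 */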
struct xen_blkif_ring { … };
struct xen_blkif { … };
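/*
 * Bookkeeping for an in-flight request: its segment buffers, the grant pages
 * mapped on its behalf, and the pending_req that tracks it until the bios
 * complete and a response is queued on the ring.
 */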
struct seg_buf { … };
struct grant_page { … };
struct pending_req { … };
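/*
 * vbd_sz() reports the size of the virtual block device in sectors;
 * xen_blkif_get()/xen_blkif_put() take and drop a reference on a xen_blkif.
 */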
#define vbd_sz(_v) …
#define xen_blkif_get(_b) …
#define xen_blkif_put(_b) …
struct phys_req { … };
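/*
 * Backend entry points: interface and xenbus init/teardown, the event-channel
 * interrupt handler kicked by the frontend, and xen_blkif_schedule(), the
 * per-ring kernel thread that services requests.
 */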
int xen_blkif_interface_init(void);
void xen_blkif_interface_fini(void);
int xen_blkif_xenbus_init(void);
void xen_blkif_xenbus_fini(void);
irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
int xen_blkif_schedule(void *arg);
void xen_blkbk_free_caches(struct xen_blkif_ring *ring);
int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
			      struct backend_info *be, int state);
int xen_blkbk_barrier(struct xenbus_transaction xbt,
		      struct backend_info *be, int state);
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
void xen_blkbk_unmap_purged_grants(struct work_struct *work);
#endif