/* linux/drivers/block/xen-blkfront.c */

/*
 * blkfront.c
 *
 * XenLinux virtual block device driver.
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 * Copyright (c) 2004, Christian Limpach
 * Copyright (c) 2004, Andrew Warfield
 * Copyright (c) 2005, Christopher Clark
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/major.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/sched/mm.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/platform_pci.h>

#include <xen/interface/grant_table.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

#include <asm/xen/hypervisor.h>

/*
 * The minimal size of segment supported by the block framework is PAGE_SIZE.
 * When Linux is using a different page size than Xen, it may not be possible
 * to put all the data in a single segment.
 * This can happen when the backend doesn't support indirect descriptor and
 * therefore the maximum amount of data that a request can carry is
 * BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE = 44KB
 *
 * Note that we only support one extra request. So the Linux page size
 * should be <= ( 2 * BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE) =
 * 88KB.
 */
#define HAS_EXTRA_REQ

/* Connection state of the frontend. NOTE(review): enumerators elided in this copy. */
enum blkif_state {};

/* Tracks one grant reference handed to the backend. NOTE(review): members elided in this copy. */
struct grant {};

/* Per-request completion status used by blkif_completion(). NOTE(review): enumerators elided. */
enum blk_req_status {};

/* Shadow copy of an in-flight ring request (for completion/recovery). NOTE(review): members elided. */
struct blk_shadow {};

/* Driver-private data attached to each struct request. NOTE(review): members elided in this copy. */
struct blkif_req {};

/*
 * NOTE(review): body elided in this copy; presumably returns the
 * driver-private blkif_req PDU of @rq — confirm against the full driver.
 */
static inline struct blkif_req *blkif_req(struct request *rq)
{}

/* File-scope state: open/release mutex, block_device ops, delayed work, and the list of all blkfront devices. */
static DEFINE_MUTEX(blkfront_mutex);
static const struct block_device_operations xlvbd_block_fops;
static struct delayed_work blkfront_work;
static LIST_HEAD(info_list);

/*
 * Maximum number of segments in indirect requests, the actual value used by
 * the frontend driver is the minimum of this value and the value provided
 * by the backend driver.
 */

/* NOTE(review): default initializer and MODULE_PARM_DESC text elided in this copy. */
static unsigned int xen_blkif_max_segments =;
module_param_named(max_indirect_segments, xen_blkif_max_segments, uint, 0444);
MODULE_PARM_DESC();

/* Maximum number of hardware queues/rings per device. NOTE(review): default and description elided. */
static unsigned int xen_blkif_max_queues =;
module_param_named(max_queues, xen_blkif_max_queues, uint, 0444);
MODULE_PARM_DESC();

/*
 * Maximum order of pages to be used for the shared ring between front and
 * backend, 4KB page granularity is used.
 */
/* Zero-initialized: a single ring page by default unless overridden via module parameter. */
static unsigned int xen_blkif_max_ring_order;
module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444);
MODULE_PARM_DESC();

/* Whether the backend is considered trusted. NOTE(review): default initializer and description elided. */
static bool __read_mostly xen_blkif_trusted =;
module_param_named(trusted, xen_blkif_trusted, bool, 0644);
MODULE_PARM_DESC();

/* NOTE(review): macro expansion elided in this copy of the source. */
#define BLK_RING_SIZE(info)

/*
 * ring-ref%u i=(-1UL) would take 11 characters + 'ring-ref' is 8, so 19
 * characters are enough. Define to 20 to keep consistent with backend.
 */
/* NOTE(review): value elided. */
#define RINGREF_NAME_LEN
/*
 * queue-%u would take 7 + 10(UINT_MAX) = 17 characters.
 */
/* NOTE(review): value elided. */
#define QUEUE_NAME_LEN

/*
 *  Per-ring info.
 *  Every blkfront device can associate with one or more blkfront_ring_info,
 *  depending on how many hardware queues/rings to be used.
 */
/* NOTE(review): members elided in this copy of the source. */
struct blkfront_ring_info {};

/*
 * We have one of these per vbd, whether ide, scsi or 'other'.  They
 * hang in private_data off the gendisk structure. We may end up
 * putting all kinds of interesting stuff here :-)
 */
/* NOTE(review): members elided in this copy of the source. */
struct blkfront_info
{};

/* Bitmap-based allocator state for device minor numbers, guarded by minor_lock. */
static unsigned int nr_minors;
static unsigned long *minors;
static DEFINE_SPINLOCK(minor_lock);

/* NOTE(review): all macro expansions below elided in this copy of the source. */
#define PARTS_PER_DISK
#define PARTS_PER_EXT_DISK

#define BLKIF_MAJOR(dev)
#define BLKIF_MINOR(dev)

#define EXT_SHIFT
#define EXTENDED
#define VDEV_IS_EXTENDED(dev)
#define BLKIF_MINOR_EXT(dev)
#define EMULATED_HD_DISK_MINOR_OFFSET
#define EMULATED_HD_DISK_NAME_OFFSET
#define EMULATED_SD_DISK_MINOR_OFFSET
#define EMULATED_SD_DISK_NAME_OFFSET

#define DEV_NAME

/*
 * Grants are always the same size as a Xen page (i.e 4KB).
 * A physical segment is always the same size as a Linux page.
 * Number of grants per physical segment
 */
/* NOTE(review): macro expansions elided in this copy of the source. */
#define GRANTS_PER_PSEG

#define GRANTS_PER_INDIRECT_FRAME

#define INDIRECT_GREFS(_grants)

/* Forward declarations needed by code above their definitions. */
static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
static void blkfront_gather_backend_features(struct blkfront_info *info);
static int negotiate_mq(struct blkfront_info *info);

/* Iterate over each ring of a device. NOTE(review): expansion elided. */
#define for_each_rinfo(info, ptr, idx)

/*
 * NOTE(review): body elided; presumably returns the i-th per-ring info of
 * @info — confirm against the full driver.
 */
static inline struct blkfront_ring_info *
get_rinfo(const struct blkfront_info *info, unsigned int i)
{}

/* NOTE(review): body elided; presumably pops a free shadow-entry id for a new ring request. */
static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
{}

/* NOTE(review): body elided; presumably returns shadow-entry @id to the free list. */
static int add_id_to_freelist(struct blkfront_ring_info *rinfo,
			      unsigned long id)
{}

/* NOTE(review): body elided; presumably pre-allocates @num grant entries for the ring. */
static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num)
{}

/* NOTE(review): body elided; presumably takes a grant entry off the ring's free list. */
static struct grant *get_free_grant(struct blkfront_ring_info *rinfo)
{}

/* NOTE(review): body elided; presumably grants the backend domain access to the entry's frame. */
static inline void grant_foreign_access(const struct grant *gnt_list_entry,
					const struct blkfront_info *info)
{}

/*
 * NOTE(review): body elided; presumably obtains a grant for @gfn, reusing a
 * persistent grant when available — confirm against the full driver.
 */
static struct grant *get_grant(grant_ref_t *gref_head,
			       unsigned long gfn,
			       struct blkfront_ring_info *rinfo)
{}

/* NOTE(review): body elided; presumably obtains a grant for an indirect-descriptor page. */
static struct grant *get_indirect_grant(grant_ref_t *gref_head,
					struct blkfront_ring_info *rinfo)
{}

/* NOTE(review): body elided; presumably maps a BLKIF_OP_* code to a printable name. */
static const char *op_name(int op)
{}
/* NOTE(review): body elided; presumably reserves @nr minors starting at @minor in the bitmap. */
static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
{}

/* NOTE(review): body elided; presumably releases minors previously reserved by xlbd_reserve_minors(). */
static void xlbd_release_minors(unsigned int minor, unsigned int nr)
{}

/* NOTE(review): body elided; presumably schedules the queue-restart work for a ring. */
static void blkif_restart_queue_callback(void *arg)
{}

/* NOTE(review): body elided; presumably fills fake CHS geometry for HDIO_GETGEO. */
static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
{}

/* NOTE(review): body elided; block-device ioctl handler (signature suggests CDROM handling given the cdrom.h include). */
static int blkif_ioctl(struct block_device *bdev, blk_mode_t mode,
		       unsigned command, unsigned long argument)
{}

/*
 * NOTE(review): body elided; presumably reserves the next ring slot for @req
 * and returns its shadow id via *ring_req — confirm against the full driver.
 */
static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
					    struct request *req,
					    struct blkif_request **ring_req)
{}

/* NOTE(review): body elided; presumably builds and queues a discard ring request for @req. */
static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo)
{}

/* Context passed through the segment-grant setup callback. NOTE(review): members elided. */
struct setup_rw_req {};

/* NOTE(review): body elided; per-grant callback that fills one segment of a read/write ring request. */
static void blkif_setup_rw_req_grant(unsigned long gfn, unsigned int offset,
				     unsigned int len, void *data)
{}

/* NOTE(review): body elided; presumably splits one request into two ring requests (see HAS_EXTRA_REQ comment above). */
static void blkif_setup_extra_req(struct blkif_request *first,
				  struct blkif_request *second)
{}

/* NOTE(review): body elided; presumably builds and queues a read/write ring request for @req. */
static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *rinfo)
{}

/*
 * Generate a Xen blkfront IO request from a blk layer request.  Reads
 * and writes are handled as expected.
 *
 * @req: a request struct
 */
/* NOTE(review): body elided; dispatcher described by the comment above (likely routes to discard vs. rw helpers). */
static int blkif_queue_request(struct request *req, struct blkfront_ring_info *rinfo)
{}

/* NOTE(review): body elided; presumably pushes queued ring requests and notifies the backend. */
static inline void flush_requests(struct blkfront_ring_info *rinfo)
{}

/* NOTE(review): body elided; blk-mq .queue_rq entry point for this driver. */
static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
			  const struct blk_mq_queue_data *qd)
{}

/* NOTE(review): body elided; blk-mq .complete handler — presumably ends @rq with its stored status. */
static void blkif_complete_rq(struct request *rq)
{}

/* blk-mq operations table. NOTE(review): initializer elided in this copy. */
static const struct blk_mq_ops blkfront_mq_ops =;

/* NOTE(review): body elided; presumably fills @lim from negotiated backend features. */
static void blkif_set_queue_limits(const struct blkfront_info *info,
		struct queue_limits *lim)
{}

/* NOTE(review): body elided; presumably returns a printable description of the flush/barrier mode. */
static const char *flush_info(struct blkfront_info *info)
{}

/* NOTE(review): body elided; presumably configures write-cache/FUA on the queue per backend features. */
static void xlvbd_flush(struct blkfront_info *info)
{}

/* NOTE(review): body elided; presumably maps a Xen virtual-device id to minor and naming offset. */
static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
{}

/* NOTE(review): body elided; presumably encodes disk index @n as a letter suffix (xvda, xvdab, ...). */
static char *encode_disk_name(char *ptr, unsigned int n)
{}

/* NOTE(review): body elided; presumably allocates and registers the gendisk for this vbd. */
static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
		struct blkfront_info *info)
{}

/* Already hold rinfo->ring_lock. */
/* NOTE(review): body elided; caller must hold rinfo->ring_lock (per the comment above). */
static inline void kick_pending_request_queues_locked(struct blkfront_ring_info *rinfo)
{}

/* NOTE(review): body elided; presumably the locking wrapper around the _locked variant above. */
static void kick_pending_request_queues(struct blkfront_ring_info *rinfo)
{}

/* NOTE(review): body elided; workqueue handler that presumably restarts a stalled ring queue. */
static void blkif_restart_queue(struct work_struct *work)
{}

/* NOTE(review): body elided; presumably frees one ring's grants, shadow state, and event channel. */
static void blkif_free_ring(struct blkfront_ring_info *rinfo)
{}

/* NOTE(review): body elided; presumably tears down all rings of @info (@suspend distinguishes suspend vs. close). */
static void blkif_free(struct blkfront_info *info, int suspend)
{}

/* Context for the copy-from-grant completion callback. NOTE(review): members elided. */
struct copy_from_grant {};

/* NOTE(review): body elided; per-grant callback copying read data out of persistent grants. */
static void blkif_copy_from_grant(unsigned long gfn, unsigned int offset,
				  unsigned int len, void *data)
{}

/* NOTE(review): body elided; presumably maps a BLKIF_RSP_* code to enum blk_req_status. */
static enum blk_req_status blkif_rsp_to_req_status(int rsp)
{}

/*
 * Get the final status of the block request based on two ring response
 */
/* NOTE(review): body elided; combines the statuses of a split (two-response) request per the comment above. */
static int blkif_get_final_status(enum blk_req_status s1,
				  enum blk_req_status s2)
{}

/*
 * Return values:
 *  1 response processed.
 *  0 missing further responses.
 * -1 error while processing.
 */
/* NOTE(review): body elided; return-value contract is documented in the comment above. */
static int blkif_completion(unsigned long *id,
			    struct blkfront_ring_info *rinfo,
			    struct blkif_response *bret)
{}

/* NOTE(review): body elided; event-channel interrupt handler that presumably drains ring responses. */
static irqreturn_t blkif_interrupt(int irq, void *dev_id)
{}


/* NOTE(review): body elided; presumably allocates/grants the shared ring and binds the event channel. */
static int setup_blkring(struct xenbus_device *dev,
			 struct blkfront_ring_info *rinfo)
{}

/*
 * Write out per-ring/queue nodes including ring-ref and event-channel, and each
 * ring buffer may have multi pages depending on ->nr_ring_pages.
 */
/* NOTE(review): body elided; behavior described by the comment above (writes ring-ref/event-channel nodes). */
static int write_per_ring_nodes(struct xenbus_transaction xbt,
				struct blkfront_ring_info *rinfo, const char *dir)
{}

/* Enable the persistent grants feature. */
/* NOTE(review): default initializer and MODULE_PARM_DESC text elided in this copy. */
static bool feature_persistent =;
module_param(feature_persistent, bool, 0644);
MODULE_PARM_DESC();

/* Common code used when first setting up, and when resuming. */
/* NOTE(review): body elided; shared setup/resume negotiation path per the comment above. */
static int talk_to_blkback(struct xenbus_device *dev,
			   struct blkfront_info *info)
{}

/* NOTE(review): body elided; presumably negotiates the number of rings/queues and allocates ring info. */
static int negotiate_mq(struct blkfront_info *info)
{}

/*
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffer for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to
 * Initialised state.
 */
/* NOTE(review): body elided; probe behavior is described by the comment above. */
static int blkfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{}

/* NOTE(review): body elided; presumably requeues in-flight requests after reconnecting to the backend. */
static int blkif_recover(struct blkfront_info *info)
{}

/*
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our blkif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
/* NOTE(review): body elided; resume behavior is described by the comment above. */
static int blkfront_resume(struct xenbus_device *dev)
{}

/* NOTE(review): body elided; presumably handles backend-initiated close of the device. */
static void blkfront_closing(struct blkfront_info *info)
{}

/* NOTE(review): body elided; presumably reads discard feature nodes from xenstore. */
static void blkfront_setup_discard(struct blkfront_info *info)
{}

/* NOTE(review): body elided; presumably allocates per-ring resources for indirect descriptors. */
static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
{}

/*
 * Gather all backend feature-*
 */
/* NOTE(review): body elided; gathers backend feature-* nodes per the comment above. */
static void blkfront_gather_backend_features(struct blkfront_info *info)
{}

/*
 * Invoked when the backend is finally 'ready' (and has told produced
 * the details about the physical device - #sectors, size, etc).
 */
/* NOTE(review): body elided; connect behavior is described by the comment above. */
static void blkfront_connect(struct blkfront_info *info)
{}

/*
 * Callback received when the backend's state changes.
 */
/* NOTE(review): body elided; xenbus otherend_changed callback per the comment above. */
static void blkback_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{}

/* NOTE(review): body elided; xenbus remove callback — presumably tears down the device. */
static void blkfront_remove(struct xenbus_device *xbdev)
{}

/* NOTE(review): body elided; xenbus is_ready callback — presumably reports connection state. */
static int blkfront_is_ready(struct xenbus_device *dev)
{}

/* Driver registration tables. NOTE(review): all initializers elided in this copy. */
static const struct block_device_operations xlvbd_block_fops =;


static const struct xenbus_device_id blkfront_ids[] =;

static struct xenbus_driver blkfront_driver =;

/* NOTE(review): body elided; presumably releases unused persistent grants back to Xen. */
static void purge_persistent_grants(struct blkfront_info *info)
{}

/* NOTE(review): body elided; handler for blkfront_work — presumably walks info_list purging grants. */
static void blkfront_delay_work(struct work_struct *work)
{}

/* NOTE(review): body elided; module init — presumably validates params and registers the xenbus driver.
 * module_init() argument elided in this copy. */
static int __init xlblk_init(void)
{}
module_init();


/* NOTE(review): body elided; module exit — presumably unregisters the xenbus driver.
 * MODULE_* macro arguments below are also elided in this copy. */
static void __exit xlblk_exit(void)
{}
module_exit(xlblk_exit);

MODULE_DESCRIPTION();
MODULE_LICENSE();
MODULE_ALIAS_BLOCKDEV_MAJOR();
MODULE_ALIAS();
MODULE_ALIAS();