linux/fs/fuse/virtio_fs.c

// SPDX-License-Identifier: GPL-2.0
/*
 * virtio-fs: Virtio Filesystem
 * Copyright (C) 2018 Red Hat, Inc.
 */

#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/group_cpus.h>
#include <linux/pfn_t.h>
#include <linux/memremap.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_fs.h>
#include <linux/delay.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/highmem.h>
#include <linux/cleanup.h>
#include <linux/uio.h>
#include "fuse_i.h"

/* Used to help calculate the FUSE connection's max_pages limit for a request's
 * size. Parts of the struct fuse_req are sliced into scatter-gather lists in
 * addition to the pages used, so this can help account for that overhead.
 */
#define FUSE_HEADER_OVERHEAD 4
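
/*
 * Illustrative use of the overhead constant above (a sketch, not code from
 * this file): the deepest request a virtqueue can take bounds max_pages, so
 * the limit is derived from the ring size minus the non-page elements. The
 * ctx/fs names and the VQ_REQUEST index are assumptions for illustration.
 *
 *     unsigned int virtqueue_size =
 *             virtqueue_get_vring_size(fs->vqs[VQ_REQUEST].vq);
 *
 *     ctx->max_pages = min_t(unsigned int,
 *                            virtqueue_size - FUSE_HEADER_OVERHEAD,
 *                            ctx->max_pages);
 */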

/* List of virtio-fs device instances and a lock for the list. Also provides
 * mutual exclusion between device removal and the mount path.
 */
static DEFINE_MUTEX(virtio_fs_mutex);
static LIST_HEAD(virtio_fs_instances);
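
/*
 * Sketch of the lookup these globals enable (illustrative only; the "list"
 * and "tag" members are assumed, since the struct virtio_fs body is not
 * shown here, and tag is the mount's requested tag): the mount path holds
 * virtio_fs_mutex while scanning virtio_fs_instances, so a concurrent
 * device removal cannot free an instance under its feet.
 *
 *     struct virtio_fs *fs, *found = NULL;
 *
 *     mutex_lock(&virtio_fs_mutex);
 *     list_for_each_entry(fs, &virtio_fs_instances, list) {
 *             if (strcmp(fs->tag, tag) == 0) {
 *                     found = fs;     (take a reference before unlocking)
 *                     break;
 *             }
 *     }
 *     mutex_unlock(&virtio_fs_mutex);
 */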

/* The /sys/fs/virtio_fs/ kset */
static struct kset *virtio_fs_kset;

enum {};

#define VQ_NAME_LEN 24

/* Per-virtqueue state */
struct virtio_fs_vq {} ____cacheline_aligned_in_smp;

/* A virtio-fs device instance */
struct virtio_fs {};

struct virtio_fs_forget_req {};

struct virtio_fs_forget {};

struct virtio_fs_req_work {};

static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
				 struct fuse_req *req, bool in_flight);

static const struct constant_table dax_param_enums[] =;

enum {};

static const struct fs_parameter_spec virtio_fs_parameters[] =;

static int virtio_fs_parse_param(struct fs_context *fsc,
				 struct fs_parameter *param)
{}

static void virtio_fs_free_fsc(struct fs_context *fsc)
{}

static inline struct virtio_fs_vq *vq_to_fsvq(struct virtqueue *vq)
{}

/* Should be called with fsvq->lock held. */
static inline void inc_in_flight_req(struct virtio_fs_vq *fsvq)
{}

/* Should be called with fsvq->lock held. */
static inline void dec_in_flight_req(struct virtio_fs_vq *fsvq)
{}
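
/*
 * Minimal sketch of the accounting the two helpers above imply, assuming the
 * per-queue state carries an in_flight counter and an in_flight_zero
 * completion that the drain path waits on (both field names are assumed):
 *
 *     inc_in_flight_req():
 *             fsvq->in_flight++;
 *
 *     dec_in_flight_req():
 *             WARN_ON(fsvq->in_flight <= 0);
 *             fsvq->in_flight--;
 *             if (!fsvq->in_flight)
 *                     complete(&fsvq->in_flight_zero);
 *
 * Both run under fsvq->lock, which is why callers must hold it.
 */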

static ssize_t tag_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{}

static struct kobj_attribute virtio_fs_tag_attr =;

static struct attribute *virtio_fs_attrs[] =;
ATTRIBUTE_GROUPS(virtio_fs);

static void virtio_fs_ktype_release(struct kobject *kobj)
{}

static const struct kobj_type virtio_fs_ktype =;

/* Make sure virtio_fs_mutex is held */
static void virtio_fs_put(struct virtio_fs *fs)
{}

static void virtio_fs_fiq_release(struct fuse_iqueue *fiq)
{}

static void virtio_fs_drain_queue(struct virtio_fs_vq *fsvq)
{}

static void virtio_fs_drain_all_queues_locked(struct virtio_fs *fs)
{}

static void virtio_fs_drain_all_queues(struct virtio_fs *fs)
{}

static void virtio_fs_start_all_queues(struct virtio_fs *fs)
{}

/* Add a new instance to the list, or return -EEXIST if the tag name already exists */
static int virtio_fs_add_instance(struct virtio_device *vdev,
				  struct virtio_fs *fs)
{}

/* Return the virtio_fs with a given tag, or NULL */
static struct virtio_fs *virtio_fs_find_instance(const char *tag)
{}

static void virtio_fs_free_devs(struct virtio_fs *fs)
{}

/* Read the filesystem tag from virtio config into fs->tag (which must be kfree()d later). */
static int virtio_fs_read_tag(struct virtio_device *vdev, struct virtio_fs *fs)
{}
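
/*
 * Hedged sketch of the tag read: the device exposes a fixed-size, not
 * necessarily NUL-terminated tag in its config space, so the driver copies
 * the whole field and duplicates only the used bytes (error handling
 * abbreviated; struct virtio_fs_config comes from <uapi/linux/virtio_fs.h>):
 *
 *     char buf[sizeof_field(struct virtio_fs_config, tag)];
 *     char *end;
 *
 *     virtio_cread_bytes(vdev, offsetof(struct virtio_fs_config, tag),
 *                        buf, sizeof(buf));
 *     end = memchr(buf, '\0', sizeof(buf));
 *     if (end == buf)
 *             return -EINVAL;     (reject an empty tag)
 *     fs->tag = kstrndup(buf, end ? end - buf : sizeof(buf), GFP_KERNEL);
 *     return fs->tag ? 0 : -ENOMEM;
 */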

/* Work function for hiprio completion */
static void virtio_fs_hiprio_done_work(struct work_struct *work)
{}

static void virtio_fs_request_dispatch_work(struct work_struct *work)
{}

/*
 * Returns 1 if the queue is full and the sender should wait a bit before
 * sending the next request, 0 otherwise.
 */
static int send_forget_request(struct virtio_fs_vq *fsvq,
			       struct virtio_fs_forget *forget,
			       bool in_flight)
{}
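
/*
 * Sketch of the backpressure contract described above, assuming a forget is
 * a single device-readable buffer and the queue keeps a pending list
 * (queued_reqs and the other fsvq field names are illustrative):
 *
 *     sg_init_one(&sg, &forget->req, sizeof(forget->req));
 *
 *     spin_lock(&fsvq->lock);
 *     ret = virtqueue_add_outbuf(fsvq->vq, &sg, 1, forget, GFP_ATOMIC);
 *     if (ret == -ENOSPC) {
 *             list_add_tail(&forget->list, &fsvq->queued_reqs);
 *             spin_unlock(&fsvq->lock);
 *             return 1;     (queue full, caller backs off)
 *     }
 *     notify = virtqueue_kick_prepare(fsvq->vq);
 *     spin_unlock(&fsvq->lock);
 *     if (notify)
 *             virtqueue_notify(fsvq->vq);
 *     return 0;
 */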

static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
{}

/* Allocate and copy args into req->argbuf */
static int copy_args_to_argbuf(struct fuse_req *req)
{}

/* Copy args out of and free req->argbuf */
static void copy_args_from_argbuf(struct fuse_args *args, struct fuse_req *req)
{}
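
/*
 * Sketch of the argbuf round trip implied by the two helpers above: FUSE
 * keeps each argument in its own small buffer, but the device wants
 * contiguous readable and writable regions, so the request carries one
 * bounce buffer that is filled before queuing and copied back on
 * completion. Assuming fuse_len_args() (declared in fuse_i.h) totals the
 * argument lengths:
 *
 *     len = fuse_len_args(args->in_numargs,
 *                         (struct fuse_arg *)args->in_args) +
 *           fuse_len_args(args->out_numargs, args->out_args);
 *     req->argbuf = kmalloc(len, GFP_ATOMIC);
 *
 * followed by a memcpy() of every in-arg into the buffer; after completion
 * the out-args are memcpy()ed back out and the buffer is kfree()d.
 */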

/* Work function for request completion */
static void virtio_fs_request_complete(struct fuse_req *req,
				       struct virtio_fs_vq *fsvq)
{}

static void virtio_fs_complete_req_work(struct work_struct *work)
{}

static void virtio_fs_requests_done_work(struct work_struct *work)
{}

static void virtio_fs_map_queues(struct virtio_device *vdev, struct virtio_fs *fs)
{}

/* Virtqueue interrupt handler */
static void virtio_fs_vq_done(struct virtqueue *vq)
{}

static void virtio_fs_init_vq(struct virtio_fs_vq *fsvq, char *name,
			      int vq_type)
{}

/* Initialize virtqueues */
static int virtio_fs_setup_vqs(struct virtio_device *vdev,
			       struct virtio_fs *fs)
{}

/* Free virtqueues (device must already be reset) */
static void virtio_fs_cleanup_vqs(struct virtio_device *vdev)
{}

/* Map a window offset to a page frame number.  The window offset will have
 * been produced by .iomap_begin(), which maps a file offset to a window
 * offset.
 */
static long virtio_fs_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
				    long nr_pages, enum dax_access_mode mode,
				    void **kaddr, pfn_t *pfn)
{}
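
/*
 * Sketch of the translation described above, assuming the probe path saved
 * the DAX window's kernel mapping, physical base and length in the
 * virtio_fs instance (window_kaddr/window_phys_addr/window_len are
 * illustrative field names):
 *
 *     struct virtio_fs *fs = dax_get_private(dax_dev);
 *     phys_addr_t offset = PFN_PHYS(pgoff);
 *     size_t max_nr_pages = fs->window_len / PAGE_SIZE - pgoff;
 *
 *     if (kaddr)
 *             *kaddr = fs->window_kaddr + offset;
 *     if (pfn)
 *             *pfn = phys_to_pfn_t(fs->window_phys_addr + offset,
 *                                  PFN_DEV | PFN_MAP);
 *     return nr_pages > max_nr_pages ? max_nr_pages : nr_pages;
 */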

static int virtio_fs_zero_page_range(struct dax_device *dax_dev,
				     pgoff_t pgoff, size_t nr_pages)
{}

static const struct dax_operations virtio_fs_dax_ops =;

static void virtio_fs_cleanup_dax(void *data)
{}

DEFINE_FREE()

static int virtio_fs_setup_dax(struct virtio_device *vdev, struct virtio_fs *fs)
{}

static int virtio_fs_probe(struct virtio_device *vdev)
{}

static void virtio_fs_stop_all_queues(struct virtio_fs *fs)
{}

static void virtio_fs_remove(struct virtio_device *vdev)
{}

#ifdef CONFIG_PM_SLEEP
static int virtio_fs_freeze(struct virtio_device *vdev)
{}

static int virtio_fs_restore(struct virtio_device *vdev)
{}
#endif /* CONFIG_PM_SLEEP */

static const struct virtio_device_id id_table[] =;

static const unsigned int feature_table[] =;

static struct virtio_driver virtio_fs_driver =;

static void virtio_fs_wake_forget_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
{}

static void virtio_fs_wake_interrupt_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
{}

/* Count number of scatter-gather elements required */
static unsigned int sg_count_fuse_pages(struct fuse_page_desc *page_descs,
				       unsigned int num_pages,
				       unsigned int total_len)
{}

/* Return the number of scatter-gather list elements required */
static unsigned int sg_count_fuse_req(struct fuse_req *req)
{}
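
/*
 * Why the count matters: virtqueue_add_sgs() fails if a request needs more
 * elements than the ring has descriptors. With the single bounce buffer
 * sketched earlier, all non-page arguments in one direction can share one
 * element, so the worst case beyond the data pages is four elements (in
 * header, in args, out header, out args), which is the slack that
 * FUSE_HEADER_OVERHEAD is meant to reserve. For example, a request with one
 * non-page argument each way plus 32 data pages needs 4 + 32 = 36 elements.
 */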

/* Add pages to scatter-gather list and return number of elements used */
static unsigned int sg_init_fuse_pages(struct scatterlist *sg,
				       struct page **pages,
				       struct fuse_page_desc *page_descs,
				       unsigned int num_pages,
				       unsigned int total_len)
{}
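
/*
 * Sketch of the page loop, assuming each fuse_page_desc supplies the length
 * and offset to map within its page and total_len caps the final element:
 *
 *     for (i = 0; i < num_pages && total_len; i++) {
 *             unsigned int this_len = min(page_descs[i].length, total_len);
 *
 *             sg_init_table(&sg[i], 1);
 *             sg_set_page(&sg[i], pages[i], this_len, page_descs[i].offset);
 *             total_len -= this_len;
 *     }
 *     return i;
 */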

/* Add args to scatter-gather list and return number of elements used */
static unsigned int sg_init_fuse_args(struct scatterlist *sg,
				      struct fuse_req *req,
				      struct fuse_arg *args,
				      unsigned int numargs,
				      bool argpages,
				      void *argbuf,
				      unsigned int *len_used)
{}

/* Add a request to a virtqueue and kick the device */
static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
				 struct fuse_req *req, bool in_flight)
{}
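
/*
 * Sketch of the enqueue step once the scatter-gather elements are set up:
 * the out_sgs elements are device-readable (request), the in_sgs elements
 * are device-writable (reply), and the fuse_req pointer is the cookie the
 * completion handler gets back. The sgs array of element pointers and the
 * fsvq field names follow the assumptions made earlier.
 *
 *     spin_lock(&fsvq->lock);
 *     ret = virtqueue_add_sgs(fsvq->vq, sgs, out_sgs, in_sgs, req,
 *                             GFP_ATOMIC);
 *     if (ret < 0) {
 *             spin_unlock(&fsvq->lock);
 *             return ret;     (-ENOSPC is treated as "queue full, retry")
 *     }
 *     inc_in_flight_req(fsvq);
 *     notify = virtqueue_kick_prepare(fsvq->vq);
 *     spin_unlock(&fsvq->lock);
 *     if (notify)
 *             virtqueue_notify(fsvq->vq);
 */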

static void virtio_fs_wake_pending_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
{}

static const struct fuse_iqueue_ops virtio_fs_fiq_ops =;

static inline void virtio_fs_ctx_set_defaults(struct fuse_fs_context *ctx)
{}

static int virtio_fs_fill_super(struct super_block *sb, struct fs_context *fsc)
{}

static void virtio_fs_conn_destroy(struct fuse_mount *fm)
{}

static void virtio_kill_sb(struct super_block *sb)
{}

static int virtio_fs_test_super(struct super_block *sb,
				struct fs_context *fsc)
{}

static int virtio_fs_get_tree(struct fs_context *fsc)
{}

static const struct fs_context_operations virtio_fs_context_ops =;

static int virtio_fs_init_fs_context(struct fs_context *fsc)
{}

static struct file_system_type virtio_fs_type =;

static int virtio_fs_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
{}

static const struct kset_uevent_ops virtio_fs_uevent_ops =;

static int __init virtio_fs_sysfs_init(void)
{}

static void virtio_fs_sysfs_exit(void)
{}

static int __init virtio_fs_init(void)
{}
module_init(virtio_fs_init);

static void __exit virtio_fs_exit(void)
{}
module_exit(virtio_fs_exit);

MODULE_AUTHOR("Stefan Hajnoczi <stefanha@redhat.com>");
MODULE_DESCRIPTION("Virtio Filesystem");
MODULE_LICENSE("GPL");
MODULE_ALIAS_FS(KBUILD_MODNAME);
MODULE_DEVICE_TABLE(virtio, id_table);