/* linux/fs/fuse/dev.c */

/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <[email protected]>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>
#include <linux/sched.h>

MODULE_ALIAS_MISCDEV();
MODULE_ALIAS();

/*
 * Ordinary requests have even IDs, while interrupts IDs are odd.
 * Bit 0 marks an interrupt; stepping unique IDs by 2 keeps ordinary
 * request IDs even, so bit 0 is free to distinguish the two.
 */
#define FUSE_INT_REQ_BIT		(1ULL << 0)
#define FUSE_REQ_ID_STEP		(1ULL << 1)

/* Slab cache for struct fuse_req — presumably created in fuse_dev_init() and
 * destroyed in fuse_dev_cleanup(); bodies are elided here, confirm upstream. */
static struct kmem_cache *fuse_req_cachep;

/*
 * Map a /dev/fuse file to its fuse_dev.
 * NOTE(review): body elided in this collapsed listing; upstream returns
 * file->private_data locklessly — confirm against full dev.c.
 */
static struct fuse_dev *fuse_get_dev(struct file *file)
{}

/* NOTE(review): body elided; presumably initializes a fresh request (refcount,
 * list heads, waitqueue, fm pointer) — confirm against full dev.c. */
static void fuse_request_init(struct fuse_mount *fm, struct fuse_req *req)
{}

/* NOTE(review): body elided; presumably allocates a request from
 * fuse_req_cachep with @flags and runs fuse_request_init() — confirm upstream. */
static struct fuse_req *fuse_request_alloc(struct fuse_mount *fm, gfp_t flags)
{}

/* NOTE(review): body elided; presumably returns @req to fuse_req_cachep. */
static void fuse_request_free(struct fuse_req *req)
{}

/* NOTE(review): body elided; presumably takes a reference on @req. */
static void __fuse_get_request(struct fuse_req *req)
{}

/* Must be called with > 1 refcount */
/* NOTE(review): body elided; presumably drops one reference without freeing —
 * the ">1 refcount" precondition above implies it never hits zero here. */
static void __fuse_put_request(struct fuse_req *req)
{}

/* NOTE(review): body elided; presumably marks the connection as initialized
 * (paired with fuse_block_alloc()'s wait) — confirm memory-ordering upstream. */
void fuse_set_initialized(struct fuse_conn *fc)
{}

/* NOTE(review): body elided; presumably decides whether request allocation
 * must block (uninitialized connection / background limits) — confirm upstream. */
static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{}

/* NOTE(review): body elided; presumably decrements the waiting-request count
 * and wakes waiters blocked in fuse_wait_aborted() — confirm upstream. */
static void fuse_drop_waiting(struct fuse_conn *fc)
{}

/* Forward declaration — fuse_get_req() below presumably needs it on its
 * error path before the definition appears. */
static void fuse_put_request(struct fuse_req *req);

/* NOTE(review): body elided; presumably allocates a request, blocking per
 * fuse_block_alloc(), and records caller credentials — confirm upstream. */
static struct fuse_req *fuse_get_req(struct fuse_mount *fm, bool for_background)
{}

/* NOTE(review): body elided; presumably drops the last reference and frees
 * @req via fuse_request_free() when the refcount reaches zero. */
static void fuse_put_request(struct fuse_req *req)
{}

/* NOTE(review): body elided; presumably sums the sizes of @numargs entries in
 * @args. EXPORT_SYMBOL_GPL argument also elided in this listing. */
unsigned int fuse_len_args(unsigned int numargs, struct fuse_arg *args)
{}
EXPORT_SYMBOL_GPL();

/* NOTE(review): body elided; presumably returns the next unique request ID,
 * advancing by FUSE_REQ_ID_STEP so ordinary IDs stay even (see top comment). */
u64 fuse_get_unique(struct fuse_iqueue *fiq)
{}
EXPORT_SYMBOL_GPL();

/* NOTE(review): body elided; presumably hashes @unique into a processing-queue
 * bucket index (see request_find() below) — confirm upstream. */
static unsigned int fuse_req_hash(u64 unique)
{}

/*
 * A new request is available, wake fiq->waitq
 *
 * NOTE(review): body elided in this listing; releases fiq->lock per the
 * sparse annotation below — confirm wakeup details against full dev.c.
 */
static void fuse_dev_wake_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
{}

const struct fuse_iqueue_ops fuse_dev_fiq_ops =;
EXPORT_SYMBOL_GPL();

/* NOTE(review): body elided; presumably assigns a unique ID, links @req onto
 * the pending list and wakes the reader, dropping fiq->lock (per annotation). */
static void queue_request_and_unlock(struct fuse_iqueue *fiq,
				     struct fuse_req *req)
__releases(fiq->lock)
{}

/* NOTE(review): body elided; presumably queues a FORGET for @nodeid with
 * @nlookup on the input queue's forget list — confirm upstream. */
void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
		       u64 nodeid, u64 nlookup)
{}

/* NOTE(review): body elided; presumably moves queued background requests to
 * the input queue while under the background limit — confirm upstream. */
static void flush_bg_queue(struct fuse_conn *fc)
{}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released
 *
 * NOTE(review): body elided in this collapsed listing; the contract above is
 * from the original source. EXPORT_SYMBOL_GPL argument also elided.
 */
void fuse_request_end(struct fuse_req *req)
{}
EXPORT_SYMBOL_GPL();

/* NOTE(review): body elided; presumably queues an INTERRUPT for @req (odd ID,
 * see FUSE_INT_REQ_BIT comment above); returns 0 or -errno — confirm upstream. */
static int queue_interrupt(struct fuse_req *req)
{}

/* NOTE(review): body elided; presumably blocks until the request is answered
 * or aborted, handling signals/interrupts — confirm against full dev.c. */
static void request_wait_answer(struct fuse_req *req)
{}

/* NOTE(review): body elided; presumably queues @req and waits for the answer
 * (see queue_request_and_unlock()/request_wait_answer() above). */
static void __fuse_request_send(struct fuse_req *req)
{}

/* NOTE(review): body elided; presumably trims/adjusts @args for older
 * protocol minor versions — confirm the cutoffs against full dev.c. */
static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
{}

/* NOTE(review): body elided; presumably stamps the request header with the
 * current task's credentials — confirm uid/gid/pid mapping upstream. */
static void fuse_force_creds(struct fuse_req *req)
{}

/* NOTE(review): body elided; presumably copies @args (opcode, nodeid, in/out
 * arg vectors) into @req's wire-format header — confirm upstream. */
static void fuse_args_to_req(struct fuse_req *req, struct fuse_args *args)
{}

/* NOTE(review): body elided; presumably performs a synchronous request round
 * trip, returning a negative errno or the reply size — confirm upstream. */
ssize_t fuse_simple_request(struct fuse_mount *fm, struct fuse_args *args)
{}

/* NOTE(review): body elided; presumably enqueues @req on the background queue
 * if the connection is live; returns success — confirm upstream. */
static bool fuse_request_queue_background(struct fuse_req *req)
{}

/* NOTE(review): body elided; presumably allocates a request with @gfp_flags
 * and queues it in the background; 0 or -errno. EXPORT argument also elided. */
int fuse_simple_background(struct fuse_mount *fm, struct fuse_args *args,
			    gfp_t gfp_flags)
{}
EXPORT_SYMBOL_GPL();

/* NOTE(review): body elided; presumably sends a reply to a kernel-initiated
 * notification identified by @unique — confirm upstream. */
static int fuse_simple_notify_reply(struct fuse_mount *fm,
				    struct fuse_args *args, u64 unique)
{}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 *
 * NOTE(review): body elided in this listing; contract above is original.
 */
static int lock_request(struct fuse_req *req)
{}

/*
 * Unlock request.  If it was aborted while locked, caller is responsible
 * for unlocking and ending the request.
 *
 * NOTE(review): body elided in this listing; contract above is original.
 */
static int unlock_request(struct fuse_req *req)
{}

/* Per-copy cursor state shared by the fuse_copy_* helpers below.
 * NOTE(review): members elided in this collapsed listing — see full dev.c. */
struct fuse_copy_state {};

/* NOTE(review): body elided; presumably zeroes @cs and records the copy
 * direction (@write) and @iter — confirm upstream. */
static void fuse_copy_init(struct fuse_copy_state *cs, int write,
			   struct iov_iter *iter)
{}

/* Unmap and put previous page of userspace buffer */
/* NOTE(review): body elided in this listing. */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{}

/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 *
 * NOTE(review): body elided in this listing; contract above is original.
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{}

/* Do as much copy to/from userspace buffer as we can */
/* NOTE(review): body elided; presumably advances *val/*size by the amount
 * copied — confirm upstream. */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{}

/* NOTE(review): body elided; presumably sanity-checks a folio before it may
 * be moved into the page cache (see fuse_try_move_page()) — confirm upstream. */
static int fuse_check_folio(struct folio *folio)
{}

/* NOTE(review): body elided; presumably attempts zero-copy by stealing the
 * pipe-buffer page instead of copying (splice path) — confirm upstream. */
static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{}

/* NOTE(review): body elided; presumably references @page as a pipe buffer for
 * the splice-read path rather than copying — confirm upstream. */
static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
			 unsigned offset, unsigned count)
{}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 *
 * NOTE(review): body elided in this listing; contract above is original.
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
			  unsigned offset, unsigned count, int zeroing)
{}

/* Copy pages in the request to/from userspace buffer */
/* NOTE(review): body elided in this listing. */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{}

/* Copy a single argument in the request to/from userspace buffer */
/* NOTE(review): body elided in this listing. */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{}

/* Copy request arguments to/from userspace buffer */
/* NOTE(review): body elided in this listing. */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{}

/* NOTE(review): body elided; presumably tests whether the forget list is
 * non-empty — confirm upstream. */
static int forget_pending(struct fuse_iqueue *fiq)
{}

/* NOTE(review): body elided; presumably tests for any pending work (requests,
 * interrupts, or forgets) on the input queue — confirm upstream. */
static int request_pending(struct fuse_iqueue *fiq)
{}

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fiq->lock held, releases it
 *
 * NOTE(review): body elided in this listing; contract above is original.
 */
static int fuse_read_interrupt(struct fuse_iqueue *fiq,
			       struct fuse_copy_state *cs,
			       size_t nbytes, struct fuse_req *req)
__releases(fiq->lock)
{}

/* NOTE(review): body elided; presumably detaches up to @max forget links from
 * the queue, reporting the count via @countp. EXPORT argument also elided. */
struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
					     unsigned int max,
					     unsigned int *countp)
{}
EXPORT_SYMBOL();

/* NOTE(review): body elided; presumably formats one FUSE_FORGET message into
 * the userspace buffer; releases fiq->lock per annotation. */
static int fuse_read_single_forget(struct fuse_iqueue *fiq,
				   struct fuse_copy_state *cs,
				   size_t nbytes)
__releases(fiq->lock)
{}

/* NOTE(review): body elided; presumably formats a FUSE_BATCH_FORGET message
 * covering several forget links; releases fiq->lock per annotation. */
static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
				   struct fuse_copy_state *cs, size_t nbytes)
__releases(fiq->lock)
{}

/* NOTE(review): body elided; presumably chooses between single and batch
 * forget encoding (protocol/queue dependent); releases fiq->lock. */
static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
			    struct fuse_copy_state *cs,
			    size_t nbytes)
__releases(fiq->lock)
{}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * fuse_request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 *
 * NOTE(review): body elided in this listing; contract above is original.
 */
static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
				struct fuse_copy_state *cs, size_t nbytes)
{}

/* NOTE(review): body elided; /dev/fuse open handler — presumably leaves
 * private_data unset until the mount/clone installs a fuse_dev. */
static int fuse_dev_open(struct inode *inode, struct file *file)
{}

/* NOTE(review): body elided; read_iter handler — presumably sets up a copy
 * state over @to and calls fuse_dev_do_read(). */
static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
{}

/* NOTE(review): body elided; splice_read handler — presumably feeds request
 * pages into @pipe without copying where possible. */
static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe,
				    size_t len, unsigned int flags)
{}

/* NOTE(review): body elided; handles a FUSE_NOTIFY_POLL message from the
 * daemon — confirm payload handling upstream. */
static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
			    struct fuse_copy_state *cs)
{}

/* NOTE(review): body elided; handles inode-invalidation notification —
 * confirm payload handling upstream. */
static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{}

/* NOTE(review): body elided; handles dentry-invalidation notification —
 * confirm payload handling upstream. */
static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{}

/* NOTE(review): body elided; handles delete notification — confirm payload
 * handling upstream. */
static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
			      struct fuse_copy_state *cs)
{}

/* NOTE(review): body elided; handles store notification (daemon pushes data
 * into an inode's page cache) — confirm upstream. */
static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
			     struct fuse_copy_state *cs)
{}

/* Argument container for the retrieve-notification reply path below.
 * NOTE(review): members elided in this collapsed listing — see full dev.c. */
struct fuse_retrieve_args {};

/* NOTE(review): body elided; completion callback for fuse_retrieve() —
 * presumably releases the retrieve args/pages. */
static void fuse_retrieve_end(struct fuse_mount *fm, struct fuse_args *args,
			      int error)
{}

/* NOTE(review): body elided; presumably gathers the requested page-cache
 * range of @inode and sends it back via FUSE_NOTIFY_REPLY. */
static int fuse_retrieve(struct fuse_mount *fm, struct inode *inode,
			 struct fuse_notify_retrieve_out *outarg)
{}

/* NOTE(review): body elided; parses a retrieve notification and dispatches to
 * fuse_retrieve() — confirm upstream. */
static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
				struct fuse_copy_state *cs)
{}

/*
 * Resending all processing queue requests.
 *
 * During a FUSE daemon panics and failover, it is possible for some inflight
 * requests to be lost and never returned. As a result, applications awaiting
 * replies would become stuck forever. To address this, we can use notification
 * to trigger resending of these pending requests to the FUSE daemon, ensuring
 * they are properly processed again.
 *
 * Please note that this strategy is applicable only to idempotent requests or
 * if the FUSE daemon takes careful measures to avoid processing duplicated
 * non-idempotent requests.
 *
 * NOTE(review): body elided in this listing; contract above is original.
 */
static void fuse_resend(struct fuse_conn *fc)
{}

/* NOTE(review): body elided; presumably a thin wrapper triggering
 * fuse_resend() for the FUSE_NOTIFY_RESEND code. */
static int fuse_notify_resend(struct fuse_conn *fc)
{}

/* NOTE(review): body elided; presumably dispatches on @code to the
 * fuse_notify_* handlers defined above. */
static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
		       unsigned int size, struct fuse_copy_state *cs)
{}

/* Look up request on processing list by unique ID */
/* NOTE(review): body elided; presumably searches the bucket selected by
 * fuse_req_hash(unique) — confirm upstream. */
static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
{}

/* NOTE(review): body elided; presumably copies the reply payload from
 * userspace into the request's out args, validating @nbytes. */
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args,
			 unsigned nbytes)
{}

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling fuse_request_end().
 *
 * NOTE(review): body elided in this listing; contract above is original.
 */
static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
				 struct fuse_copy_state *cs, size_t nbytes)
{}

/* NOTE(review): body elided; write_iter handler — presumably wraps @from in a
 * copy state and calls fuse_dev_do_write(). */
static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
{}

/* NOTE(review): body elided; splice_write handler — presumably consumes pipe
 * buffers as the reply, enabling page moves (zero-copy). */
static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
				     struct file *out, loff_t *ppos,
				     size_t len, unsigned int flags)
{}

/* NOTE(review): body elided; poll handler — presumably reports readable when
 * request_pending(), and error once disconnected. */
static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
{}

/* Abort all requests on the given list (pending or processing) */
/* NOTE(review): body elided in this listing. */
static void end_requests(struct list_head *head)
{}

/* NOTE(review): body elided; presumably wakes all outstanding poll waiters on
 * connection teardown — confirm upstream. */
static void end_polls(struct fuse_conn *fc)
{}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or just a hung
 * filesystem.
 *
 * The same effect is usually achievable through killing the filesystem daemon
 * and all users of the filesystem.  The exception is the combination of an
 * asynchronous request and the tricky deadlock (see
 * Documentation/filesystems/fuse.rst).
 *
 * Aborting requests under I/O goes as follows: 1: Separate out unlocked
 * requests, they should be finished off immediately.  Locked requests will be
 * finished after unlock; see unlock_request(). 2: Finish off the unlocked
 * requests.  It is possible that some request will finish before we can.  This
 * is OK, the request will in that case be removed from the list before we touch
 * it.
 *
 * NOTE(review): body elided in this collapsed listing; the contract above is
 * from the original source. EXPORT_SYMBOL_GPL argument also elided.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{}
EXPORT_SYMBOL_GPL();

/* NOTE(review): body elided; presumably blocks until all in-flight requests
 * have drained after fuse_abort_conn() (see fuse_drop_waiting()). */
void fuse_wait_aborted(struct fuse_conn *fc)
{}

/* NOTE(review): body elided; release handler — presumably tears down the
 * fuse_dev, aborting the connection if this was the last device.
 * EXPORT_SYMBOL_GPL argument also elided in this listing. */
int fuse_dev_release(struct inode *inode, struct file *file)
{}
EXPORT_SYMBOL_GPL();

/* NOTE(review): body elided; fasync handler — presumably manages the input
 * queue's fasync list for SIGIO delivery. */
static int fuse_dev_fasync(int fd, struct file *file, int on)
{}

/* NOTE(review): body elided; presumably allocates a new fuse_dev on @fc and
 * installs it in @new->private_data — confirm upstream. */
static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
{}

/* NOTE(review): body elided; FUSE_DEV_IOC_CLONE handler — presumably looks up
 * the fd in *argp and calls fuse_device_clone(). */
static long fuse_dev_ioctl_clone(struct file *file, __u32 __user *argp)
{}

/* NOTE(review): body elided; backing-file open ioctl (passthrough support) —
 * confirm mapping semantics upstream. */
static long fuse_dev_ioctl_backing_open(struct file *file,
					struct fuse_backing_map __user *argp)
{}

/* NOTE(review): body elided; backing-file close ioctl (passthrough support) —
 * confirm semantics upstream. */
static long fuse_dev_ioctl_backing_close(struct file *file, __u32 __user *argp)
{}

/* NOTE(review): body elided; presumably dispatches @cmd to the clone and
 * backing open/close helpers above. */
static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{}

const struct file_operations fuse_dev_operations =;
EXPORT_SYMBOL_GPL();

static struct miscdevice fuse_miscdevice =;

/* NOTE(review): body elided; presumably creates fuse_req_cachep and registers
 * fuse_miscdevice; returns 0 or -errno — confirm upstream. */
int __init fuse_dev_init(void)
{}

/* NOTE(review): body elided; presumably deregisters the misc device and
 * destroys fuse_req_cachep — mirror of fuse_dev_init(). */
void fuse_dev_cleanup(void)
{}