linux/fs/fuse/dax.c

// SPDX-License-Identifier: GPL-2.0
/*
 * dax: direct host memory access
 * Copyright (C) 2020 Red Hat, Inc.
 */

#include "fuse_i.h"

#include <linux/delay.h>
#include <linux/dax.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/pfn_t.h>
#include <linux/iomap.h>
#include <linux/interval_tree.h>

/*
 * Default memory range size.  A power of 2 so it agrees with common FUSE_INIT
 * map_alignment values 4KB and 64KB.
 */
#define FUSE_DAX_SHIFT	21
#define FUSE_DAX_SZ	(1 << FUSE_DAX_SHIFT)
#define FUSE_DAX_PAGES	(FUSE_DAX_SZ / PAGE_SIZE)

/* Number of ranges reclaimer will try to free in one invocation */
#define FUSE_DAX_RECLAIM_CHUNK		(10ul)

/*
 * DAX memory reclaim threshold as a percentage of total ranges. When the
 * number of free ranges drops below this threshold, reclaim can trigger.
 * Default is 20%.
 */
#define FUSE_DAX_RECLAIM_THRESHOLD	(20)

/** Translation information for file offsets to DAX window offsets */
struct fuse_dax_mapping {};
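
/*
 * Sketch of the state such a mapping plausibly carries (an illustration,
 * not the exact kernel layout): an interval tree node keyed by file page
 * index, the position of the backing range inside the DAX window, and
 * list/refcount bookkeeping for the free and busy pools.
 */
struct example_dax_mapping_shape {
	struct inode *inode;		/* inode this range is mapped into */
	struct list_head list;		/* link in fcd free/busy lists */
	struct interval_tree_node itn;	/* keyed by file page index */
	u64 window_offset;		/* byte offset in the DAX window */
	loff_t length;			/* length of mapping, in bytes */
	bool writable;			/* read-only or read-write */
	refcount_t refcnt;		/* users from the dax iomap path */
};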

/* Per-inode dax map */
struct fuse_inode_dax {};

/* Per-connection dax state shared by all inodes on this connection */
struct fuse_conn_dax {};

static inline struct fuse_dax_mapping *
node_to_dmap(struct interval_tree_node *node)
{}
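
/*
 * A minimal sketch of the helper above, assuming the mapping embeds its
 * interval tree node in a field named itn (as in the shape sketch above):
 * the usual container_of() pattern, tolerating a NULL lookup result.
 */
static inline struct fuse_dax_mapping *
example_node_to_dmap(struct interval_tree_node *node)
{
	if (!node)
		return NULL;

	return container_of(node, struct fuse_dax_mapping, itn);
}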

static struct fuse_dax_mapping *
alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode);

static void
__kick_dmap_free_worker(struct fuse_conn_dax *fcd, unsigned long delay_ms)
{}

static void kick_dmap_free_worker(struct fuse_conn_dax *fcd,
				  unsigned long delay_ms)
{}
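
/*
 * Sketch of how the 20% threshold above can gate the reclaim worker
 * (field names nr_ranges, nr_free_ranges and free_work are assumptions
 * for illustration): queue the delayed work only once free ranges run low.
 */
static void example_kick_free_worker(struct fuse_conn_dax *fcd,
				     unsigned long delay_ms)
{
	unsigned long free_threshold;

	/* Reclaim at least one range, even for tiny windows */
	free_threshold = max_t(unsigned long,
			       fcd->nr_ranges * FUSE_DAX_RECLAIM_THRESHOLD / 100,
			       1);
	if (fcd->nr_free_ranges < free_threshold)
		queue_delayed_work(system_long_wq, &fcd->free_work,
				   msecs_to_jiffies(delay_ms));
}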

static struct fuse_dax_mapping *alloc_dax_mapping(struct fuse_conn_dax *fcd)
{}

/* This assumes fcd->lock is held */
static void __dmap_remove_busy_list(struct fuse_conn_dax *fcd,
				    struct fuse_dax_mapping *dmap)
{}

static void dmap_remove_busy_list(struct fuse_conn_dax *fcd,
				  struct fuse_dax_mapping *dmap)
{}

/* This assumes fcd->lock is held */
static void __dmap_add_to_free_pool(struct fuse_conn_dax *fcd,
				struct fuse_dax_mapping *dmap)
{}

static void dmap_add_to_free_pool(struct fuse_conn_dax *fcd,
				struct fuse_dax_mapping *dmap)
{}
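
/*
 * Sketch of the locked/unlocked pairing used by the helpers above: the
 * __-prefixed variant assumes fcd->lock is already held, and the plain
 * wrapper takes it (assumes fcd->lock is a spinlock, as hinted by the
 * "fcd->lock is held" comments).
 */
static void example_dmap_add_to_free_pool(struct fuse_conn_dax *fcd,
					  struct fuse_dax_mapping *dmap)
{
	spin_lock(&fcd->lock);
	__dmap_add_to_free_pool(fcd, dmap);
	spin_unlock(&fcd->lock);
}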

static int fuse_setup_one_mapping(struct inode *inode, unsigned long start_idx,
				  struct fuse_dax_mapping *dmap, bool writable,
				  bool upgrade)
{}

static int fuse_send_removemapping(struct inode *inode,
				   struct fuse_removemapping_in *inargp,
				   struct fuse_removemapping_one *remove_one)
{}

static int dmap_removemapping_list(struct inode *inode, unsigned int num,
				   struct list_head *to_remove)
{}

/*
 * Clean up a dmap entry and add it back to the free list. This should be
 * called with fcd->lock held.
 */
static void dmap_reinit_add_to_free_pool(struct fuse_conn_dax *fcd,
					    struct fuse_dax_mapping *dmap)
{}

/*
 * Free inode dmap entries whose range falls inside [start, end].
 * Does not take any locks. At this point it should only be called
 * from the evict_inode() path, where we know all dmap entries can be
 * reclaimed.
 */
static void inode_reclaim_dmap_range(struct fuse_conn_dax *fcd,
				     struct inode *inode,
				     loff_t start, loff_t end)
{}
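
/*
 * Sketch of the interval tree walk such a range reclaim needs (the tree
 * field name is an assumption): because each visited node is removed,
 * restart the lookup from the root instead of using iter_next().
 */
static void example_reclaim_range(struct fuse_inode_dax *idax,
				  unsigned long start_idx,
				  unsigned long end_idx)
{
	struct interval_tree_node *node;

	while (true) {
		node = interval_tree_iter_first(&idax->tree, start_idx,
						end_idx);
		if (!node)
			break;
		interval_tree_remove(node, &idax->tree);
		/* ... send FUSE_REMOVEMAPPING and free the dmap ... */
	}
}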

static int dmap_removemapping_one(struct inode *inode,
				  struct fuse_dax_mapping *dmap)
{}

/*
 * This is called from evict_inode(), by which time the inode is going
 * away. So this function does not take any locks, such as fi->dax->sem,
 * while traversing the fuse inode interval tree. If that lock were taken,
 * the lock validator would complain about a deadlock with the fs_reclaim
 * lock.
 */
void fuse_dax_inode_cleanup(struct inode *inode)
{}

static void fuse_fill_iomap_hole(struct iomap *iomap, loff_t length)
{}
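
/*
 * A plausible body for the helper above, following the iomap conventions
 * (a sketch; the real function may differ): a hole has no backing
 * address, so report IOMAP_HOLE with IOMAP_NULL_ADDR for the range.
 */
static void example_fill_iomap_hole(struct iomap *iomap, loff_t length)
{
	iomap->addr = IOMAP_NULL_ADDR;
	iomap->length = length;
	iomap->type = IOMAP_HOLE;
}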

static void fuse_fill_iomap(struct inode *inode, loff_t pos, loff_t length,
			    struct iomap *iomap, struct fuse_dax_mapping *dmap,
			    unsigned int flags)
{}

static int fuse_setup_new_dax_mapping(struct inode *inode, loff_t pos,
				      loff_t length, unsigned int flags,
				      struct iomap *iomap)
{}

static int fuse_upgrade_dax_mapping(struct inode *inode, loff_t pos,
				    loff_t length, unsigned int flags,
				    struct iomap *iomap)
{}

/*
 * This is just for DAX and the mapping is ephemeral; do not use it for
 * other purposes, since there is no block device with a permanent mapping.
 */
static int fuse_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
			    unsigned int flags, struct iomap *iomap,
			    struct iomap *srcmap)
{}

static int fuse_iomap_end(struct inode *inode, loff_t pos, loff_t length,
			  ssize_t written, unsigned int flags,
			  struct iomap *iomap)
{}

static const struct iomap_ops fuse_iomap_ops = {
	.iomap_begin = fuse_iomap_begin,
	.iomap_end = fuse_iomap_end,
};

static void fuse_wait_dax_page(struct inode *inode)
{}

/* Should be called with mapping->invalidate_lock held exclusively */
static int __fuse_dax_break_layouts(struct inode *inode, bool *retry,
				    loff_t start, loff_t end)
{}
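
/*
 * Sketch of the layout-break wait, built on the generic DAX helper: find
 * a page in [start, end] still referenced by an earlier mapping and, if
 * one exists, ask the caller to retry after waiting for its refcount to
 * drop. (A sketch around dax_layout_busy_page_range(); the exact wait
 * mechanics may differ across kernel versions.)
 */
static int example_break_layouts(struct inode *inode, bool *retry,
				 loff_t start, loff_t end)
{
	struct page *page;

	page = dax_layout_busy_page_range(inode->i_mapping, start, end);
	if (!page)
		return 0;

	*retry = true;
	return ___wait_var_event(&page->_refcount,
				 atomic_read(&page->_refcount) == 1,
				 TASK_INTERRUPTIBLE, 0, 0,
				 fuse_wait_dax_page(inode));
}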

/* dmap_end == 0 leads to unmapping of the whole file */
int fuse_dax_break_layouts(struct inode *inode, u64 dmap_start,
			   u64 dmap_end)
{}
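
/*
 * A minimal sketch of the retry loop this entry point needs: keep
 * breaking layouts until a pass completes without finding a busy page.
 */
static int example_dax_break_layouts(struct inode *inode, u64 dmap_start,
				     u64 dmap_end)
{
	bool retry;
	int ret;

	do {
		retry = false;
		ret = __fuse_dax_break_layouts(inode, &retry, dmap_start,
					       dmap_end);
	} while (ret == 0 && retry);

	return ret;
}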

ssize_t fuse_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{}
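
/*
 * Sketch of the read path: DAX reads go through dax_iomap_rw() with the
 * iomap ops above, under the shared inode lock (honoring IOCB_NOWAIT).
 * This is the usual shape; the real function may differ in details.
 */
static ssize_t example_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock_shared(inode))
			return -EAGAIN;
	} else {
		inode_lock_shared(inode);
	}

	ret = dax_iomap_rw(iocb, to, &fuse_iomap_ops);
	inode_unlock_shared(inode);
	return ret;
}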

static bool file_extending_write(struct kiocb *iocb, struct iov_iter *from)
{}

static ssize_t fuse_dax_direct_write(struct kiocb *iocb, struct iov_iter *from)
{}

ssize_t fuse_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{}

static int fuse_dax_writepages(struct address_space *mapping,
			       struct writeback_control *wbc)
{}

static vm_fault_t __fuse_dax_fault(struct vm_fault *vmf, unsigned int order,
		bool write)
{}
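
/*
 * Sketch of the common fault path: faults are served by dax_iomap_fault()
 * with the iomap ops above, under the shared invalidate_lock so reclaim
 * cannot pull the range out from under the fault; writes are bracketed by
 * sb_start/end_pagefault(). Treat this as an illustration, not the exact
 * body.
 */
static vm_fault_t example_dax_fault(struct vm_fault *vmf, unsigned int order,
				    bool write)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;
	pfn_t pfn;

	if (write)
		sb_start_pagefault(inode->i_sb);
	filemap_invalidate_lock_shared(inode->i_mapping);
	ret = dax_iomap_fault(vmf, order, &pfn, NULL, &fuse_iomap_ops);
	filemap_invalidate_unlock_shared(inode->i_mapping);
	if (write)
		sb_end_pagefault(inode->i_sb);

	return ret;
}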

static vm_fault_t fuse_dax_fault(struct vm_fault *vmf)
{}

static vm_fault_t fuse_dax_huge_fault(struct vm_fault *vmf, unsigned int order)
{}

static vm_fault_t fuse_dax_page_mkwrite(struct vm_fault *vmf)
{}

static vm_fault_t fuse_dax_pfn_mkwrite(struct vm_fault *vmf)
{}

static const struct vm_operations_struct fuse_dax_vm_ops = {
	.fault		= fuse_dax_fault,
	.huge_fault	= fuse_dax_huge_fault,
	.page_mkwrite	= fuse_dax_page_mkwrite,
	.pfn_mkwrite	= fuse_dax_pfn_mkwrite,
};

int fuse_dax_mmap(struct file *file, struct vm_area_struct *vma)
{}
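
/*
 * A minimal sketch of mmap setup: install the DAX vm_ops and mark the VMA
 * as huge-page capable so PMD faults can map whole 2MB ranges.
 */
static int example_dax_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &fuse_dax_vm_ops;
	vm_flags_set(vma, VM_MIXEDMAP | VM_HUGEPAGE);
	return 0;
}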

static int dmap_writeback_invalidate(struct inode *inode,
				     struct fuse_dax_mapping *dmap)
{}

static int reclaim_one_dmap_locked(struct inode *inode,
				   struct fuse_dax_mapping *dmap)
{}

/*
 * Find the first mapped dmap for an inode and return the file offset.
 * The caller needs to hold fi->dax->sem, either shared or exclusive.
 */
static struct fuse_dax_mapping *inode_lookup_first_dmap(struct inode *inode)
{}

/*
 * Find the first mapping in the tree, free it, and return it. Do not add
 * it back to the free pool.
 */
static struct fuse_dax_mapping *
inode_inline_reclaim_one_dmap(struct fuse_conn_dax *fcd, struct inode *inode,
			      bool *retry)
{}

static struct fuse_dax_mapping *
alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode)
{}

static int lookup_and_reclaim_dmap_locked(struct fuse_conn_dax *fcd,
					  struct inode *inode,
					  unsigned long start_idx)
{}

/*
 * Free a range of memory.
 * Locking:
 * 1. Take mapping->invalidate_lock to block dax faults.
 * 2. Take fi->dax->sem to protect the interval tree and to make sure
 *    read/write cannot reuse a dmap which we might be freeing.
 */
static int lookup_and_reclaim_dmap(struct fuse_conn_dax *fcd,
				   struct inode *inode,
				   unsigned long start_idx,
				   unsigned long end_idx)
{}
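
/*
 * Sketch of the locking order described in the comment above (assumes
 * fi->dax->sem and the _locked helper's signature as declared earlier):
 */
static int example_lookup_and_reclaim_dmap(struct fuse_conn_dax *fcd,
					   struct inode *inode,
					   unsigned long start_idx)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	int ret;

	filemap_invalidate_lock(inode->i_mapping);	/* 1: block dax faults */
	down_write(&fi->dax->sem);			/* 2: protect the tree */
	ret = lookup_and_reclaim_dmap_locked(fcd, inode, start_idx);
	up_write(&fi->dax->sem);
	filemap_invalidate_unlock(inode->i_mapping);
	return ret;
}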

static int try_to_free_dmap_chunks(struct fuse_conn_dax *fcd,
				   unsigned long nr_to_free)
{}

static void fuse_dax_free_mem_worker(struct work_struct *work)
{}

static void fuse_free_dax_mem_ranges(struct list_head *mem_list)
{}

void fuse_dax_conn_free(struct fuse_conn *fc)
{}

static int fuse_dax_mem_range_init(struct fuse_conn_dax *fcd)
{}

int fuse_dax_conn_alloc(struct fuse_conn *fc, enum fuse_dax_mode dax_mode,
			struct dax_device *dax_dev)
{}

bool fuse_dax_inode_alloc(struct super_block *sb, struct fuse_inode *fi)
{}

static const struct address_space_operations fuse_dax_file_aops = {
	.writepages	= fuse_dax_writepages,
	.dirty_folio	= noop_dirty_folio,
};

static bool fuse_should_enable_dax(struct inode *inode, unsigned int flags)
{}
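
/*
 * Sketch of the policy check (the enum values FUSE_DAX_NEVER/ALWAYS and
 * the FUSE_ATTR_DAX flag follow the upstream names; treat this as an
 * illustration): "never" and a missing DAX device disable DAX, "always"
 * enables it, and the per-inode mode defers to the inode attribute flag.
 */
static bool example_should_enable_dax(struct inode *inode, unsigned int flags)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	enum fuse_dax_mode dax_mode = fc->dax_mode;

	if (dax_mode == FUSE_DAX_NEVER)
		return false;
	if (!fc->dax)
		return false;
	if (dax_mode == FUSE_DAX_ALWAYS)
		return true;

	/* dax_mode is FUSE_DAX_INODE*: honor the per-inode attribute */
	return flags & FUSE_ATTR_DAX;
}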

void fuse_dax_inode_init(struct inode *inode, unsigned int flags)
{}

void fuse_dax_dontcache(struct inode *inode, unsigned int flags)
{}

bool fuse_dax_check_alignment(struct fuse_conn *fc, unsigned int map_alignment)
{}
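
/*
 * Sketch of the alignment check that the power-of-2 comment at the top of
 * the file alludes to: any FUSE_INIT map_alignment of at most
 * FUSE_DAX_SHIFT bits (4KB == 12, 64KB == 16) divides the range size
 * evenly; larger alignments cannot be honored.
 */
static bool example_dax_check_alignment(struct fuse_conn *fc,
					unsigned int map_alignment)
{
	if (fc->dax && map_alignment > FUSE_DAX_SHIFT) {
		pr_warn("FUSE: map_alignment %u incompatible with dax mem range size %u\n",
			1 << map_alignment, FUSE_DAX_SZ);
		return false;
	}
	return true;
}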

void fuse_dax_cancel_work(struct fuse_conn *fc)
{}
EXPORT_SYMBOL_GPL(fuse_dax_cancel_work);