// SPDX-License-Identifier: GPL-2.0
#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/kvm_host.h>
#include <linux/pagemap.h>
#include <linux/anon_inodes.h>

#include "kvm_mm.h"

struct kvm_gmem {};

/**
 * folio_file_pfn - like folio_file_page, but return a pfn.
 * @folio: The folio which contains this index.
 * @index: The index we want to look up.
 *
 * Return: The pfn for this index.
 */
static inline kvm_pfn_t folio_file_pfn(struct folio *folio, pgoff_t index)
{}
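
/*
 * A hedged sketch of the calculation folio_file_pfn() above is documented
 * to perform, not necessarily the kernel's exact implementation: take the
 * pfn of the folio's first page and add the offset of @index within the
 * folio.  The "_sketch" name is purely illustrative.
 */
static inline kvm_pfn_t folio_file_pfn_sketch(struct folio *folio, pgoff_t index)
{
	return folio_pfn(folio) + index - folio_index(folio);
}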

static int __kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
				    pgoff_t index, struct folio *folio)
{}

static inline void kvm_gmem_mark_prepared(struct folio *folio)
{}

/*
 * Process @folio, which contains @gfn, so that the guest can use it.
 * The folio must be locked and the gfn must be contained in @slot.
 * On successful return the folio has been zeroed, so the guest sees
 * zeroed memory rather than leaked host data, and the folio's
 * up-to-date flag is set.
 */
static int kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
				  gfn_t gfn, struct folio *folio)
{}
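
/*
 * A hedged sketch of the flow described above, assuming order-0 folios and
 * that clear_highpage() is reachable through the existing includes; it is
 * not the kernel's actual implementation.  Zero the folio so no host data
 * leaks into the guest, let the architecture prepare it for the file index
 * that backs @gfn, and only then mark it prepared.  The "_sketch" name is
 * purely illustrative.
 */
static int __maybe_unused kvm_gmem_prepare_folio_sketch(struct kvm *kvm,
							 struct kvm_memory_slot *slot,
							 gfn_t gfn, struct folio *folio)
{
	pgoff_t index = gfn - slot->base_gfn + slot->gmem.pgoff;
	unsigned long i;
	int r;

	/* Zero every page of the folio before the guest can observe it. */
	for (i = 0; i < folio_nr_pages(folio); i++)
		clear_highpage(folio_page(folio, i));

	/* Give the architecture backend a chance to prepare the folio. */
	r = __kvm_gmem_prepare_folio(kvm, slot, index, folio);
	if (!r)
		kvm_gmem_mark_prepared(folio);

	return r;
}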

/*
 * Returns a locked folio on success.  The caller is responsible for
 * setting the up-to-date flag before the memory is mapped into the guest.
 * There is no backing storage for the memory, so the folio will remain
 * up-to-date until it's removed.
 *
 * The accessed, referenced, and dirty flags are ignored: the memory is
 * unevictable, and there is no storage to write back to.
 */
static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
{}
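
/*
 * A hedged sketch of what the lookup above boils down to, assuming order-0
 * folios: grab the folio at @index in the inode's page cache, allocating it
 * if necessary, and return it locked.  filemap_grab_folio() returns an
 * ERR_PTR() on failure.  The "_sketch" name is purely illustrative.
 */
static __maybe_unused struct folio *kvm_gmem_get_folio_sketch(struct inode *inode,
							       pgoff_t index)
{
	return filemap_grab_folio(inode->i_mapping, index);
}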

static void kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
				      pgoff_t end)
{}

static void kvm_gmem_invalidate_end(struct kvm_gmem *gmem, pgoff_t start,
				    pgoff_t end)
{}

static long kvm_gmem_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{}

static long kvm_gmem_allocate(struct inode *inode, loff_t offset, loff_t len)
{}

static long kvm_gmem_fallocate(struct file *file, int mode, loff_t offset,
			       loff_t len)
{}

static int kvm_gmem_release(struct inode *inode, struct file *file)
{}

static inline struct file *kvm_gmem_get_file(struct kvm_memory_slot *slot)
{}

static struct file_operations kvm_gmem_fops = {
	/* Sketch of the elided initializer: wire up the file hooks defined above. */
	.release	= kvm_gmem_release,
	.fallocate	= kvm_gmem_fallocate,
};

void kvm_gmem_init(struct module *module)
{}

static int kvm_gmem_migrate_folio(struct address_space *mapping,
				  struct folio *dst, struct folio *src,
				  enum migrate_mode mode)
{}

static int kvm_gmem_error_folio(struct address_space *mapping, struct folio *folio)
{}

#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
static void kvm_gmem_free_folio(struct folio *folio)
{}
#endif

static const struct address_space_operations kvm_gmem_aops = {
	/* Sketch of the elided initializer: wire up the aops defined above. */
	.migrate_folio		= kvm_gmem_migrate_folio,
	.error_remove_folio	= kvm_gmem_error_folio,
#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
	.free_folio		= kvm_gmem_free_folio,
#endif
};

static int kvm_gmem_getattr(struct mnt_idmap *idmap, const struct path *path,
			    struct kstat *stat, u32 request_mask,
			    unsigned int query_flags)
{}

static int kvm_gmem_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
			    struct iattr *attr)
{}

static const struct inode_operations kvm_gmem_iops = {
	/* Sketch of the elided initializer: wire up the inode ops defined above. */
	.getattr	= kvm_gmem_getattr,
	.setattr	= kvm_gmem_setattr,
};

static int __kvm_gmem_create(struct kvm *kvm, loff_t size, u64 flags)
{}

int kvm_gmem_create(struct kvm *kvm, struct kvm_create_guest_memfd *args)
{}

int kvm_gmem_bind(struct kvm *kvm, struct kvm_memory_slot *slot,
		  unsigned int fd, loff_t offset)
{}

void kvm_gmem_unbind(struct kvm_memory_slot *slot)
{}

/* Returns a locked folio on success.  */
static struct folio *
__kvm_gmem_get_pfn(struct file *file, struct kvm_memory_slot *slot,
		   gfn_t gfn, kvm_pfn_t *pfn, bool *is_prepared,
		   int *max_order)
{}
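
/*
 * A hedged sketch of the gfn -> pfn path above, assuming order-0 folios and
 * omitting the binding sanity checks the real code performs: translate the
 * gfn into a file index, grab the locked folio backing it, and report its
 * pfn plus whether it has already been prepared (folio up-to-date).  The
 * "_sketch" name is purely illustrative.
 */
static __maybe_unused struct folio *
__kvm_gmem_get_pfn_sketch(struct file *file, struct kvm_memory_slot *slot,
			  gfn_t gfn, kvm_pfn_t *pfn, bool *is_prepared,
			  int *max_order)
{
	pgoff_t index = gfn - slot->base_gfn + slot->gmem.pgoff;
	struct folio *folio;

	folio = kvm_gmem_get_folio(file_inode(file), index);
	if (IS_ERR(folio))
		return folio;

	/* Poisoned memory must never be handed to the guest. */
	if (folio_test_hwpoison(folio)) {
		folio_unlock(folio);
		folio_put(folio);
		return ERR_PTR(-EHWPOISON);
	}

	*pfn = folio_file_pfn(folio, index);
	if (max_order)
		*max_order = 0;

	*is_prepared = folio_test_uptodate(folio);
	return folio;
}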

int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
		     gfn_t gfn, kvm_pfn_t *pfn, int *max_order)
{}
EXPORT_SYMBOL_GPL(kvm_gmem_get_pfn);

#ifdef CONFIG_KVM_GENERIC_PRIVATE_MEM
long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long npages,
		       kvm_gmem_populate_cb post_populate, void *opaque)
{}
EXPORT_SYMBOL_GPL(kvm_gmem_populate);
#endif