linux/lib/test_hmm.c

// SPDX-License-Identifier: GPL-2.0
/*
 * This is a module to test the HMM (Heterogeneous Memory Management)
 * mirror and zone device private memory migration APIs of the kernel.
 * Userspace programs can register with the driver to mirror their own address
 * space and can use the device to read/write any valid virtual address.
 */
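
/*
 * Rough userspace usage sketch (not part of this module; the ioctl name,
 * struct hmm_dmirror_cmd layout and device node path are assumed from
 * test_hmm_uapi.h and the hmm selftests):
 *
 *	int fd = open("/dev/hmm_dmirror0", O_RDWR);
 *	char *src = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	char *mirror = malloc(4096);
 *	struct hmm_dmirror_cmd cmd = {
 *		.addr = (uintptr_t)src,		// range to read via the mirror
 *		.ptr = (uintptr_t)mirror,	// buffer the driver copies into
 *		.npages = 1,
 *	};
 *
 *	src[0] = 42;
 *	ioctl(fd, HMM_DMIRROR_READ, &cmd);	// mirror[0] is now 42
 */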
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/memremap.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/hmm.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/sched/mm.h>
#include <linux/platform_device.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>

#include "test_hmm_uapi.h"

#define DMIRROR_NDEVICES		4
#define DMIRROR_RANGE_FAULT_TIMEOUT	1000	/* milliseconds */
#define DEVMEM_CHUNK_SIZE		(256 * 1024 * 1024U)
#define DEVMEM_CHUNKS_RESERVE		16

/*
 * For device_private pages, dpage is just a dummy struct page
 * representing a piece of device memory. dmirror_devmem_alloc_page
 * allocates a real system memory page as backing storage to fake a
 * real device. zone_device_data points to that backing page. But
 * for device_coherent memory, the struct page represents real
 * physical CPU-accessible memory that we can use directly.
 */
#define BACKING_PAGE(page) (is_device_private_page((page)) ? \
			   (page)->zone_device_data : (page))
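
/*
 * Illustrative only: given a ZONE_DEVICE page owned by this driver, the
 * data lives in BACKING_PAGE(page). For device_private memory that is the
 * system page stashed in zone_device_data; for device_coherent memory it
 * is the page itself, e.g.:
 *
 *	struct page *rpage = BACKING_PAGE(dpage);
 *	copy_highpage(rpage, spage);	// "device" copy hits the backing page
 */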

static unsigned long spm_addr_dev0;
module_param(spm_addr_dev0, long, 0644);
MODULE_PARM_DESC(spm_addr_dev0,
		"Specify start address for SPM (special purpose memory) used for device 0. By setting this Coherent device type will be used. Make sure spm_addr_dev1 is set too. Minimum SPM size should be DEVMEM_CHUNK_SIZE.");

static unsigned long spm_addr_dev1;
module_param(spm_addr_dev1, long, 0644);
MODULE_PARM_DESC(spm_addr_dev1,
		"Specify start address for SPM (special purpose memory) used for device 1. By setting this Coherent device type will be used. Make sure spm_addr_dev0 is set too. Minimum SPM size should be DEVMEM_CHUNK_SIZE.");

static const struct dev_pagemap_ops dmirror_devmem_ops;
static const struct mmu_interval_notifier_ops dmirror_min_ops;
static dev_t dmirror_dev;

struct dmirror_device;

struct dmirror_bounce {};

/* Tag values used to mark entries in the mirror's xarray page table. */
#define DPT_XA_TAG_ATOMIC 1UL
#define DPT_XA_TAG_WRITE 3UL

/*
 * Data structure to track address ranges and register for mmu interval
 * notifier updates.
 */
struct dmirror_interval {};
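
/*
 * Sketch of how a range gets registered for invalidation callbacks (the
 * field names here are illustrative; dmirror_min_ops is defined below):
 *
 *	ret = mmu_interval_notifier_insert(&dmirror->notifier, current->mm,
 *					   0, ULONG_MAX & PAGE_MASK,
 *					   &dmirror_min_ops);
 *
 * Once inserted, dmirror_interval_invalidate() runs whenever the covered
 * virtual addresses are unmapped or remapped, letting the mirror drop its
 * stale entries.
 */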

/*
 * Data attached to the open device file.
 * Note that it might be shared after a fork().
 */
struct dmirror {};
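
/*
 * Sketch of how dmirror_fops_open() would typically set this up (field
 * names here are assumptions, not necessarily the exact layout):
 *
 *	struct dmirror *dmirror = kzalloc(sizeof(*dmirror), GFP_KERNEL);
 *
 *	dmirror->mdevice = container_of(inode->i_cdev,
 *					struct dmirror_device, cdevice);
 *	mutex_init(&dmirror->mutex);
 *	xa_init(&dmirror->pt);
 *	filp->private_data = dmirror;
 *
 * Since the struct hangs off the open struct file, a child created by
 * fork() that inherits the descriptor shares it with the parent.
 */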

/*
 * ZONE_DEVICE pages for migration and simulating device memory.
 */
struct dmirror_chunk {};
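
/*
 * Each chunk embeds the dev_pagemap covering its pages, so a ZONE_DEVICE
 * page can be mapped back to its owning chunk via container_of(), which is
 * essentially what dmirror_page_to_chunk() below does (a sketch; the field
 * name is an assumption):
 *
 *	return container_of(page->pgmap, struct dmirror_chunk, pagemap);
 */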

/*
 * Per device data.
 */
struct dmirror_device {};

static struct dmirror_device dmirror_devices[DMIRROR_NDEVICES];

static int dmirror_bounce_init(struct dmirror_bounce *bounce,
			       unsigned long addr,
			       unsigned long size)
{}

static bool dmirror_is_private_zone(struct dmirror_device *mdevice)
{}

static enum migrate_vma_direction
dmirror_select_device(struct dmirror *dmirror)
{}

static void dmirror_bounce_fini(struct dmirror_bounce *bounce)
{}

static int dmirror_fops_open(struct inode *inode, struct file *filp)
{}

static int dmirror_fops_release(struct inode *inode, struct file *filp)
{}

static struct dmirror_chunk *dmirror_page_to_chunk(struct page *page)
{}

static struct dmirror_device *dmirror_page_to_device(struct page *page)
{}

static int dmirror_do_fault(struct dmirror *dmirror, struct hmm_range *range)
{}

static void dmirror_do_update(struct dmirror *dmirror, unsigned long start,
			      unsigned long end)
{}

static bool dmirror_interval_invalidate(struct mmu_interval_notifier *mni,
				const struct mmu_notifier_range *range,
				unsigned long cur_seq)
{}

static const struct mmu_interval_notifier_ops dmirror_min_ops = {
	.invalidate = dmirror_interval_invalidate,
};

static int dmirror_range_fault(struct dmirror *dmirror,
				struct hmm_range *range)
{}

static int dmirror_fault(struct dmirror *dmirror, unsigned long start,
			 unsigned long end, bool write)
{}

static int dmirror_do_read(struct dmirror *dmirror, unsigned long start,
			   unsigned long end, struct dmirror_bounce *bounce)
{}

static int dmirror_read(struct dmirror *dmirror, struct hmm_dmirror_cmd *cmd)
{}

static int dmirror_do_write(struct dmirror *dmirror, unsigned long start,
			    unsigned long end, struct dmirror_bounce *bounce)
{}

static int dmirror_write(struct dmirror *dmirror, struct hmm_dmirror_cmd *cmd)
{}

static int dmirror_allocate_chunk(struct dmirror_device *mdevice,
				   struct page **ppage)
{}

static struct page *dmirror_devmem_alloc_page(struct dmirror_device *mdevice)
{}

static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args,
					   struct dmirror *dmirror)
{}

static int dmirror_check_atomic(struct dmirror *dmirror, unsigned long start,
			     unsigned long end)
{}

static int dmirror_atomic_map(unsigned long start, unsigned long end,
			      struct page **pages, struct dmirror *dmirror)
{}

static int dmirror_migrate_finalize_and_map(struct migrate_vma *args,
					    struct dmirror *dmirror)
{}

static int dmirror_exclusive(struct dmirror *dmirror,
			     struct hmm_dmirror_cmd *cmd)
{}

static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
						      struct dmirror *dmirror)
{}

static unsigned long
dmirror_successful_migrated_pages(struct migrate_vma *migrate)
{}

static int dmirror_migrate_to_system(struct dmirror *dmirror,
				     struct hmm_dmirror_cmd *cmd)
{}

static int dmirror_migrate_to_device(struct dmirror *dmirror,
				struct hmm_dmirror_cmd *cmd)
{}

static void dmirror_mkentry(struct dmirror *dmirror, struct hmm_range *range,
			    unsigned char *perm, unsigned long entry)
{}

static bool dmirror_snapshot_invalidate(struct mmu_interval_notifier *mni,
				const struct mmu_notifier_range *range,
				unsigned long cur_seq)
{}

static const struct mmu_interval_notifier_ops dmirror_mrn_ops = {
	.invalidate = dmirror_snapshot_invalidate,
};

static int dmirror_range_snapshot(struct dmirror *dmirror,
				  struct hmm_range *range,
				  unsigned char *perm)
{}

static int dmirror_snapshot(struct dmirror *dmirror,
			    struct hmm_dmirror_cmd *cmd)
{}

static void dmirror_device_evict_chunk(struct dmirror_chunk *chunk)
{}

/* Removes free pages from the free list so they can't be re-allocated */
static void dmirror_remove_free_pages(struct dmirror_chunk *devmem)
{}

static void dmirror_device_remove_chunks(struct dmirror_device *mdevice)
{}

static long dmirror_fops_unlocked_ioctl(struct file *filp,
					unsigned int command,
					unsigned long arg)
{}

static int dmirror_fops_mmap(struct file *file, struct vm_area_struct *vma)
{}

static const struct file_operations dmirror_fops = {
	.open		= dmirror_fops_open,
	.release	= dmirror_fops_release,
	.mmap		= dmirror_fops_mmap,
	.unlocked_ioctl = dmirror_fops_unlocked_ioctl,
	.llseek		= default_llseek,
	.owner		= THIS_MODULE,
};

static void dmirror_devmem_free(struct page *page)
{}

static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf)
{}

static const struct dev_pagemap_ops dmirror_devmem_ops = {
	.page_free	= dmirror_devmem_free,
	.migrate_to_ram	= dmirror_devmem_fault,
};

static int dmirror_device_init(struct dmirror_device *mdevice, int id)
{}

static void dmirror_device_remove(struct dmirror_device *mdevice)
{}

static int __init hmm_dmirror_init(void)
{}

static void __exit hmm_dmirror_exit(void)
{}

module_init(hmm_dmirror_init);
module_exit(hmm_dmirror_exit);
MODULE_DESCRIPTION("HMM test module");
MODULE_LICENSE("GPL");