#include "cmd.h"
enum { … };
static int mlx5vf_is_migratable(struct mlx5_core_dev *mdev, u16 func_id)
{ … }
static int mlx5vf_cmd_get_vhca_id(struct mlx5_core_dev *mdev, u16 function_id,
u16 *vhca_id);
static void
_mlx5vf_free_page_tracker_resources(struct mlx5vf_pci_core_device *mvdev);
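/*
 * Suspend/resume the VF's vHCA around VFIO migration state transitions;
 * op_mod selects the exact transition requested from the device.
 */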
int mlx5vf_cmd_suspend_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod)
{ … }
int mlx5vf_cmd_resume_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod)
{ … }
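/*
 * Ask the device how much migration data is currently pending for this
 * vHCA. query_flags tunes the query (e.g. incremental vs. final state)
 * and the result is returned through state_size and total_size.
 */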
int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev,
size_t *state_size, u64 *total_size,
u8 query_flags)
{ … }
static void set_tracker_change_event(struct mlx5vf_pci_core_device *mvdev)
{ … }
static void set_tracker_error(struct mlx5vf_pci_core_device *mvdev)
{ … }
static int mlx5fv_vf_event(struct notifier_block *nb,
unsigned long event, void *data)
{ … }
void mlx5vf_cmd_close_migratable(struct mlx5vf_pci_core_device *mvdev)
{ … }
void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev)
{ … }
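/*
 * Mark the device as supporting live migration: wire up the VFIO
 * migration/dirty-logging ops and register a core device notifier so
 * the driver can react to VF related events (see mlx5fv_vf_event()).
 */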
void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev,
const struct vfio_migration_ops *mig_ops,
const struct vfio_log_ops *log_ops)
{ … }
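/*
 * Resolve the vhca_id of the function identified by @function_id by
 * querying its HCA capabilities.
 */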
static int mlx5vf_cmd_get_vhca_id(struct mlx5_core_dev *mdev, u16 function_id,
u16 *vhca_id)
{ … }
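/*
 * Create an MKEY describing either a migration data buffer or the page
 * tracker receive buffer so the device can DMA to/from it; only one of
 * @buf and @recv_buf is expected to be supplied.
 */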
static int _create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
struct mlx5_vhca_data_buffer *buf,
struct mlx5_vhca_recv_buf *recv_buf,
u32 *mkey)
{ … }
static int mlx5vf_dma_data_buffer(struct mlx5_vhca_data_buffer *buf)
{ … }
void mlx5vf_free_data_buffer(struct mlx5_vhca_data_buffer *buf)
{ … }
static int mlx5vf_add_migration_pages(struct mlx5_vhca_data_buffer *buf,
unsigned int npages)
{ … }
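/*
 * Migration data buffer lifecycle: alloc/free build and destroy a
 * page-backed, DMA-mapped buffer described by an MKEY, while get/put
 * additionally recycle buffers through the migration file so repeated
 * save/load cycles avoid re-allocating them.
 */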
struct mlx5_vhca_data_buffer *
mlx5vf_alloc_data_buffer(struct mlx5_vf_migration_file *migf,
size_t length,
enum dma_data_direction dma_dir)
{ … }
void mlx5vf_put_data_buffer(struct mlx5_vhca_data_buffer *buf)
{ … }
struct mlx5_vhca_data_buffer *
mlx5vf_get_data_buffer(struct mlx5_vf_migration_file *migf,
size_t length, enum dma_data_direction dma_dir)
{ … }
static void
mlx5vf_save_callback_complete(struct mlx5_vf_migration_file *migf,
struct mlx5vf_async_data *async_data)
{ … }
void mlx5vf_mig_file_cleanup_cb(struct work_struct *_work)
{ … }
static int add_buf_header(struct mlx5_vhca_data_buffer *header_buf,
size_t image_size, bool initial_pre_copy)
{ … }
static void mlx5vf_save_callback(int status, struct mlx5_async_work *context)
{ … }
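/*
 * Start saving the vHCA state into @buf. The command completes
 * asynchronously: mlx5vf_save_callback() runs in command completion
 * context and mlx5vf_mig_file_cleanup_cb() handles deferred cleanup on
 * the error path. @inc requests an incremental (pre-copy) image and
 * @track keeps the device tracking further changes.
 */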
int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev,
struct mlx5_vf_migration_file *migf,
struct mlx5_vhca_data_buffer *buf, bool inc,
bool track)
{ … }
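/*
 * Load a previously saved device image from @buf into the vHCA on the
 * destination side.
 */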
int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev,
struct mlx5_vf_migration_file *migf,
struct mlx5_vhca_data_buffer *buf)
{ … }
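/*
 * The PD allocated here is shared by all MKEYs created for this
 * migration file; mlx5fv_cmd_clean_migf_resources() releases any
 * remaining buffers together with it.
 */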
int mlx5vf_cmd_alloc_pd(struct mlx5_vf_migration_file *migf)
{ … }
void mlx5vf_cmd_dealloc_pd(struct mlx5_vf_migration_file *migf)
{ … }
void mlx5fv_cmd_clean_migf_resources(struct mlx5_vf_migration_file *migf)
{ … }
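/*
 * Dirty page tracking: create a device tracker object covering the IOVA
 * ranges in @ranges; the device then reports dirty pages back through
 * the receive queue set up by the helpers below.
 */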
static int mlx5vf_create_tracker(struct mlx5_core_dev *mdev,
struct mlx5vf_pci_core_device *mvdev,
struct rb_root_cached *ranges, u32 nnodes)
{ … }
static int mlx5vf_cmd_destroy_tracker(struct mlx5_core_dev *mdev,
u32 tracker_id)
{ … }
static int mlx5vf_cmd_modify_tracker(struct mlx5_core_dev *mdev,
u32 tracker_id, unsigned long iova,
unsigned long length, u32 tracker_state)
{ … }
static int mlx5vf_cmd_query_tracker(struct mlx5_core_dev *mdev,
struct mlx5_vhca_page_tracker *tracker)
{ … }
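/*
 * Reporting channel for the page tracker: a CQ plus a pair of RC QPs
 * (one owned by the driver, one by the device). Dirty page reports are
 * posted into the host QP's receive buffer and consumed via CQ
 * completions.
 */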
static int alloc_cq_frag_buf(struct mlx5_core_dev *mdev,
struct mlx5_vhca_cq_buf *buf, int nent,
int cqe_size)
{ … }
static void init_cq_frag_buf(struct mlx5_vhca_cq_buf *buf)
{ … }
static void mlx5vf_destroy_cq(struct mlx5_core_dev *mdev,
struct mlx5_vhca_cq *cq)
{ … }
static void mlx5vf_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type)
{ … }
static int mlx5vf_event_notifier(struct notifier_block *nb, unsigned long type,
void *data)
{ … }
static void mlx5vf_cq_complete(struct mlx5_core_cq *mcq,
struct mlx5_eqe *eqe)
{ … }
static int mlx5vf_create_cq(struct mlx5_core_dev *mdev,
struct mlx5_vhca_page_tracker *tracker,
size_t ncqe)
{ … }
static struct mlx5_vhca_qp *
mlx5vf_create_rc_qp(struct mlx5_core_dev *mdev,
struct mlx5_vhca_page_tracker *tracker, u32 max_recv_wr)
{ … }
static void mlx5vf_post_recv(struct mlx5_vhca_qp *qp)
{ … }
static int mlx5vf_activate_qp(struct mlx5_core_dev *mdev,
struct mlx5_vhca_qp *qp, u32 remote_qpn,
bool host_qp)
{ … }
static void mlx5vf_destroy_qp(struct mlx5_core_dev *mdev,
struct mlx5_vhca_qp *qp)
{ … }
static void free_recv_pages(struct mlx5_vhca_recv_buf *recv_buf)
{ … }
static int alloc_recv_pages(struct mlx5_vhca_recv_buf *recv_buf,
unsigned int npages)
{ … }
static int register_dma_recv_pages(struct mlx5_core_dev *mdev,
struct mlx5_vhca_recv_buf *recv_buf)
{ … }
static void unregister_dma_recv_pages(struct mlx5_core_dev *mdev,
struct mlx5_vhca_recv_buf *recv_buf)
{ … }
static void mlx5vf_free_qp_recv_resources(struct mlx5_core_dev *mdev,
struct mlx5_vhca_qp *qp)
{ … }
static int mlx5vf_alloc_qp_recv_resources(struct mlx5_core_dev *mdev,
struct mlx5_vhca_qp *qp, u32 pdn,
u64 rq_size)
{ … }
static void
_mlx5vf_free_page_tracker_resources(struct mlx5vf_pci_core_device *mvdev)
{ … }
int mlx5vf_stop_page_tracker(struct vfio_device *vdev)
{ … }
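/*
 * Start dirty page tracking for the ranges requested by VFIO: size and
 * allocate the CQ, QPs and receive buffers, create the device tracker
 * and report the tracking page size back to the caller.
 */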
int mlx5vf_start_page_tracker(struct vfio_device *vdev,
struct rb_root_cached *ranges, u32 nnodes,
u64 *page_size)
{ … }
static void
set_report_output(u32 size, int index, struct mlx5_vhca_qp *qp,
struct iova_bitmap *dirty)
{ … }
static void
mlx5vf_rq_cqe(struct mlx5_vhca_qp *qp, struct mlx5_cqe64 *cqe,
struct iova_bitmap *dirty, int *tracker_status)
{ … }
static void *get_cqe(struct mlx5_vhca_cq *cq, int n)
{ … }
static struct mlx5_cqe64 *get_sw_cqe(struct mlx5_vhca_cq *cq, int n)
{ … }
static int
mlx5vf_cq_poll_one(struct mlx5_vhca_cq *cq, struct mlx5_vhca_qp *qp,
struct iova_bitmap *dirty, int *tracker_status)
{ … }
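/*
 * Report-and-clear entry point for VFIO dirty tracking: ask the tracker
 * to report the [iova, iova + length) range, then poll the CQ and fold
 * the reported addresses into @dirty (see set_report_output()).
 */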
int mlx5vf_tracker_read_and_clear(struct vfio_device *vdev, unsigned long iova,
unsigned long length,
struct iova_bitmap *dirty)
{ … }