/* linux/drivers/gpu/drm/i915/gvt/gtt.c */

/*
 * GTT virtualization
 *
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <[email protected]>
 *    Zhenyu Wang <[email protected]>
 *    Xiao Zheng <[email protected]>
 *
 * Contributors:
 *    Min He <[email protected]>
 *    Bing Niu <[email protected]>
 *
 */

#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "trace.h"

#include "gt/intel_gt_regs.h"
#include <linux/vmalloc.h>

#if defined(VERBOSE_DEBUG)
#define gvt_vdbg_mm(fmt, args...) gvt_dbg_mm(fmt, ##args)
#else
#define gvt_vdbg_mm(fmt, args...)
#endif

static bool enable_out_of_sync = false;
static int preallocated_oos_pages = 8192;

/*
 * Validate a GM address and its range size, and translate it to a
 * host GM address.
 */
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
{}

/* translate a guest gmadr to host gmadr */
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
{}

/* translate a host gmadr to guest gmadr */
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
{}

int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index)
{}

int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index)
{}
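/*
 * A minimal sketch of the g2h translation above (the real body is elided):
 * a guest GMA is rebased from the guest's view of its aperture/hidden
 * region to the host's. The helpers used here follow gvt.h conventions,
 * but treat this body as an illustrative assumption, not the
 * implementation.
 */
static inline u64 example_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr)
{
	if (vgpu_gmadr_is_aperture(vgpu, g_addr))
		/* rebase within the mappable (aperture) region */
		return vgpu_aperture_gmadr_base(vgpu) +
		       (g_addr - vgpu_aperture_offset(vgpu));
	/* otherwise rebase within the non-mappable (hidden) region */
	return vgpu_hidden_gmadr_base(vgpu) +
	       (g_addr - vgpu_hidden_offset(vgpu));
}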

#define gtt_type_is_entry(type)

#define gtt_type_is_pt(type)

#define gtt_type_is_pte_pt(type)

#define gtt_type_is_root_pointer(type)

#define gtt_init_entry(e, t, p, v)

/*
 * Mappings between GTT_TYPE* enumerations.
 * For a given type, the following information can be looked up:
 * - the type of the next-level page table
 * - the type of an entry inside this level of page table
 * - the type of an entry with the PSE bit set
 *
 * If the given type doesn't carry a particular kind of information,
 * GTT_TYPE_INVALID is returned. For example, an L4 root entry has no
 * PSE bit, so requesting its PSE type yields GTT_TYPE_INVALID; likewise,
 * a PTE page table has no next-level page table type. This is useful
 * when traversing a page table.
 */

struct gtt_type_table_entry {};

#define GTT_TYPE_TABLE_ENTRY(type, e_type, cpt_type, npt_type, pse_type)

static const struct gtt_type_table_entry gtt_type_table[] =;

static inline int get_next_pt_type(int type)
{}

static inline int get_entry_type(int type)
{}

static inline int get_pse_type(int type)
{}
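/*
 * Illustrative sketch of the table-driven lookups above. The real
 * gtt_type_table[] initializer and struct fields are elided in this file;
 * the field and row names below are assumptions that show the pattern:
 * each row maps a GTT type to its entry type, next-level table type and
 * PSE type, so the getters reduce to array indexing.
 */
struct example_gtt_type_row {
	int entry_type;
	int next_pt_type;
	int pse_entry_type;
};

static const struct example_gtt_type_row example_gtt_type_table[] = {
	[GTT_TYPE_PPGTT_PDE_PT] = {
		.entry_type	= GTT_TYPE_PPGTT_PDE_ENTRY,
		.next_pt_type	= GTT_TYPE_PPGTT_PTE_PT,
		.pse_entry_type	= GTT_TYPE_PPGTT_PTE_2M_ENTRY,
	},
	/*
	 * ... one row per GTT_TYPE_*; kinds a type lacks are set to
	 * GTT_TYPE_INVALID in the real table ...
	 */
};

static inline int example_get_next_pt_type(int type)
{
	return example_gtt_type_table[type].next_pt_type;
}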

static u64 read_pte64(struct i915_ggtt *ggtt, unsigned long index)
{}

static void ggtt_invalidate(struct intel_gt *gt)
{}

static void write_pte64(struct i915_ggtt *ggtt, unsigned long index, u64 pte)
{}

static inline int gtt_get_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{}

static inline int gtt_set_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{}

#define GTT_HAW

#define ADDR_1G_MASK
#define ADDR_2M_MASK
#define ADDR_64K_MASK
#define ADDR_4K_MASK

#define GTT_SPTE_FLAG_MASK
#define GTT_SPTE_FLAG_64K_SPLITED

#define GTT_64K_PTE_STRIDE

static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
{}

static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
{}
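/*
 * Sketch of the PFN accessors, assuming the usual gen8 PTE layout where
 * the address field spans bits [GTT_HAW-1:12/21/30] depending on page
 * size. The mask values are elided above; 46 is used here as an assumed
 * address width, for illustration only.
 */
#define EXAMPLE_GTT_HAW		46
#define EXAMPLE_ADDR_1G_MASK	GENMASK_ULL(EXAMPLE_GTT_HAW - 1, 30)
#define EXAMPLE_ADDR_2M_MASK	GENMASK_ULL(EXAMPLE_GTT_HAW - 1, 21)
#define EXAMPLE_ADDR_4K_MASK	GENMASK_ULL(EXAMPLE_GTT_HAW - 1, 12)

static unsigned long example_gen8_get_pfn(struct intel_gvt_gtt_entry *e)
{
	/* pick the address mask that matches the entry's page size */
	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
		return (e->val64 & EXAMPLE_ADDR_1G_MASK) >> PAGE_SHIFT;
	if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
		return (e->val64 & EXAMPLE_ADDR_2M_MASK) >> PAGE_SHIFT;
	return (e->val64 & EXAMPLE_ADDR_4K_MASK) >> PAGE_SHIFT;
}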

static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
{}

static void gen8_gtt_clear_pse(struct intel_gvt_gtt_entry *e)
{}

static bool gen8_gtt_test_ips(struct intel_gvt_gtt_entry *e)
{}

static void gen8_gtt_clear_ips(struct intel_gvt_gtt_entry *e)
{}

static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
{}

static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
{}

static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
{}

static bool gen8_gtt_test_64k_splited(struct intel_gvt_gtt_entry *e)
{}

static void gen8_gtt_set_64k_splited(struct intel_gvt_gtt_entry *e)
{}

static void gen8_gtt_clear_64k_splited(struct intel_gvt_gtt_entry *e)
{}

/*
 * Per-platform GMA routines.
 */
static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
{}

#define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp)

DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));
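/*
 * The DEFINE_PPGTT_GMA_TO_INDEX() macro body is elided above; it is
 * assumed to stamp out one trivial bit-field extractor per level, e.g.:
 *
 *	#define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
 *		static unsigned long prefix##_gma_to_##ename##_index( \
 *				unsigned long gma) \
 *		{ return exp; }
 *
 * With the expressions above, gen8_gma_to_pde_index() extracts bits
 * [29:21] (the 512-entry PDE slot), gen8_gma_to_pml4_index() bits
 * [47:39], and so on up the 4-level radix tree.
 */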

static const struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops =;

static const struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops =;

/* Update the entry type according to the PSE and IPS bits. */
static void update_entry_type_for_real(const struct intel_gvt_gtt_pte_ops *pte_ops,
	struct intel_gvt_gtt_entry *entry, bool ips)
{}

/*
 * MM helpers.
 */
static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index,
		bool guest)
{}

static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{}

static inline void ppgtt_get_shadow_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{}

static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index,
		bool guest)
{}

static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{}

static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{}

static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{}

static void ggtt_get_host_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{}

static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{}

/*
 * PPGTT shadow page table helpers.
 */
static inline int ppgtt_spt_get_entry(
		struct intel_vgpu_ppgtt_spt *spt,
		void *page_table, int type,
		struct intel_gvt_gtt_entry *e, unsigned long index,
		bool guest)
{}

static inline int ppgtt_spt_set_entry(
		struct intel_vgpu_ppgtt_spt *spt,
		void *page_table, int type,
		struct intel_gvt_gtt_entry *e, unsigned long index,
		bool guest)
{}

#define ppgtt_get_guest_entry(spt, e, index)

#define ppgtt_set_guest_entry(spt, e, index)

#define ppgtt_get_shadow_entry(spt, e, index)

#define ppgtt_set_shadow_entry(spt, e, index)
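/*
 * Sketch of the accessor macros above (bodies elided): each is assumed to
 * forward to ppgtt_spt_get_entry()/ppgtt_spt_set_entry() with the matching
 * page type and guest flag, e.g.:
 *
 *	#define ppgtt_get_guest_entry(spt, e, index) \
 *		ppgtt_spt_get_entry(spt, NULL, (spt)->guest_page.type, \
 *				    e, index, true)
 *
 *	#define ppgtt_get_shadow_entry(spt, e, index) \
 *		ppgtt_spt_get_entry(spt, (spt)->shadow_page.vaddr, \
 *				    (spt)->shadow_page.type, e, index, false)
 */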

static void *alloc_spt(gfp_t gfp_mask)
{}

static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
{}

static int detach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page);

static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
{}

static void ppgtt_free_all_spt(struct intel_vgpu *vgpu)
{}

static int ppgtt_handle_guest_write_page_table_bytes(
		struct intel_vgpu_ppgtt_spt *spt,
		u64 pa, void *p_data, int bytes);

static int ppgtt_write_protection_handler(
		struct intel_vgpu_page_track *page_track,
		u64 gpa, void *data, int bytes)
{}

/* Find a spt by guest gfn. */
static struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_gfn(
		struct intel_vgpu *vgpu, unsigned long gfn)
{}

/* Find the spt by shadow page mfn. */
static inline struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn(
		struct intel_vgpu *vgpu, unsigned long mfn)
{}

static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);

/* Allocate shadow page table without guest page. */
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
		struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type)
{}

/* Allocate shadow page table associated with specific gfn. */
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn(
		struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type,
		unsigned long gfn, bool guest_pde_ips)
{}

#define pt_entry_size_shift(spt)

#define pt_entries(spt)

#define for_each_present_guest_entry(spt, e, i)

#define for_each_present_shadow_entry(spt, e, i)

#define for_each_shadow_entry(spt, e, i)
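/*
 * Sketch of the iterators above (bodies elided): a plain index loop over
 * the pt_entries(spt) slots of the page table, where the "present"
 * variants additionally filter through pte_ops->test_present(), e.g.:
 *
 *	#define for_each_present_shadow_entry(spt, e, i) \
 *		for (i = 0; i < pt_entries(spt); i++) \
 *			if (!ppgtt_get_shadow_entry(spt, e, i) && \
 *			    (spt)->vgpu->gvt->gtt.pte_ops->test_present(e))
 */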

static inline void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
{}

static inline int ppgtt_put_spt(struct intel_vgpu_ppgtt_spt *spt)
{}

static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);

static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
		struct intel_gvt_gtt_entry *e)
{}

static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *entry)
{}

static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
{}

static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
{}

static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt);

static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
		struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
{}

static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
		struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
{}

static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
	struct intel_gvt_gtt_entry *se)
{}

static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
	struct intel_gvt_gtt_entry *se)
{}

static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
	struct intel_gvt_gtt_entry *ge)
{}

static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt)
{}

static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *se, unsigned long index)
{}

static int ppgtt_handle_guest_entry_add(struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *we, unsigned long index)
{}

static int sync_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page)
{}

static int detach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page)
{}

static int attach_oos_page(struct intel_vgpu_oos_page *oos_page,
		struct intel_vgpu_ppgtt_spt *spt)
{}

static int ppgtt_set_guest_page_sync(struct intel_vgpu_ppgtt_spt *spt)
{}

static int ppgtt_allocate_oos_page(struct intel_vgpu_ppgtt_spt *spt)
{}

static int ppgtt_set_guest_page_oos(struct intel_vgpu_ppgtt_spt *spt)
{}

/**
 * intel_vgpu_sync_oos_pages - sync all out-of-sync shadow pages for a vGPU
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to the host,
 * to sync all out-of-sync shadow pages of the vGPU.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
{}
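/*
 * A minimal sketch of the sync above (the real body is elided). Field
 * names follow the gvt structures, but treat this as an assumption: walk
 * the vGPU's out-of-sync list and write each stale shadow page back.
 */
static int example_sync_all_oos(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_oos_page *oos_page, *next;
	int ret;

	if (!enable_out_of_sync)
		return 0;

	list_for_each_entry_safe(oos_page, next,
				 &vgpu->gtt.oos_page_list_head, vgpu_list) {
		ret = ppgtt_set_guest_page_sync(oos_page->spt);
		if (ret)
			return ret;
	}
	return 0;
}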

/*
 * The heart of PPGTT shadow page table.
 */
static int ppgtt_handle_guest_write_page_table(
		struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *we, unsigned long index)
{}
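/*
 * Sketch of the write-emulation order above (body elided): the new entry
 * is shadowed *before* the old shadow entry is torn down, so the shadow
 * table stays valid across the whole update:
 *
 *	ppgtt_get_shadow_entry(spt, &old_se, index);
 *	if (pte_ops->test_present(we))
 *		ppgtt_handle_guest_entry_add(spt, we, index);
 *	ppgtt_handle_guest_entry_removal(spt, &old_se, index);
 *	ppgtt_set_guest_entry(spt, we, index);
 */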

static inline bool can_do_out_of_sync(struct intel_vgpu_ppgtt_spt *spt)
{}

static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
		unsigned long index)
{}

/**
 * intel_vgpu_flush_post_shadow - flush the post-shadow transactions
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to the host,
 * to flush all post-shadow transactions of the vGPU.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
{}

static int ppgtt_handle_guest_write_page_table_bytes(
		struct intel_vgpu_ppgtt_spt *spt,
		u64 pa, void *p_data, int bytes)
{}

static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm)
{}

static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm)
{}

static struct intel_vgpu_mm *vgpu_alloc_mm(struct intel_vgpu *vgpu)
{}

static void vgpu_free_mm(struct intel_vgpu_mm *mm)
{}

/**
 * intel_vgpu_create_ppgtt_mm - create a ppgtt mm object for a vGPU
 * @vgpu: a vGPU
 * @root_entry_type: ppgtt root entry type
 * @pdps: guest pdps.
 *
 * This function is used to create a ppgtt mm object for a vGPU.
 *
 * Returns:
 * Pointer to the mm object on success, ERR_PTR-encoded negative error
 * code if failed.
 */
struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
		enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
{}

static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
{}

/**
 * _intel_vgpu_mm_release - destroy a mm object
 * @mm_ref: a kref object
 *
 * This function destroys a mm object of a vGPU. It is invoked via
 * kref_put() when the last reference to the object is dropped.
 */
void _intel_vgpu_mm_release(struct kref *mm_ref)
{}

/**
 * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
 * @mm: a vGPU mm object
 *
 * This function is called when a user is done using a vGPU mm object.
 */
void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
{}

/**
 * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
 * @mm: target vgpu mm
 *
 * This function is called when a user wants to use a vGPU mm object. If the
 * mm object hasn't been shadowed yet, the shadow will be populated at this
 * time.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
{}
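/*
 * A minimal sketch of the pin path above (body elided; the pincount/LRU
 * fields are declared in gvt.h, but the exact body is an assumption):
 */
static int example_pin_mm(struct intel_vgpu_mm *mm)
{
	int ret;

	atomic_inc(&mm->pincount);

	if (mm->type == INTEL_GVT_MM_PPGTT) {
		/* lazily build the shadow page tables on first pin */
		ret = shadow_ppgtt_mm(mm);
		if (ret)
			return ret;

		/* keep the LRU hot so reclaim evicts colder mms first */
		mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
		list_move_tail(&mm->ppgtt_mm.lru_list,
			       &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
		mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
	}
	return 0;
}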

static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
{}

/*
 * GMA translation APIs.
 */
static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
{}

/**
 * intel_vgpu_gma_to_gpa - translate a GMA to a GPA
 * @mm: mm object. could be a PPGTT or GGTT mm object
 * @gma: graphics memory address in this mm object
 *
 * This function is used to translate a graphics memory address in a
 * specific graphics memory space to a guest physical address.
 *
 * Returns:
 * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed.
 */
unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
{}
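/*
 * Sketch of the GGTT half of the translation above (the PPGTT half walks
 * the radix levels with the gma_ops index helpers instead): look up the
 * guest PTE covering the GMA's page and splice the page offset back in.
 * Treat this body as an illustrative assumption.
 */
static unsigned long example_ggtt_gma_to_gpa(struct intel_vgpu_mm *mm,
					     unsigned long gma)
{
	const struct intel_gvt_gtt_pte_ops *pte_ops =
		mm->vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry e;

	if (!vgpu_gmadr_is_valid(mm->vgpu, gma))
		return INTEL_GVT_INVALID_ADDR;

	ggtt_get_guest_entry(mm, &e, gma_to_ggtt_pte_index(gma));
	return (pte_ops->get_pfn(&e) << PAGE_SHIFT) | (gma & (PAGE_SIZE - 1));
}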

static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
	unsigned int off, void *p_data, unsigned int bytes)
{}

/**
 * intel_vgpu_emulate_ggtt_mmio_read - emulate GTT MMIO register read
 * @vgpu: a vGPU
 * @off: register offset
 * @p_data: data to be returned to the guest
 * @bytes: data length
 *
 * This function is used to emulate a GTT MMIO register read.
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
	void *p_data, unsigned int bytes)
{}

static void ggtt_invalidate_pte(struct intel_vgpu *vgpu,
		struct intel_gvt_gtt_entry *entry)
{}

static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
	void *p_data, unsigned int bytes)
{}

/**
 * intel_vgpu_emulate_ggtt_mmio_write - emulate GTT MMIO register write
 * @vgpu: a vGPU
 * @off: register offset
 * @p_data: data from the guest write
 * @bytes: data length
 *
 * This function is used to emulate a GTT MMIO register write.
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
		unsigned int off, void *p_data, unsigned int bytes)
{}

static int alloc_scratch_pages(struct intel_vgpu *vgpu,
		enum intel_gvt_gtt_type type)
{}

static int release_scratch_page_tree(struct intel_vgpu *vgpu)
{}

static int create_scratch_page_tree(struct intel_vgpu *vgpu)
{}

/**
 * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to initialize per-vGPU graphics memory virtualization
 * components.
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
{}

void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
{}

static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
{}

/**
 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to clean up per-vGPU graphics memory virtualization
 * components.
 */
void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
{}

static void clean_spt_oos(struct intel_gvt *gvt)
{}

static int setup_spt_oos(struct intel_gvt *gvt)
{}

/**
 * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
 * @vgpu: a vGPU
 * @pdps: pdp root array
 *
 * This function is used to find a PPGTT mm object from mm object pool
 *
 * Returns:
 * pointer to mm object on success, NULL if failed.
 */
struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
		u64 pdps[])
{}

/**
 * intel_vgpu_get_ppgtt_mm - get or create a PPGTT mm object.
 * @vgpu: a vGPU
 * @root_entry_type: ppgtt root entry type
 * @pdps: guest pdps
 *
 * This function is used to find or create a PPGTT mm object from a guest.
 *
 * Returns:
 * Pointer to the mm object on success, ERR_PTR-encoded negative error
 * code if failed.
 */
struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
		enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
{}
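/*
 * A minimal sketch of the find-or-create pattern above (body elided):
 * reuse an existing mm that matches the pdps and take a reference,
 * otherwise create a fresh one; either way the caller gets a referenced
 * object or an ERR_PTR.
 */
static struct intel_vgpu_mm *example_get_ppgtt_mm(struct intel_vgpu *vgpu,
		enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
{
	struct intel_vgpu_mm *mm;

	mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
	if (mm)
		intel_vgpu_mm_get(mm);
	else
		mm = intel_vgpu_create_ppgtt_mm(vgpu, root_entry_type, pdps);

	return mm;
}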

/**
 * intel_vgpu_put_ppgtt_mm - find and put a PPGTT mm object.
 * @vgpu: a vGPU
 * @pdps: guest pdps
 *
 * This function is used to find a PPGTT mm object from a guest and drop a
 * reference to it; the object is destroyed once the last reference is gone.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[])
{}

/**
 * intel_gvt_init_gtt - initialize mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the initialization stage, to initialize
 * the mm components of a GVT device.
 *
 * Returns:
 * zero on success, negative error code if failed.
 */
int intel_gvt_init_gtt(struct intel_gvt *gvt)
{}

/**
 * intel_gvt_clean_gtt - clean up mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the driver unloading stage, to clean up
 * the mm components of a GVT device.
 *
 */
void intel_gvt_clean_gtt(struct intel_gvt *gvt)
{}

/**
 * intel_vgpu_invalidate_ppgtt - invalidate PPGTT instances
 * @vgpu: a vGPU
 *
 * This function is called to invalidate all PPGTT instances of a vGPU.
 *
 */
void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
{}

/**
 * intel_vgpu_reset_ggtt - reset all GGTT entries
 * @vgpu: a vGPU
 * @invalidate_old: invalidate old entries
 *
 * This function is called at the vGPU create stage
 * to reset all the GGTT entries.
 *
 */
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
{}
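/*
 * Sketch of the reset above (body elided): point every GGTT slot owned by
 * the vGPU (aperture and hidden ranges alike) at the shared scratch page.
 * Helper and field names follow this file/gvt.h but are assumptions here.
 */
static void example_reset_ggtt_range(struct intel_vgpu *vgpu,
				     unsigned long index,
				     unsigned long num_entries)
{
	struct intel_gvt_gtt_entry entry = {
		.type = GTT_TYPE_GGTT_PTE,
	};
	const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;

	/* a present PTE backed by the scratch MFN swallows stray accesses */
	pte_ops->set_pfn(&entry, vgpu->gvt->gtt.scratch_mfn);
	pte_ops->set_present(&entry);

	while (num_entries--)
		ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
}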

/**
 * intel_gvt_restore_ggtt - restore all vGPU's ggtt entries
 * @gvt: intel gvt device
 *
 * This function is called at the driver resume stage to restore the
 * GGTT entries of every vGPU.
 *
 */
void intel_gvt_restore_ggtt(struct intel_gvt *gvt)
{}