linux/drivers/gpu/drm/i915/i915_vma.c

/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/sched/mm.h>
#include <linux/dma-fence-array.h>
#include <drm/drm_gem.h>

#include "display/intel_display.h"
#include "display/intel_frontbuffer.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_object_frontbuffer.h"
#include "gem/i915_gem_tiling.h"
#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_tlb.h"

#include "i915_drv.h"
#include "i915_gem_evict.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_vma.h"
#include "i915_vma_resource.h"

static inline void assert_vma_held_evict(const struct i915_vma *vma)
{}

static struct kmem_cache *slab_vmas;

static struct i915_vma *i915_vma_alloc(void)
{}

static void i915_vma_free(struct i915_vma *vma)
{}

#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>
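
/*
 * Note: with CONFIG_DRM_DEBUG_MM, drm_mm records a stack depot handle for
 * the backtrace of each node insertion in node->stack; stack_depot_snprint()
 * decodes that handle so vma_print_allocator() can report where the
 * offending node was originally inserted.
 */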

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
	char buf[512];

	if (!vma->node.stack) {
		drm_dbg(vma->obj->base.dev,
			"vma.node [%08llx + %08llx] %s: unknown owner\n",
			vma->node.start, vma->node.size, reason);
		return;
	}

	stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0);
	drm_dbg(vma->obj->base.dev,
		"vma.node [%08llx + %08llx] %s: inserted at %s\n",
		vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{}

#endif

static inline struct i915_vma *active_to_vma(struct i915_active *ref)
{}

static int __i915_vma_active(struct i915_active *ref)
{}

static void __i915_vma_retire(struct i915_active *ref)
{}

static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_gtt_view *view)
{}

static struct i915_vma *
i915_vma_lookup(struct drm_i915_gem_object *obj,
		struct i915_address_space *vm,
		const struct i915_gtt_view *view)
{}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_gtt_view *view)
{}
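
/*
 * A minimal usage sketch (illustrative only; "obj", "ggtt" and "err" are
 * assumed to be in scope): look up, or create, the object's GGTT vma and
 * pin it into the global GTT.
 *
 *	struct i915_vma *vma;
 *
 *	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
 *	if (err)
 *		return err;
 *
 * Calling i915_vma_instance() again with the same @obj, @vm and @view
 * returns the same vma, so callers do not need to cache the pointer.
 */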

struct i915_vma_work {};

static void __vma_bind(struct dma_fence_work *work)
{}

static void __vma_release(struct dma_fence_work *work)
{}

static const struct dma_fence_work_ops bind_ops =;

struct i915_vma_work *i915_vma_work(void)
{}

int i915_vma_wait_for_bind(struct i915_vma *vma)
{}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
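/*
 * The exclusive fence on vma->active tracks the most recent (re)binding of
 * the vma. If that fence has already signalled, return its error (0 for a
 * successful bind); if it is still pending, the bind has not completed and
 * we report -EBUSY.
 */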
static int i915_vma_verify_bind_complete(struct i915_vma *vma)
{
	struct dma_fence *fence = i915_active_fence_get(&vma->active.excl);
	int err;

	if (!fence)
		return 0;

	if (dma_fence_is_signaled(fence))
		err = fence->error;
	else
		err = -EBUSY;

	dma_fence_put(fence);

	return err;
}
#else
#define i915_vma_verify_bind_complete(_vma) 0
#endif

I915_SELFTEST_EXPORT void
i915_vma_resource_init_from_vma(struct i915_vma_resource *vma_res,
				struct i915_vma *vma)
{}

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @pat_index: PAT index to set in PTE
 * @flags: flags like global or local mapping
 * @work: preallocated worker for allocating and binding the PTE
 * @vma_res: pointer to a preallocated vma resource. The resource is either
 * consumed or freed.
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma,
		  unsigned int pat_index,
		  u32 flags,
		  struct i915_vma_work *work,
		  struct i915_vma_resource *vma_res)
{}
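
/*
 * Illustrative sketch of how the bind path is typically driven (loosely
 * modelled on i915_vma_pin_ww(); names and error handling are simplified
 * and assumed, not copied from that function):
 *
 *	struct i915_vma_work *work = i915_vma_work();
 *	struct i915_vma_resource *vma_res = i915_vma_resource_alloc();
 *
 *	if (!work || IS_ERR(vma_res))
 *		goto err;	// hypothetical cleanup label
 *
 *	err = i915_vma_bind(vma, vma->obj->pat_index,
 *			    PIN_GLOBAL, work, vma_res);
 *
 * On success the vma_res has been consumed; on failure it has been freed,
 * so the caller must not free it again.
 */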

void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{}

void i915_vma_flush_writes(struct i915_vma *vma)
{}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{}

void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{}

bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{}

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
{}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @ww: An optional struct i915_gem_ww_ctx
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, we evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		u64 size, u64 alignment, u64 flags)
{}
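
/*
 * Illustrative sketch (assumed caller-side usage, not taken from this
 * file): the placement constraints evaluated here arrive through the
 * PIN_* flags that callers pass to i915_vma_pin(), e.g.
 *
 *	// ask for any slot in the CPU-mappable part of the GGTT:
 *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_MAPPABLE);
 *
 *	// or demand an exact offset, evicting whatever overlaps it:
 *	err = i915_vma_pin(vma, 0, 0,
 *			   PIN_GLOBAL | PIN_OFFSET_FIXED | offset);
 *
 * where "offset" is a page-aligned GGTT address chosen by the caller.
 */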

static void
i915_vma_detach(struct i915_vma *vma)
{}

static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
{}

static struct scatterlist *
rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
	     unsigned int width, unsigned int height,
	     unsigned int src_stride, unsigned int dst_stride,
	     struct sg_table *st, struct scatterlist *sg)
{}

static noinline struct sg_table *
intel_rotate_pages(struct intel_rotation_info *rot_info,
		   struct drm_i915_gem_object *obj)
{}

static struct scatterlist *
add_padding_pages(unsigned int count,
		  struct sg_table *st, struct scatterlist *sg)
{}

static struct scatterlist *
remap_tiled_color_plane_pages(struct drm_i915_gem_object *obj,
			      unsigned long offset, unsigned int alignment_pad,
			      unsigned int width, unsigned int height,
			      unsigned int src_stride, unsigned int dst_stride,
			      struct sg_table *st, struct scatterlist *sg,
			      unsigned int *gtt_offset)
{}

static struct scatterlist *
remap_contiguous_pages(struct drm_i915_gem_object *obj,
		       pgoff_t obj_offset,
		       unsigned int count,
		       struct sg_table *st, struct scatterlist *sg)
{}

static struct scatterlist *
remap_linear_color_plane_pages(struct drm_i915_gem_object *obj,
			       pgoff_t obj_offset, unsigned int alignment_pad,
			       unsigned int size,
			       struct sg_table *st, struct scatterlist *sg,
			       unsigned int *gtt_offset)
{}

static struct scatterlist *
remap_color_plane_pages(const struct intel_remapped_info *rem_info,
			struct drm_i915_gem_object *obj,
			int color_plane,
			struct sg_table *st, struct scatterlist *sg,
			unsigned int *gtt_offset)
{}

static noinline struct sg_table *
intel_remap_pages(struct intel_remapped_info *rem_info,
		  struct drm_i915_gem_object *obj)
{}

static noinline struct sg_table *
intel_partial_pages(const struct i915_gtt_view *view,
		    struct drm_i915_gem_object *obj)
{}

static int
__i915_vma_get_pages(struct i915_vma *vma)
{}

I915_SELFTEST_EXPORT int i915_vma_get_pages(struct i915_vma *vma)
{}

void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb)
{}

static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{}

I915_SELFTEST_EXPORT void i915_vma_put_pages(struct i915_vma *vma)
{}

static void vma_unbind_pages(struct i915_vma *vma)
{}

int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		    u64 size, u64 alignment, u64 flags)
{}

static void flush_idle_contexts(struct intel_gt *gt)
{}

static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
			   u32 align, unsigned int flags)
{}

int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		  u32 align, unsigned int flags)
{}

/**
 * i915_ggtt_clear_scanout - Clear the scanout flag on all the object's GGTT VMAs
 * @obj: i915 GEM object
 *
 * This function clears the scanout flag for all of the object's GGTT VMAs. The
 * flag is set when the object is pinned for display use. This function is
 * intended to be called by the frontbuffer tracking code when the frontbuffer
 * is about to be released.
 */
void i915_ggtt_clear_scanout(struct drm_i915_gem_object *obj)
{}
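
/*
 * Illustrative sketch (assumed, not copied from the frontbuffer code): the
 * expected call site is the frontbuffer release path, roughly
 *
 *	// last frontbuffer reference is gone, scanout has stopped:
 *	i915_ggtt_clear_scanout(obj);
 *
 * after which the object's GGTT vmas are no longer flagged as being used
 * for scanout.
 */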

static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
{}

void i915_vma_close(struct i915_vma *vma)
{}

static void __i915_vma_remove_closed(struct i915_vma *vma)
{}

void i915_vma_reopen(struct i915_vma *vma)
{}

static void force_unbind(struct i915_vma *vma)
{}

static void release_references(struct i915_vma *vma, struct intel_gt *gt,
			       bool vm_ddestroy)
{}

/*
 * i915_vma_destroy_locked - Remove all weak references to the vma and put
 * the initial reference.
 *
 * This function should be called when it's decided the vma isn't needed
 * anymore. The caller must ensure that it doesn't race with another lookup
 * plus destroy, typically by taking an appropriate reference.
 *
 * Current callsites are
 * - __i915_gem_object_pages_fini()
 * - __i915_vm_close() - Blocks the above function by taking a reference on
 * the object.
 * - __i915_vma_parked() - Blocks the above functions by taking a reference
 * on the vm and a reference on the object. Also takes the object lock so
 * destruction from __i915_vma_parked() can be blocked by holding the
 * object lock. Since the object lock is only allowed from within i915 with
 * an object refcount, holding the object lock also implicitly blocks the
 * vma freeing from __i915_gem_object_pages_fini().
 *
 * Because of locks taken during destruction, a vma is also guaranteed to
 * stay alive while the following locks are held if it was looked up while
 * holding one of the locks:
 * - vm->mutex
 * - obj->vma.lock
 * - gt->closed_lock
 */
void i915_vma_destroy_locked(struct i915_vma *vma)
{}
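
/*
 * Illustrative sketch of the guarantee above (assumed usage, not taken from
 * this file): a vma found while holding obj->vma.lock cannot be freed until
 * that lock is dropped, so a short, non-sleeping inspection under the lock
 * is safe:
 *
 *	spin_lock(&obj->vma.lock);
 *	vma = i915_vma_lookup(obj, vm, NULL);
 *	if (vma)
 *		... inspect vma, it cannot be freed here ...
 *	spin_unlock(&obj->vma.lock);
 *
 * Anything that needs the vma beyond the critical section must first arrange
 * for one of the references described above to be held.
 */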

void i915_vma_destroy(struct i915_vma *vma)
{}

void i915_vma_parked(struct intel_gt *gt)
{}

static void __i915_vma_iounmap(struct i915_vma *vma)
{}

void i915_vma_revoke_mmap(struct i915_vma *vma)
{}

static int
__i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
{}

static int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
{}

int _i915_vma_move_to_active(struct i915_vma *vma,
			     struct i915_request *rq,
			     struct dma_fence *fence,
			     unsigned int flags)
{}

struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
{}

int __i915_vma_unbind(struct i915_vma *vma)
{}

static struct dma_fence *__i915_vma_unbind_async(struct i915_vma *vma)
{}

int i915_vma_unbind(struct i915_vma *vma)
{}

int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm)
{}

int i915_vma_unbind_unlocked(struct i915_vma *vma)
{}

struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
{}

void i915_vma_make_shrinkable(struct i915_vma *vma)
{}

void i915_vma_make_purgeable(struct i915_vma *vma)
{}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif

void i915_vma_module_exit(void)
{}

int __init i915_vma_module_init(void)
{}