// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <linux/shmem_fs.h>

#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/drm_buddy.h>

#include "i915_drv.h"
#include "i915_ttm_buddy_manager.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"

#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_object.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gem/i915_gem_ttm_move.h"
#include "gem/i915_gem_ttm_pm.h"
#include "gt/intel_gpu_commands.h"

/*
 * TTM LRU list priorities; buffers on lower-priority lists are
 * considered first for eviction and shrinking.
 */
#define I915_TTM_PRIO_PURGE	0
#define I915_TTM_PRIO_NO_PAGES	1
#define I915_TTM_PRIO_HAS_PAGES	2
#define I915_TTM_PRIO_NEEDS_CPU_ACCESS 3

/*
 * Size of struct ttm_place vector in on-stack struct ttm_placement allocs
 */
#define I915_TTM_MAX_PLACEMENTS INTEL_REGION_UNKNOWN

/**
 * struct i915_ttm_tt - TTM page vector with additional private information
 * @ttm: The base TTM page vector.
 * @dev: The struct device used for dma mapping and unmapping.
 * @cached_rsgt: The cached scatter-gather table.
 * @is_shmem: Set if using shmem.
 * @filp: The shmem file, if using shmem backend.
 *
 * Note that DMA may be going on right up to the point where the page-
 * vector is unpopulated in delayed destroy. Hence keep the
 * scatter-gather table mapped and cached up to that point. This is
 * different from the cached gem object io scatter-gather table which
 * doesn't have an associated dma mapping.
 */
struct i915_ttm_tt {
	struct ttm_tt ttm;
	struct device *dev;
	struct i915_refct_sgt cached_rsgt;

	bool is_shmem;
	struct file *filp;
};
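
/*
 * A minimal downcast helper, sketched here for illustration; the helper
 * name is hypothetical and not part of the original file. The TTM backend
 * callbacks below recover the i915_ttm_tt from the embedded base
 * struct ttm_tt with container_of().
 */
static inline struct i915_ttm_tt *to_i915_ttm_tt(struct ttm_tt *ttm)
{
	return container_of(ttm, struct i915_ttm_tt, ttm);
}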

static const struct ttm_place sys_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = I915_PL_SYSTEM,
	.flags = 0,
};

static struct ttm_placement i915_sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags,
};

/**
 * i915_ttm_sys_placement - Return the struct ttm_placement to be
 * used for an object in system memory.
 *
 * Rather than making the struct extern, use this
 * function.
 *
 * Return: A pointer to a static variable for sys placement.
 */
struct ttm_placement *i915_ttm_sys_placement(void)
{
	return &i915_sys_placement;
}
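
/*
 * Hypothetical usage sketch (not from the original file): the returned
 * placement can be handed straight to TTM to move a buffer object back
 * to system memory, e.g.
 *
 *	ret = ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);
 */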

static int i915_ttm_err_to_gem(int err)
{}

static enum ttm_caching
i915_ttm_select_tt_caching(const struct drm_i915_gem_object *obj)
{}

static void
i915_ttm_place_from_region(const struct intel_memory_region *mr,
			   struct ttm_place *place,
			   resource_size_t offset,
			   resource_size_t size,
			   unsigned int flags)
{}

static void
i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj,
			    struct ttm_place *places,
			    struct ttm_placement *placement)
{}

static int i915_ttm_tt_shmem_populate(struct ttm_device *bdev,
				      struct ttm_tt *ttm,
				      struct ttm_operation_ctx *ctx)
{}

static void i915_ttm_tt_shmem_unpopulate(struct ttm_tt *ttm)
{}

static void i915_ttm_tt_release(struct kref *ref)
{}

static const struct i915_refct_sgt_ops tt_rsgt_ops = {
	.release = i915_ttm_tt_release
};

static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
					 uint32_t page_flags)
{}

static int i915_ttm_tt_populate(struct ttm_device *bdev,
				struct ttm_tt *ttm,
				struct ttm_operation_ctx *ctx)
{}

static void i915_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{}

static void i915_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{}

static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
				       const struct ttm_place *place)
{}

static void i915_ttm_evict_flags(struct ttm_buffer_object *bo,
				 struct ttm_placement *placement)
{}

/**
 * i915_ttm_free_cached_io_rsgt - Free object cached LMEM information
 * @obj: The GEM object
 *
 * This function frees any LMEM-related information that is cached on
 * the object. For example the radix tree for fast page lookup and the
 * cached refcounted sg-table.
 */
void i915_ttm_free_cached_io_rsgt(struct drm_i915_gem_object *obj)
{}

/**
 * i915_ttm_purge - Clear an object of its memory
 * @obj: The object
 *
 * This function is called to clear an object of its memory when it is
 * marked as not needed anymore.
 *
 * Return: 0 on success, negative error code on failure.
 */
int i915_ttm_purge(struct drm_i915_gem_object *obj)
{}
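
/*
 * Hypothetical caller sketch: with the object lock held, a shrinker-like
 * path would only purge backing store the user has marked as disposable,
 * e.g.
 *
 *	if (obj->mm.madv == I915_MADV_DONTNEED)
 *		err = i915_ttm_purge(obj);
 */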

static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
{}

static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
{}

static struct i915_refct_sgt *i915_ttm_tt_get_st(struct ttm_tt *ttm)
{}

/**
 * i915_ttm_resource_get_st - Get a refcounted sg-table pointing to the
 * resource memory
 * @obj: The GEM object used for sg-table caching
 * @res: The struct ttm_resource for which an sg-table is requested.
 *
 * This function returns a refcounted sg-table representing the memory
 * pointed to by @res. If @res is the object's current resource it may also
 * cache the sg_table on the object or attempt to access an already cached
 * sg-table. The refcounted sg-table needs to be put when no longer in use.
 *
 * Return: A valid pointer to a struct i915_refct_sgt or error pointer on
 * failure.
 */
struct i915_refct_sgt *
i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
			 struct ttm_resource *res)
{}
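
/*
 * Usage sketch (hypothetical caller, not from the original file): the
 * returned sg-table is refcounted, so every successful get must be
 * balanced with a put:
 *
 *	struct i915_refct_sgt *rsgt;
 *
 *	rsgt = i915_ttm_resource_get_st(obj, bo->resource);
 *	if (IS_ERR(rsgt))
 *		return PTR_ERR(rsgt);
 *	... use rsgt->table ...
 *	i915_refct_sgt_put(rsgt);
 */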

static int i915_ttm_truncate(struct drm_i915_gem_object *obj)
{}

static void i915_ttm_swap_notify(struct ttm_buffer_object *bo)
{}

/**
 * i915_ttm_resource_mappable - Return true if the ttm resource is CPU
 * accessible.
 * @res: The TTM resource to check.
 *
 * This is interesting on small-BAR systems where we may encounter lmem objects
 * that can't be accessed via the CPU.
 *
 * Return: true if @res is CPU accessible, false otherwise.
 */
bool i915_ttm_resource_mappable(struct ttm_resource *res)
{}
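
/*
 * Sketch of the expected caller pattern (assumed, not from the original
 * file): paths that set up CPU access bail out early on non-mappable
 * resources, e.g.
 *
 *	if (!i915_ttm_resource_mappable(mem))
 *		return -EINVAL;
 */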

static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{}

static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
					 unsigned long page_offset)
{}

static int i915_ttm_access_memory(struct ttm_buffer_object *bo,
				  unsigned long offset, void *buf,
				  int len, int write)
{}

/*
 * All callbacks need to take care not to downcast a struct ttm_buffer_object
 * without checking its subclass, since it might be a TTM ghost object.
 */
static struct ttm_device_funcs i915_ttm_bo_driver = {
	.ttm_tt_create = i915_ttm_tt_create,
	.ttm_tt_populate = i915_ttm_tt_populate,
	.ttm_tt_unpopulate = i915_ttm_tt_unpopulate,
	.ttm_tt_destroy = i915_ttm_tt_destroy,
	.eviction_valuable = i915_ttm_eviction_valuable,
	.evict_flags = i915_ttm_evict_flags,
	.move = i915_ttm_move,
	.swap_notify = i915_ttm_swap_notify,
	.delete_mem_notify = i915_ttm_delete_mem_notify,
	.io_mem_reserve = i915_ttm_io_mem_reserve,
	.io_mem_pfn = i915_ttm_io_mem_pfn,
	.access_memory = i915_ttm_access_memory,
};
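
/*
 * A sketch of the subclass check referred to above; the callbacks in
 * this table are expected to guard their downcasts like this
 * (i915_ttm_is_ghost_object() and i915_ttm_to_gem() are assumed to come
 * from i915_gem_ttm.h):
 *
 *	if (i915_ttm_is_ghost_object(bo))
 *		return;
 *	obj = i915_ttm_to_gem(bo);
 */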

/**
 * i915_ttm_driver - Return a pointer to the TTM device funcs
 *
 * Return: Pointer to statically allocated TTM device funcs.
 */
struct ttm_device_funcs *i915_ttm_driver(void)
{
	return &i915_ttm_bo_driver;
}

static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
				struct ttm_placement *placement)
{}

static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
{}

/**
 * DOC: Migration vs eviction
 *
 * GEM migration may not be the same as TTM migration / eviction. If
 * the TTM core decides to evict an object it may be evicted to a
 * TTM memory type that is not in the object's allowable GEM regions, or
 * in fact theoretically to a TTM memory type that doesn't correspond to
 * a GEM memory region. In that case the object's GEM region is not
 * updated, and the data is migrated back to the GEM region at
 * get_pages time. TTM may however set up CPU ptes to the object even
 * when it is evicted.
 * GEM forced migration, using the i915_ttm_migrate() op, is allowed even
 * to regions that are not in the object's list of allowable placements.
 */
static int __i915_ttm_migrate(struct drm_i915_gem_object *obj,
			      struct intel_memory_region *mr,
			      unsigned int flags)
{}

static int i915_ttm_migrate(struct drm_i915_gem_object *obj,
			    struct intel_memory_region *mr,
			    unsigned int flags)
{}
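
/*
 * Example of the GEM forced migration described above (hypothetical
 * caller, object lock held via @ww): move an object to system memory
 * regardless of its placement list, then wait for the move to complete:
 *
 *	ret = i915_gem_object_migrate(obj, &ww, INTEL_REGION_SMEM);
 *	if (!ret)
 *		ret = i915_gem_object_wait_migration(obj, 0);
 */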

static void i915_ttm_put_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *st)
{}

/**
 * i915_ttm_adjust_lru - Adjust an object's position on relevant LRU lists.
 * @obj: The object
 */
void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
{}

/*
 * TTM-backed gem object destruction requires some clarification.
 * Basically we have two possibilities here. We can either rely on the
 * i915 delayed destruction and put the TTM object when the object
 * is idle. This would be detected by TTM which would bypass the
 * TTM delayed destroy handling. The other approach is to put the TTM
 * object early and rely on the TTM destroyed handling, and then free
 * the leftover parts of the GEM object once TTM's destroyed list handling is
 * complete. For now, we rely on the latter for two reasons:
 * a) TTM can evict an object even when it's on the delayed destroy list,
 * which in theory allows for complete eviction.
 * b) There is work going on in TTM to allow freeing an object even when
 * it's not idle, and using the TTM destroyed list handling could help us
 * benefit from that.
 */
static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj)
{}

static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
{}

static int
vm_access_ttm(struct vm_area_struct *area, unsigned long addr,
	      void *buf, int len, int write)
{}

static void ttm_vm_open(struct vm_area_struct *vma)
{}

static void ttm_vm_close(struct vm_area_struct *vma)
{}

static const struct vm_operations_struct vm_ops_ttm = {
	.fault = vm_fault_ttm,
	.access = vm_access_ttm,
	.open = ttm_vm_open,
	.close = ttm_vm_close,
};

static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
{}

static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
{}

static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
	.name = "i915_gem_object_ttm",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE |
		 I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST,

	.get_pages = i915_ttm_get_pages,
	.put_pages = i915_ttm_put_pages,
	.truncate = i915_ttm_truncate,
	.shrink = i915_ttm_shrink,

	.adjust_lru = i915_ttm_adjust_lru,
	.delayed_free = i915_ttm_delayed_free,
	.migrate = i915_ttm_migrate,

	.mmap_offset = i915_ttm_mmap_offset,
	.unmap_virtual = i915_ttm_unmap_virtual,
	.mmap_ops = &vm_ops_ttm,
};

void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
{}

/*
 * __i915_gem_ttm_object_init - Initialize a ttm-backed i915 gem object
 * @mem: The initial memory region for the object.
 * @obj: The gem object.
 * @offset: Offset of the object within @mem, or I915_BO_INVALID_OFFSET
 * to let the region manager pick one.
 * @size: Object size in bytes.
 * @page_size: Minimum page size for the object, in bytes.
 * @flags: gem object flags.
 *
 * Return: 0 on success, negative error code on failure.
 */
int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
			       struct drm_i915_gem_object *obj,
			       resource_size_t offset,
			       resource_size_t size,
			       resource_size_t page_size,
			       unsigned int flags)
{}
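
/*
 * Hypothetical creation sketch: users normally reach this init path
 * through the region helper rather than calling it directly, e.g.
 *
 *	obj = i915_gem_object_create_region(mem, size, 0, 0);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 */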

static const struct intel_memory_region_ops ttm_system_region_ops = {
	.init_object = __i915_gem_ttm_object_init,
	.release = intel_region_ttm_fini,
};

struct intel_memory_region *
i915_gem_ttm_system_setup(struct drm_i915_private *i915,
			  u16 type, u16 instance)
{}
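
/*
 * Hypothetical usage sketch: during driver load, the returned region
 * would be stored in the driver's region array, along the lines of
 *
 *	mem = i915_gem_ttm_system_setup(i915, INTEL_MEMORY_SYSTEM, 0);
 *	if (IS_ERR(mem))
 *		return PTR_ERR(mem);
 *	i915->mm.regions[INTEL_REGION_SMEM] = mem;
 */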