/* linux/drivers/gpu/drm/xe/xe_bo.c */

// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_bo.h"

#include <linux/dma-buf.h>

#include <drm/drm_drv.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_managed.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/xe_drm.h>

#include "xe_device.h"
#include "xe_dma_buf.h"
#include "xe_drm_client.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_map.h"
#include "xe_migrate.h"
#include "xe_pm.h"
#include "xe_preempt_fence.h"
#include "xe_res_cursor.h"
#include "xe_trace_bo.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_vm.h"

/* Human-readable names for TTM memory types (initializer elided in this view). */
const char *const xe_mem_type_to_name[TTM_NUM_MEM_TYPES]  =;

/* Placement entry for plain system memory (initializer elided in this view). */
static const struct ttm_place sys_placement_flags =;

/* ttm_placement built around sys_placement_flags, for system-memory BOs. */
static struct ttm_placement sys_placement =;

/* Placement entries for TT (GTT-mappable system) memory (initializer elided). */
static const struct ttm_place tt_placement_flags[] =;

/* ttm_placement built around tt_placement_flags. */
static struct ttm_placement tt_placement =;

/* Return true if @mem_type is a VRAM TTM memory type (body elided in this view). */
bool mem_type_is_vram(u32 mem_type)
{}

/*
 * NOTE(review): presumably true when @res lives in VRAM-backed stolen memory —
 * body elided in this view, confirm against the full source.
 */
static bool resource_is_stolen_vram(struct xe_device *xe, struct ttm_resource *res)
{}

/* True when @res is placed in a VRAM memory type (body elided in this view). */
static bool resource_is_vram(struct ttm_resource *res)
{}

/* Return true if @bo currently resides in VRAM (body elided in this view). */
bool xe_bo_is_vram(struct xe_bo *bo)
{}

/* Return true if @bo currently resides in stolen memory (body elided in this view). */
bool xe_bo_is_stolen(struct xe_bo *bo)
{}

/**
 * xe_bo_has_single_placement - check if BO is placed only in one memory location
 * @bo: The BO
 *
 * This function checks whether a given BO is placed in only one memory location.
 *
 * Returns: true if the BO is placed in a single memory location, false otherwise.
 *
 */
bool xe_bo_has_single_placement(struct xe_bo *bo)
{}

/**
 * xe_bo_is_stolen_devmem - check if BO is of stolen type accessed via PCI BAR
 * @bo: The BO
 *
 * The stolen memory is accessed through the PCI BAR for both DGFX and some
 * integrated platforms that have a dedicated bit in the PTE for devmem (DM).
 *
 * Returns: true if it's stolen memory accessed via PCI BAR, false otherwise.
 */
bool xe_bo_is_stolen_devmem(struct xe_bo *bo)
{}

/* True for userspace-visible BOs, as opposed to kernel-internal ones (presumed). */
static bool xe_bo_is_user(struct xe_bo *bo)
{}

/*
 * Look up the migration context to use for @mem_type.
 * NOTE(review): body elided in this view — presumably maps a VRAM mem_type to
 * the owning tile's xe_migrate instance; confirm against the full source.
 */
static struct xe_migrate *
mem_type_to_migrate(struct xe_device *xe, u32 mem_type)
{}

/* Resolve the xe_mem_region backing a TTM resource (body elided in this view). */
static struct xe_mem_region *res_to_mem_region(struct ttm_resource *res)
{}

/*
 * The try_add_*()/add_vram() helpers below build up a BO's TTM placement list
 * from its @bo_flags; @c counts the placement entries added so far (presumed
 * from the signatures — bodies elided in this view).
 */
static void try_add_system(struct xe_device *xe, struct xe_bo *bo,
			   u32 bo_flags, u32 *c)
{}

/* Append a VRAM placement for @mem_type to @places. */
static void add_vram(struct xe_device *xe, struct xe_bo *bo,
		     struct ttm_place *places, u32 bo_flags, u32 mem_type, u32 *c)
{}

/* Add VRAM placement(s) when requested by @bo_flags. */
static void try_add_vram(struct xe_device *xe, struct xe_bo *bo,
			 u32 bo_flags, u32 *c)
{}

/* Add a stolen-memory placement when requested by @bo_flags. */
static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo,
			   u32 bo_flags, u32 *c)
{}

/* Compute @bo's placements from @bo_flags (body elided in this view). */
static int __xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
				       u32 bo_flags)
{}

/*
 * Set up @bo's TTM placements from @bo_flags. Returns 0 on success, negative
 * errno otherwise (presumed from the int return — body elided in this view).
 */
int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
			      u32 bo_flags)
{}

/* TTM callback: choose the placement to evict @tbo to (body elided in this view). */
static void xe_evict_flags(struct ttm_buffer_object *tbo,
			   struct ttm_placement *placement)
{}

/* Driver-private ttm_tt subclass (members elided in this view). */
struct xe_ttm_tt {};

/* Map the ttm_tt's pages into an sg_table for DMA (presumed from name). */
static int xe_tt_map_sg(struct ttm_tt *tt)
{}

/* Undo xe_tt_map_sg() (presumed from name — body elided in this view). */
static void xe_tt_unmap_sg(struct ttm_tt *tt)
{}

/* Return the sg_table backing @bo, if any (body elided in this view). */
struct sg_table *xe_bo_sg(struct xe_bo *bo)
{}

/* TTM backend hook: allocate and initialize a ttm_tt for @ttm_bo. */
static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
				       u32 page_flags)
{}

/* TTM backend hook: back @tt with pages. */
static int xe_ttm_tt_populate(struct ttm_device *ttm_dev, struct ttm_tt *tt,
			      struct ttm_operation_ctx *ctx)
{}

/* TTM backend hook: release the pages backing @tt. */
static void xe_ttm_tt_unpopulate(struct ttm_device *ttm_dev, struct ttm_tt *tt)
{}

/* TTM backend hook: free @tt itself. */
static void xe_ttm_tt_destroy(struct ttm_device *ttm_dev, struct ttm_tt *tt)
{}

/* TTM callback: fill in iomem info so @mem can be CPU-mapped (body elided). */
static int xe_ttm_io_mem_reserve(struct ttm_device *bdev,
				 struct ttm_resource *mem)
{}

/*
 * Notify VMs with bindings to @bo of an impending move so GPU mappings can be
 * invalidated/rebound. NOTE(review): body elided in this view — confirm
 * against the full source.
 */
static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo,
				const struct ttm_operation_ctx *ctx)
{}

/*
 * The dma-buf map_attachment() / unmap_attachment() is hooked up here.
 * Note that unmapping the attachment is deferred to the next
 * map_attachment time, or to bo destroy (after idling) whichever comes first.
 * This is to avoid syncing before unmap_attachment(), assuming that the
 * caller relies on idling the reservation object before moving the
 * backing store out. Should that assumption not hold, then we will be able
 * to unconditionally call unmap_attachment() when moving out to system.
 */
static int xe_bo_move_dmabuf(struct ttm_buffer_object *ttm_bo,
			     struct ttm_resource *new_res)
{}

/**
 * xe_bo_move_notify - Notify subsystems of a pending move
 * @bo: The buffer object
 * @ctx: The struct ttm_operation_ctx controlling locking and waits.
 *
 * This function notifies subsystems of an upcoming buffer move.
 * Upon receiving such a notification, subsystems should schedule
 * halting access to the underlying pages and optionally add a fence
 * to the buffer object's dma_resv object, that signals when access is
 * stopped. The caller will wait on all dma_resv fences before
 * starting the move.
 *
 * A subsystem may commence access to the object after obtaining
 * bindings to the new backing memory under the object lock.
 *
 * Return: 0 on success, -EINTR or -ERESTARTSYS if interrupted in fault mode,
 * negative error code on error.
 */
static int xe_bo_move_notify(struct xe_bo *bo,
			     const struct ttm_operation_ctx *ctx)
{}

/* Main TTM move callback: migrate @ttm_bo to @new_mem (body elided in this view). */
static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
		      struct ttm_operation_ctx *ctx,
		      struct ttm_resource *new_mem,
		      struct ttm_place *hop)
{}

/**
 * xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
 * @bo: The buffer object to move.
 *
 * On successful completion, the object memory will be moved to system memory.
 *
 * This is needed for special handling of pinned VRAM object during
 * suspend-resume.
 *
 * Return: 0 on success. Negative error code on failure.
 */
int xe_bo_evict_pinned(struct xe_bo *bo)
{}

/**
 * xe_bo_restore_pinned() - Restore a pinned VRAM object
 * @bo: The buffer object to move.
 *
 * On successful completion, the object memory will be moved back to VRAM.
 *
 * This is needed for special handling of pinned VRAM object during
 * suspend-resume.
 *
 * Return: 0 on success. Negative error code on failure.
 */
int xe_bo_restore_pinned(struct xe_bo *bo)
{}

/* TTM callback: translate a fault @page_offset to an io-memory PFN. */
static unsigned long xe_ttm_io_mem_pfn(struct ttm_buffer_object *ttm_bo,
				       unsigned long page_offset)
{}

/* Forward declaration; defined further down in this file. */
static void __xe_bo_vunmap(struct xe_bo *bo);

/*
 * TODO: Move this function to TTM so we don't rely on how TTM does its
 * locking, thereby abusing TTM internals.
 */
static bool xe_ttm_bo_lock_in_destructor(struct ttm_buffer_object *ttm_bo)
{}

/* TTM callback: last-reference notification before final BO release. */
static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
{}

/* TTM callback: backing memory of @ttm_bo is about to be deleted. */
static void xe_ttm_bo_delete_mem_notify(struct ttm_buffer_object *ttm_bo)
{}

/* TTM device function table wiring up the callbacks above (initializer elided). */
const struct ttm_device_funcs xe_ttm_funcs =;

/* Final destructor for the TTM buffer object (body elided in this view). */
static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
{}

/* drm_gem_object_funcs.free: release the GEM object. */
static void xe_gem_object_free(struct drm_gem_object *obj)
{}

/* drm_gem_object_funcs.close: a DRM file handle to @obj is being closed. */
static void xe_gem_object_close(struct drm_gem_object *obj,
				struct drm_file *file_priv)
{}

/* CPU page-fault handler for mmapped BOs (body elided in this view). */
static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
{}

/* vm_operations for mmapped GEM objects (initializer elided in this view). */
static const struct vm_operations_struct xe_gem_vm_ops =;

/* GEM object function table (initializer elided in this view). */
static const struct drm_gem_object_funcs xe_gem_object_funcs =;

/**
 * xe_bo_alloc - Allocate storage for a struct xe_bo
 *
 * This function is intended to allocate storage to be used for input
 * to __xe_bo_create_locked(), in the case a pointer to the bo to be
 * created is needed before the call to __xe_bo_create_locked().
 * If __xe_bo_create_locked() ends up never being called, then the
 * storage allocated with this function needs to be freed using
 * xe_bo_free().
 *
 * Return: A pointer to an uninitialized struct xe_bo on success,
 * ERR_PTR(-ENOMEM) on error.
 */
struct xe_bo *xe_bo_alloc(void)
{}

/**
 * xe_bo_free - Free storage allocated using xe_bo_alloc()
 * @bo: The buffer object storage.
 *
 * Refer to xe_bo_alloc() documentation for valid use-cases.
 */
void xe_bo_free(struct xe_bo *bo)
{}

/*
 * Lowest-level BO creation entry point: initialize @bo (or allocate one if
 * NULL — presumed) and its TTM object. Body elided in this view.
 */
struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
				     struct xe_tile *tile, struct dma_resv *resv,
				     struct ttm_lru_bulk_move *bulk, size_t size,
				     u16 cpu_caching, enum ttm_bo_type type,
				     u32 flags)
{}

/* Constrain @bo's placement to the fixed [start, end) range (presumed). */
static int __xe_bo_fixed_placement(struct xe_device *xe,
				   struct xe_bo *bo,
				   u32 flags,
				   u64 start, u64 end, u64 size)
{}

/*
 * Internal helper shared by the xe_bo_create_*() variants below.
 * Body elided in this view.
 */
static struct xe_bo *
__xe_bo_create_locked(struct xe_device *xe,
		      struct xe_tile *tile, struct xe_vm *vm,
		      size_t size, u64 start, u64 end,
		      u16 cpu_caching, enum ttm_bo_type type, u32 flags)
{}

/* Create a locked BO constrained to the GGTT range [start, end) (presumed). */
struct xe_bo *
xe_bo_create_locked_range(struct xe_device *xe,
			  struct xe_tile *tile, struct xe_vm *vm,
			  size_t size, u64 start, u64 end,
			  enum ttm_bo_type type, u32 flags)
{}

/* Create a BO and return it with its reservation lock held. */
struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
				  struct xe_vm *vm, size_t size,
				  enum ttm_bo_type type, u32 flags)
{}

/* Create a userspace-visible BO with the requested CPU caching mode. */
struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
				struct xe_vm *vm, size_t size,
				u16 cpu_caching,
				enum ttm_bo_type type,
				u32 flags)
{}

/* Create a BO and return it unlocked. */
struct xe_bo *xe_bo_create(struct xe_device *xe, struct xe_tile *tile,
			   struct xe_vm *vm, size_t size,
			   enum ttm_bo_type type, u32 flags)
{}

/* Create a BO at a fixed @offset, pin it and kernel-map it (presumed from name). */
struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile,
				      struct xe_vm *vm,
				      size_t size, u64 offset,
				      enum ttm_bo_type type, u32 flags)
{}

/* Create a BO, pin it and kernel-map it (presumed from name). */
struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
				   struct xe_vm *vm, size_t size,
				   enum ttm_bo_type type, u32 flags)
{}

/* Create a pinned, mapped BO and copy @size bytes of @data into it (presumed). */
struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
				     const void *data, size_t size,
				     enum ttm_bo_type type, u32 flags)
{}

/* drmm_add_action() cleanup callback for managed pin-mapped BOs (presumed). */
static void __xe_bo_unpin_map_no_vm(struct drm_device *drm, void *arg)
{}

/* Like xe_bo_create_pin_map(), but lifetime-managed via drm_managed (presumed). */
struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
					   size_t size, u32 flags)
{}

/* Managed variant of xe_bo_create_from_data() (presumed from name). */
struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
					     const void *data, size_t size, u32 flags)
{}

/**
 * xe_managed_bo_reinit_in_vram
 * @xe: xe device
 * @tile: Tile where the new buffer will be created
 * @src: Managed buffer object allocated in system memory
 *
 * Replace a managed src buffer object allocated in system memory with a new
 * one allocated in vram, copying the data between them.
 * Buffer object in VRAM is not going to have the same GGTT address, the caller
 * is responsible for making sure that any old references to it are updated.
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, struct xe_bo **src)
{}

/*
 * XXX: This is in the VM bind data path, likely should calculate this once and
 * store, with a recalculation if the BO is moved.
 *
 * NOTE(review): kernel style prefers u64 over uint64_t for this return type.
 */
uint64_t vram_region_gpu_offset(struct ttm_resource *res)
{}

/**
 * xe_bo_pin_external - pin an external BO
 * @bo: buffer object to be pinned
 *
 * Pin an external (not tied to a VM, can be exported via dma-buf / prime FD)
 * BO. Unique call compared to xe_bo_pin as this function has its own set of
 * asserts and code to ensure evict / restore on suspend / resume.
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_bo_pin_external(struct xe_bo *bo)
{}

/* Pin a (non-external) BO in place; pairs with xe_bo_unpin(). */
int xe_bo_pin(struct xe_bo *bo)
{}

/**
 * xe_bo_unpin_external - unpin an external BO
 * @bo: buffer object to be unpinned
 *
 * Unpin an external (not tied to a VM, can be exported via dma-buf / prime FD)
 * BO. Unique call compared to xe_bo_unpin as this function has its own set of
 * asserts and code to ensure evict / restore on suspend / resume.
 *
 * Returns 0 for success, negative error code otherwise.
 */
void xe_bo_unpin_external(struct xe_bo *bo)
{}

/* Release a pin taken with xe_bo_pin(). */
void xe_bo_unpin(struct xe_bo *bo)
{}

/**
 * xe_bo_validate() - Make sure the bo is in an allowed placement
 * @bo: The bo,
 * @vm: Pointer to the vm the bo shares a locked dma_resv object with, or
 *      NULL. Used together with @allow_res_evict.
 * @allow_res_evict: Whether it's allowed to evict bos sharing @vm's
 *                   reservation object.
 *
 * Make sure the bo is in allowed placement, migrating it if necessary. If
 * needed, other bos will be evicted. If bos selected for eviction shares
 * the @vm's reservation object, they can be evicted iff @allow_res_evict is
 * set to true, otherwise they will be bypassed.
 *
 * Return: 0 on success, negative error code on failure. May return
 * -EINTR or -ERESTARTSYS if internal waits are interrupted by a signal.
 */
int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
{}

/* True if @bo is a driver-owned xe_bo and not some other TTM object (presumed). */
bool xe_bo_is_xe_bo(struct ttm_buffer_object *bo)
{}

/*
 * Resolve a BO address. There is no assert to check if the proper lock is held
 * so it should only be used in cases where it is not fatal to get the wrong
 * address, such as printing debug information, but not in cases where memory is
 * written based on this result.
 */
dma_addr_t __xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size)
{}

/* Locked/asserting variant of __xe_bo_addr() (presumed — body elided). */
dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size)
{}

/* Map @bo into kernel address space; pairs with xe_bo_vunmap(). */
int xe_bo_vmap(struct xe_bo *bo)
{}

/* Tear down the kernel mapping without locking (internal helper). */
static void __xe_bo_vunmap(struct xe_bo *bo)
{}

/* Remove the kernel mapping created by xe_bo_vmap(). */
void xe_bo_vunmap(struct xe_bo *bo)
{}

/* DRM_IOCTL_XE_GEM_CREATE handler (body elided in this view). */
int xe_gem_create_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{}

/* DRM_IOCTL_XE_GEM_MMAP_OFFSET handler (body elided in this view). */
int xe_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file)
{}

/**
 * xe_bo_lock() - Lock the buffer object's dma_resv object
 * @bo: The struct xe_bo whose lock is to be taken
 * @intr: Whether to perform any wait interruptible
 *
 * Locks the buffer object's dma_resv object. If the buffer object is
 * pointing to a shared dma_resv object, that shared lock is locked.
 *
 * Return: 0 on success, -EINTR if @intr is true and the wait for a
 * contended lock was interrupted. If @intr is set to false, the
 * function always returns 0.
 */
int xe_bo_lock(struct xe_bo *bo, bool intr)
{}

/**
 * xe_bo_unlock() - Unlock the buffer object's dma_resv object
 * @bo: The struct xe_bo whose lock is to be released.
 *
 * Unlock a buffer object lock that was locked by xe_bo_lock().
 */
void xe_bo_unlock(struct xe_bo *bo)
{}

/**
 * xe_bo_can_migrate - Whether a buffer object likely can be migrated
 * @bo: The buffer object to migrate
 * @mem_type: The TTM memory type intended to migrate to
 *
 * Check whether the buffer object supports migration to the
 * given memory type. Note that pinning may affect the ability to migrate as
 * returned by this function.
 *
 * This function is primarily intended as a helper for checking the
 * possibility to migrate buffer objects and can be called without
 * the object lock held.
 *
 * Return: true if migration is possible, false otherwise.
 */
bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type)
{}

/* Fill in a ttm_place describing @mem_type (body elided in this view). */
static void xe_place_from_ttm_type(u32 mem_type, struct ttm_place *place)
{}

/**
 * xe_bo_migrate - Migrate an object to the desired region id
 * @bo: The buffer object to migrate.
 * @mem_type: The TTM region type to migrate to.
 *
 * Attempt to migrate the buffer object to the desired memory region. The
 * buffer object may not be pinned, and must be locked.
 * On successful completion, the object memory type will be updated,
 * but an async migration task may not have completed yet, and to
 * accomplish that, the object's kernel fences must be signaled with
 * the object lock held.
 *
 * Return: 0 on success. Negative error code on failure. In particular may
 * return -EINTR or -ERESTARTSYS if signal pending.
 */
int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
{}

/**
 * xe_bo_evict - Evict an object to evict placement
 * @bo: The buffer object to migrate.
 * @force_alloc: Set force_alloc in ttm_operation_ctx
 *
 * On successful completion, the object memory will be moved to evict
 * placement. This function blocks until the object has been fully moved.
 *
 * Return: 0 on success. Negative error code on failure.
 */
int xe_bo_evict(struct xe_bo *bo, bool force_alloc)
{}

/**
 * xe_bo_needs_ccs_pages - Whether a bo needs to back up CCS pages when
 * placed in system memory.
 * @bo: The xe_bo
 *
 * Return: true if extra pages need to be allocated, false otherwise.
 */
bool xe_bo_needs_ccs_pages(struct xe_bo *bo)
{}

/**
 * __xe_bo_release_dummy() - Dummy kref release function
 * @kref: The embedded struct kref.
 *
 * Dummy release function for xe_bo_put_deferred(). Keep off.
 */
void __xe_bo_release_dummy(struct kref *kref)
{}

/**
 * xe_bo_put_commit() - Put bos whose put was deferred by xe_bo_put_deferred().
 * @deferred: The lockless list used for the call to xe_bo_put_deferred().
 *
 * Puts all bos whose put was deferred by xe_bo_put_deferred().
 * The @deferred list can be either an onstack local list or a global
 * shared list used by a workqueue.
 */
void xe_bo_put_commit(struct llist_head *deferred)
{}

/**
 * xe_bo_dumb_create - Create a dumb bo as backing for a fb
 * @file_priv: ...
 * @dev: ...
 * @args: ...
 *
 * See dumb_create() hook in include/drm/drm_drv.h
 *
 * Return: ...
 */
int xe_bo_dumb_create(struct drm_file *file_priv,
		      struct drm_device *dev,
		      struct drm_mode_create_dumb *args)
{}

/* Drop the mmap offset on runtime-PM release (presumed — body elided). */
void xe_bo_runtime_pm_release_mmap_offset(struct xe_bo *bo)
{}

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_bo.c"
#endif