// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>
#include <linux/sched/mm.h>

#include <drm/drm_cache.h>

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "i915_reg.h"
#include "i915_trace.h"
#include "i915_utils.h"
#include "intel_gt.h"
#include "intel_gt_mcr.h"
#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_gtt.h"

bool i915_ggtt_require_binder(struct drm_i915_private *i915)
{}

static bool intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915)
{}

bool intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915)
{}
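
/*
 * Illustrative sketch only, not the verbatim upstream body: workaround
 * predicates like the two above are typically short compositions of
 * platform checks. The helper name below is hypothetical; IS_CHERRYVIEW()
 * and i915_vtd_active() are existing i915 helpers, and the assumption is
 * that GGTT/PPGTT updates need serialising on Cherryview or whenever the
 * VT-d GGTT-update workaround applies.
 */
static inline bool __sketch_vm_no_concurrent_access_wa(struct drm_i915_private *i915)
{
	return IS_CHERRYVIEW(i915) || intel_ggtt_update_needs_vtd_wa(i915);
}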

struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz)
{}

struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
{}

int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{}

int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{}
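
/*
 * Illustrative sketch, not the verbatim upstream body: mapping a page-table
 * object for CPU access generally means pinning a kernel vmap of its pages
 * and marking the object unshrinkable so the mapping stays valid. This shows
 * the unlocked path; a locked variant would use i915_gem_object_pin_map()
 * under the already-held object lock. The helper name is hypothetical and
 * the unconditional I915_MAP_WB is an assumption; the real driver picks the
 * map type based on platform coherency.
 */
static inline int __sketch_map_pt(struct i915_address_space *vm,
				  struct drm_i915_gem_object *obj)
{
	void *vaddr;

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	i915_gem_object_make_unshrinkable(obj);
	return 0;
}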

static void clear_vm_list(struct list_head *list)
{}

static void __i915_vm_close(struct i915_address_space *vm)
{}

/*
 * Lock the vm into the current ww context; if we lock one object, we lock
 * them all.
 */
int i915_vm_lock_objects(struct i915_address_space *vm,
			 struct i915_gem_ww_ctx *ww)
{}
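
/*
 * Illustrative sketch, not the verbatim upstream body: because the vm's
 * page-table objects are created to share the vm's &i915_address_space._resv
 * reservation object, ww-locking any one of them (the scratch page is a
 * convenient choice) locks the whole set. The helper name is hypothetical
 * and the use of scratch[0] is an assumption.
 */
static inline int __sketch_vm_lock_objects(struct i915_address_space *vm,
					   struct i915_gem_ww_ctx *ww)
{
	/* All PT objects share one dma-resv, so one lock covers them all. */
	return i915_gem_object_lock(vm->scratch[0], ww);
}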

void i915_address_space_fini(struct i915_address_space *vm)
{}

/**
 * i915_vm_resv_release - Final struct i915_address_space destructor
 * @kref: Pointer to the &i915_address_space.resv_ref member.
 *
 * This function is called when the last lock sharer no longer shares the
 * &i915_address_space._resv lock, and also if we raced with the vma
 * destruction code while destroying a vma.
 */
void i915_vm_resv_release(struct kref *kref)
{}
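
/*
 * Illustrative sketch, not the verbatim upstream body: a kref release
 * callback like the one documented above typically recovers the address
 * space from the embedded resv_ref kref, tears down the shared reservation
 * object and frees the structure. The helper name is hypothetical.
 */
static inline void __sketch_vm_resv_release(struct kref *kref)
{
	struct i915_address_space *vm =
		container_of(kref, struct i915_address_space, resv_ref);

	dma_resv_fini(&vm->_resv);	/* last sharer is gone */
	kfree(vm);			/* assumed: the vm is freed here */
}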

static void __i915_vm_release(struct work_struct *work)
{}

void i915_vm_release(struct kref *kref)
{}
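
/*
 * Illustrative sketch, not the verbatim upstream body: final vm teardown has
 * to run without holding caller locks, so the release callback usually just
 * punts to the work item handled by __i915_vm_release() above. The helper
 * name is hypothetical and the wq/release_work field names are assumptions.
 */
static inline void __sketch_vm_release(struct kref *kref)
{
	struct i915_address_space *vm =
		container_of(kref, struct i915_address_space, ref);

	queue_work(vm->i915->wq, &vm->release_work);
}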

void i915_address_space_init(struct i915_address_space *vm, int subclass)
{}

void *__px_vaddr(struct drm_i915_gem_object *p)
{}

dma_addr_t __px_dma(struct drm_i915_gem_object *p)
{}

struct page *__px_page(struct drm_i915_gem_object *p)
{}
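
/*
 * Illustrative sketch, not the verbatim upstream bodies: page-table backing
 * objects are single-chunk GEM objects, so their DMA address and struct page
 * can be read straight from the first scatterlist entry. The helper names
 * are hypothetical and the p->mm.pages field name is an assumption.
 */
static inline dma_addr_t __sketch_px_dma(struct drm_i915_gem_object *p)
{
	return sg_dma_address(p->mm.pages->sgl);
}

static inline struct page *__sketch_px_page(struct drm_i915_gem_object *p)
{
	return sg_page(p->mm.pages->sgl);
}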

void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count)
{}
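
/*
 * Illustrative sketch, not the verbatim upstream body: filling a page-table
 * page with a scratch PTE value is a 64-bit memset of the CPU mapping
 * followed by a clflush so the GPU sees the update; __px_vaddr() above
 * returns that mapping. The helper name is hypothetical.
 */
static inline void __sketch_fill_page_dma(struct drm_i915_gem_object *p,
					  const u64 val, unsigned int count)
{
	void *vaddr = __px_vaddr(p);

	memset64(vaddr, val, count);
	drm_clflush_virt_range(vaddr, PAGE_SIZE);
}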

static void poison_scratch_page(struct drm_i915_gem_object *scratch)
{}

int setup_scratch_page(struct i915_address_space *vm)
{}

void free_scratch(struct i915_address_space *vm)
{}

void gtt_write_workarounds(struct intel_gt *gt)
{}

static void xelpmp_setup_private_ppat(struct intel_uncore *uncore)
{}

static void xelpg_setup_private_ppat(struct intel_gt *gt)
{}

static void tgl_setup_private_ppat(struct intel_uncore *uncore)
{}

static void xehp_setup_private_ppat(struct intel_gt *gt)
{}

static void icl_setup_private_ppat(struct intel_uncore *uncore)
{}

/*
 * The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
 * bits. When using advanced contexts each context stores its own PAT, but
 * writing this data shouldn't be harmful even in those cases.
 */
static void bdw_setup_private_ppat(struct intel_uncore *uncore)
{}
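
/*
 * Illustrative sketch, not the verbatim upstream table: programming the
 * private PPAT boils down to packing eight 8-bit entries that pair a memory
 * type with a cache level and writing the result to the PPAT registers.
 * Only a few entries are shown here; the GEN8_PPAT* macros and the
 * GEN8_PRIVATE_PAT_LO/HI registers are the existing i915 definitions, and
 * the helper name is hypothetical.
 */
static inline void __sketch_bdw_setup_private_ppat(struct intel_uncore *uncore)
{
	u64 pat;

	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |	/* normal objects */
	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) |	/* write-combined */
	      GEN8_PPAT(3, GEN8_PPAT_UC);			/* uncached, e.g. scanout */

	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}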

static void chv_setup_private_ppat(struct intel_uncore *uncore)
{}

void setup_private_pat(struct intel_gt *gt)
{}

struct i915_vma *
__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)
{}
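
/*
 * Illustrative sketch, not the verbatim upstream body: a read-back scratch
 * buffer is an internal GEM object wrapped in a vma for this vm; the pinned
 * variant below would additionally pin the vma before returning. The helper
 * name is hypothetical.
 */
static inline struct i915_vma *
__sketch_vm_create_scratch_for_read(struct i915_address_space *vm,
				    unsigned long size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = i915_gem_object_create_internal(vm->i915, PAGE_ALIGN(size));
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		i915_gem_object_put(obj);

	return vma;
}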

struct i915_vma *
__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size)
{}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gtt.c"
#endif