linux/drivers/gpu/drm/i915/gem/i915_gem_object_types.h

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_TYPES_H__
#define __I915_GEM_OBJECT_TYPES_H__

#include <linux/mmu_notifier.h>

#include <drm/drm_gem.h>
#include <drm/ttm/ttm_bo.h>
#include <uapi/drm/i915_drm.h>

#include "i915_active.h"
#include "i915_selftest.h"
#include "i915_vma_resource.h"

#include "gt/intel_gt_defines.h"

struct drm_i915_gem_object;
struct intel_frontbuffer;
struct intel_memory_region;

/*
 * struct i915_lut_handle tracks the fast lookups from handle to vma used
 * for execbuf. Although we use a radixtree for that mapping, in order to
 * remove the entries as the object or context is closed, we need a secondary
 * list and a translation entry (i915_lut_handle).
 */
struct i915_lut_handle {};
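
/*
 * A rough sketch of such a translation entry (the field names below are
 * illustrative, not necessarily the driver's exact layout): it only needs
 * the handle that keys the radixtree, the context that owns that radixtree,
 * and a link for the object's secondary list so the entry can be unwound
 * when either side is closed:
 *
 *	struct lut_entry_sketch {
 *		struct list_head obj_link;	link on the object's LUT list
 *		struct i915_gem_context *ctx;	owner of the handle->vma tree
 *		u32 handle;			execbuf handle, radixtree key
 *	};
 */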

struct drm_i915_gem_object_ops {};

/**
 * enum i915_cache_level - The supported GTT caching values for system memory
 * pages.
 *
 * These translate to some special GTT PTE bits when binding pages into an
 * address space. They also determine whether the object, or rather its pages,
 * stays coherent with the GPU when the CPU also reads or writes those pages
 * through its cache.
 *
 * Userspace can also control this through struct drm_i915_gem_caching.
 */
enum i915_cache_level {};
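
/*
 * For reference, the userspace side of that control is the set-caching
 * ioctl on the object handle; a minimal sketch (error handling omitted,
 * assuming an open DRM fd and a valid GEM handle):
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 */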

enum i915_map_type {};

enum i915_mmap_type {};

struct i915_mmap_offset {};

struct i915_gem_object_page_iter {};

struct drm_i915_gem_object {};

#define intel_bo_to_drm_bo(bo) (&(bo)->base)
#define intel_bo_to_i915(bo) to_i915(intel_bo_to_drm_bo(bo)->dev)

static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
	return container_of(gem, struct drm_i915_gem_object, base);
}

#endif