linux/drivers/gpu/drm/i915/i915_gem.c

/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <[email protected]>
 *
 */

#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/dma-resv.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/mman.h>

#include <drm/drm_cache.h>
#include <drm/drm_vma_manager.h>

#include "gem/i915_gem_clflush.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_object_frontbuffer.h"
#include "gem/i915_gem_pm.h"
#include "gem/i915_gem_region.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_workarounds.h"

#include "i915_drv.h"
#include "i915_file_private.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_clock_gating.h"

static int
insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
{}

static void
remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node)
{}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{}

int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
			   unsigned long flags)
{}

static int
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
	    bool needs_clflush)
{}

static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args)
{}

static inline bool
gtt_user_read(struct io_mapping *mapping,
	      loff_t base, int offset,
	      char __user *user_data, int length)
{}

static struct i915_vma *i915_gem_gtt_prepare(struct drm_i915_gem_object *obj,
					     struct drm_mm_node *node,
					     bool write)
{}

static void i915_gem_gtt_cleanup(struct drm_i915_gem_object *obj,
				 struct drm_mm_node *node,
				 struct i915_vma *vma)
{}

static int
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
		   const struct drm_i915_gem_pread *args)
{}

/**
 * i915_gem_pread_ioctl - Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of the destination buffer are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{}
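
/*
 * Example (illustrative only, not part of the driver): reading the first
 * bytes of a GEM object from userspace with libdrm's drmIoctl() wrapper.
 * Error handling and the origin of fd/handle are elided.
 *
 *	char buf[4096];
 *	struct drm_i915_gem_pread pread = {
 *		.handle   = handle,		// GEM object handle
 *		.offset   = 0,			// byte offset into the object
 *		.size     = sizeof(buf),
 *		.data_ptr = (uintptr_t)buf,	// destination buffer
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread))
 *		perror("I915_GEM_PREAD");	// buf contents now undefined
 */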

/* This is the fast write path, which cannot handle page faults in the
 * source data: the copy runs with page faults disabled, and we must fall
 * back to a slower path if the user pages are not resident.
 */

static inline bool
ggtt_write(struct io_mapping *mapping,
	   loff_t base, int offset,
	   char __user *user_data, int length)
{}
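
/*
 * A minimal sketch of the no-fault pattern (illustrative; not the actual
 * body of ggtt_write()): attempt the copy with page faults disabled, and
 * only on failure fall back to a mapping under which faulting, and hence
 * sleeping, is allowed. The _sketch suffix marks this as hypothetical.
 */
static inline bool
ggtt_write_sketch(struct io_mapping *mapping,
		  loff_t base, int offset,
		  char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the plain CPU copy functions because this is x86. */
	vaddr = io_mapping_map_local_wc(mapping, base);
	pagefault_disable();
	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
						      user_data, length);
	pagefault_enable();
	io_mapping_unmap_local(vaddr);

	if (unwritten) {
		/* Slow path: faulting is allowed and the copy may sleep. */
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_from_user((void __force *)vaddr + offset,
					   user_data, length);
		io_mapping_unmap(vaddr);
	}

	return unwritten;
}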

/**
 * i915_gem_gtt_pwrite_fast - the fast pwrite path, where we copy user data
 * directly into the GTT, uncached
 * @obj: i915 GEM object
 * @args: pwrite arguments structure
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_pwrite *args)
{}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush_after is set.
 */
static int
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
	     bool needs_clflush_before,
	     bool needs_clflush_after)
{}
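
/*
 * A sketch of the flush-copy-flush sequence described above (illustrative;
 * not the actual body of shmem_pwrite()): stale cachelines are cleaned
 * before the copy and dirty ones written back after it, so the GPU never
 * snoops stale data through a non-coherent mapping.
 */
static int
shmem_pwrite_sketch(struct page *page, int offset, int len,
		    char __user *user_data,
		    bool needs_clflush_before,
		    bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap_local_page(page);

	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_from_user(vaddr + offset, user_data, len);
	if (!ret && needs_clflush_after)
		drm_clflush_virt_range(vaddr + offset, len);

	kunmap_local(vaddr);

	return ret ? -EFAULT : 0;
}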

static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *args)
{}

/**
 * i915_gem_pwrite_ioctl - Writes data to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{}
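
/*
 * Example (illustrative only): the userspace side mirrors PREAD, with
 * data_ptr naming the source buffer instead of the destination:
 *
 *	struct drm_i915_gem_pwrite pwrite = {
 *		.handle   = handle,
 *		.offset   = 0,
 *		.size     = sizeof(buf),
 *		.data_ptr = (uintptr_t)buf,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite))
 *		perror("I915_GEM_PWRITE");
 */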

/**
 * i915_gem_sw_finish_ioctl - Called when user space has done writes to this buffer
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{}
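
/*
 * Example (illustrative only): userspace issues this after completing CPU
 * writes through a CPU mapping, so the kernel can flush frontbuffer state
 * if the object happens to be a scanout buffer:
 *
 *	struct drm_i915_gem_sw_finish finish = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SW_FINISH, &finish);
 */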

void i915_gem_runtime_suspend(struct drm_i915_private *i915)
{}

static void discard_ggtt_vma(struct i915_vma *vma)
{}

struct i915_vma *
i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
			    struct i915_gem_ww_ctx *ww,
			    const struct i915_gtt_view *view,
			    u64 size, u64 alignment, u64 flags)
{}

struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_gtt_view *view,
			 u64 size, u64 alignment, u64 flags)
{}

int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{}

/*
 * A single pass should suffice to release all the freed objects (along most
 * call paths), but be a little more paranoid: freeing the objects does take
 * a small amount of time, during which the RCU callbacks could have added
 * new objects onto the freed list and armed the work again.
 */
void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
{}
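
/*
 * A sketch of that "loop until quiescent" idea (illustrative; assumes the
 * usual i915->mm.free_count / i915->mm.free_work bookkeeping for objects
 * freed via RCU):
 */
static void drain_freed_objects_sketch(struct drm_i915_private *i915)
{
	/* Repeat while the freeing work keeps getting re-armed behind us. */
	while (atomic_read(&i915->mm.free_count)) {
		flush_work(&i915->mm.free_work);
		rcu_barrier();
	}
}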

/*
 * Similar to the objects above (see i915_gem_drain_freed_objects()), in
 * general we have workers that are armed by RCU and then rearm themselves in
 * their callbacks. To be paranoid, we need to drain the workqueue a second
 * time after waiting for the RCU grace period so that we catch work queued
 * via RCU from the first pass. As neither drain_workqueue() nor
 * flush_workqueue() report a result, we assume that no more than 3 passes
 * are required to catch all _recursive_ RCU delayed work.
 */
 */
void i915_gem_drain_workqueue(struct drm_i915_private *i915)
{}
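
/*
 * A sketch of the multi-pass drain described above (illustrative; assumes
 * the driver-wide i915->wq workqueue): each pass flushes pending work,
 * waits out an RCU grace period, and reaps freed objects, so work re-armed
 * via RCU during pass N is caught by pass N + 1.
 */
static void drain_workqueue_sketch(struct drm_i915_private *i915)
{
	int pass;

	for (pass = 0; pass < 3; pass++) {
		flush_workqueue(i915->wq);
		rcu_barrier();
		i915_gem_drain_freed_objects(i915);
	}

	/* Finally refuse new work and wait for the queue to empty. */
	drain_workqueue(i915->wq);
}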

int i915_gem_init(struct drm_i915_private *dev_priv)
{}

void i915_gem_driver_register(struct drm_i915_private *i915)
{}

void i915_gem_driver_unregister(struct drm_i915_private *i915)
{}

void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
{}

void i915_gem_driver_release(struct drm_i915_private *dev_priv)
{}

static void i915_gem_init__mm(struct drm_i915_private *i915)
{}

void i915_gem_init_early(struct drm_i915_private *dev_priv)
{}

void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
{}

int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
{}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gem_device.c"
#include "selftests/i915_gem.c"
#endif