/* linux/drivers/gpu/drm/i915/i915_request.c */

/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/dma-fence-array.h>
#include <linux/dma-fence-chain.h>
#include <linux/irq_work.h>
#include <linux/prefetch.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_breadcrumbs.h"
#include "gt/intel_context.h"
#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_reset.h"
#include "gt/intel_ring.h"
#include "gt/intel_rps.h"

#include "i915_active.h"
#include "i915_config.h"
#include "i915_deps.h"
#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_trace.h"

/*
 * NOTE(review): struct body elided ("{}") in this extract — restore the
 * full definition from the complete source before compiling.
 */
struct execute_cb {};

/* Slab caches backing i915_request and execute_cb allocations; presumably
 * created/destroyed by i915_request_module_init()/_exit() — bodies are
 * not visible here to confirm. */
static struct kmem_cache *slab_requests;
static struct kmem_cache *slab_execute_cbs;

/*
 * dma_fence callbacks for i915 requests. NOTE(review): every body below
 * is elided in this extract, and the i915_fence_ops initializer that
 * would bind them is missing — descriptions are inferred from names.
 */

/* ->get_driver_name() hook (body elided). */
static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{}

/* ->get_timeline_name() hook (body elided). */
static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{}

/* ->signaled() hook: reports whether @fence has signaled (body elided). */
static bool i915_fence_signaled(struct dma_fence *fence)
{}

/* ->enable_signaling() hook (body elided). */
static bool i915_fence_enable_signaling(struct dma_fence *fence)
{}

/* ->wait() hook: sleeps up to @timeout jiffies for @fence (body elided). */
static signed long i915_fence_wait(struct dma_fence *fence,
				   bool interruptible,
				   signed long timeout)
{}

/* Expose the request slab cache to other i915 code (body elided). */
struct kmem_cache *i915_request_slab_cache(void)
{}

/* ->release() hook: final fence teardown / freeing (body elided). */
static void i915_fence_release(struct dma_fence *fence)
{}

/*
 * NOTE(review): initializer elided in this extract — "= ;" as written
 * does not compile; restore the ops table from the full source.
 */
const struct dma_fence_ops i915_fence_ops =;

/* irq_work callback used to run an execute_cb (body elided). */
static void irq_execute_cb(struct irq_work *wrk)
{}

/* Walk @rq's execute callbacks, invoking each via @fn (body elided;
 * description inferred from the signature). */
static __always_inline void
__notify_execute_cb(struct i915_request *rq, bool (*fn)(struct irq_work *wrk))
{}

/* Notify execute callbacks from irq context (body elided). */
static void __notify_execute_cb_irq(struct i915_request *rq)
{}

/* irq_work trampoline that runs the callback immediately (body elided). */
static bool irq_work_imm(struct irq_work *wrk)
{}

/* Immediately notify @rq's execute callbacks (body elided). */
void i915_request_notify_execute_cb_imm(struct i915_request *rq)
{}

/* Fill the request's ring payload with @val, presumably for poisoning
 * skipped requests — body elided; intent inferred from the name. */
static void __i915_request_fill(struct i915_request *rq, u8 val)
{}

/**
 * i915_request_active_engine
 * @rq: request to inspect
 * @active: pointer in which to return the active engine
 *
 * Fills the currently active engine to the @active pointer if the request
 * is active and still not completed.
 *
 * Returns true if request was active or false otherwise.
 */
bool
i915_request_active_engine(struct i915_request *rq,
			   struct intel_engine_cs **active)
{}

/* Reset the per-request watchdog state (body elided). */
static void __rq_init_watchdog(struct i915_request *rq)
{}

/* hrtimer callback fired when the request watchdog expires (body elided). */
static enum hrtimer_restart __rq_watchdog_expired(struct hrtimer *hrtimer)
{}

/* Arm the request watchdog timer (body elided). */
static void __rq_arm_watchdog(struct i915_request *rq)
{}

/* Cancel a previously armed request watchdog (body elided). */
static void __rq_cancel_watchdog(struct i915_request *rq)
{}

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

/**
 * i915_request_free_capture_list - Free a capture list
 * @capture: Pointer to the first list item or NULL
 *
 */
void i915_request_free_capture_list(struct i915_capture_list *capture)
{}

/* NOTE(review): macro expansions elided in this extract; presumably they
 * assert/clear the request's capture list in the full source. */
#define assert_capture_list_is_null(_rq)

#define clear_capture_list(_rq)

#else

/* NOTE(review): these fallbacks lack parameter lists, unlike the variants
 * above — likely an extraction artifact; verify against the full source. */
#define i915_request_free_capture_list

#define assert_capture_list_is_null

#define clear_capture_list

#endif

/* Retire a completed request, releasing its resources (body elided). */
bool i915_request_retire(struct i915_request *rq)
{}

/* Retire requests on the timeline up to and including @rq (body elided;
 * intent inferred from the name). */
void i915_request_retire_upto(struct i915_request *rq)
{}

/* Return the engine's active-request array (body elided). */
static struct i915_request * const *
__engine_active(struct intel_engine_cs *engine)
{}

/* Test whether @signal is currently executing on hardware (body elided). */
static bool __request_in_flight(const struct i915_request *signal)
{}

/* Queue an execute callback so @rq waits for @signal to begin execution;
 * @gfp controls allocation context (body elided). */
static int
__await_execution(struct i915_request *rq,
		  struct i915_request *signal,
		  gfp_t gfp)
{}

/* Classify whether @error is unrecoverable for the request (body elided;
 * intent inferred from the name). */
static bool fatal_error(int error)
{}

/* Turn @rq into a no-op so it completes without doing work (body elided;
 * intent inferred from the name). */
void __i915_request_skip(struct i915_request *rq)
{}

/* Record @error on the request only if no error is set yet; returns
 * whether this call installed the error (inferred from the name — body
 * elided). */
bool i915_request_set_error_once(struct i915_request *rq, int error)
{}

/* Mark @rq as completed with -EIO (presumably — body elided). */
struct i915_request *i915_request_mark_eio(struct i915_request *rq)
{}

/* Submit the request to the engine; the double-underscore prefix suggests
 * the caller holds the relevant lock — verify. Body elided. */
bool __i915_request_submit(struct i915_request *request)
{}

/* Presumably a locked wrapper around __i915_request_submit() (body
 * elided). */
void i915_request_submit(struct i915_request *request)
{}

/* Undo submission of @request (body elided). */
void __i915_request_unsubmit(struct i915_request *request)
{}

/* Presumably a locked wrapper around __i915_request_unsubmit() (body
 * elided). */
void i915_request_unsubmit(struct i915_request *request)
{}

/* Cancel an in-flight request, recording @error (body elided). */
void i915_request_cancel(struct i915_request *rq, int error)
{}

/* i915_sw_fence notify callback driving request submission (body elided). */
static int
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{}

/* i915_sw_fence notify callback for semaphore waits (body elided). */
static int
semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{}

/* Retire completed requests on @tl (body elided). */
static void retire_requests(struct intel_timeline *tl)
{}

/* Slow-path request allocation, presumably used when the fast path is
 * exhausted — body elided; intent inferred from the name. */
static noinline struct i915_request *
request_alloc_slow(struct intel_timeline *tl,
		   struct i915_request **rsvd,
		   gfp_t gfp)
{}

/* Slab constructor: one-time initialisation of an i915_request object
 * (body elided). */
static void __i915_request_ctor(void *arg)
{}

/* NOTE(review): expansions elided in this extract; the #else form lacks
 * a parameter list — likely an extraction artifact, verify against the
 * full source. */
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#define clear_batch_ptr(_rq)
#else
#define clear_batch_ptr
#endif

/* Allocate and initialise a request on @ce's timeline; the
 * double-underscore prefix suggests the caller manages locking — verify.
 * Body elided. */
struct i915_request *
__i915_request_create(struct intel_context *ce, gfp_t gfp)
{}

/* Public request-creation entry point for @ce (body elided). */
struct i915_request *
i915_request_create(struct intel_context *ce)
{}

/* Make @rq wait for @signal to *start* executing (body elided; intent
 * inferred from the name). */
static int
i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
{}

/* Mask of engines @rq is already busywaiting on (inferred from the
 * return type and name — body elided). */
static intel_engine_mask_t
already_busywaiting(struct i915_request *rq)
{}

/* Emit a semaphore wait on @from's @seqno into @to's ring (body elided;
 * mechanism inferred from the name — verify). */
static int
__emit_semaphore_wait(struct i915_request *to,
		      struct i915_request *from,
		      u32 seqno)
{}

/* Whether a HW semaphore may be used to order @to after @from (body
 * elided). */
static bool
can_use_semaphore_wait(struct i915_request *to, struct i915_request *from)
{}

/* Higher-level semaphore-wait emission, presumably with software
 * fallbacks (body elided). */
static int
emit_semaphore_wait(struct i915_request *to,
		    struct i915_request *from,
		    gfp_t gfp)
{}

/* Timeline sync-map query for a fence's *start* point (body elided). */
static bool intel_timeline_sync_has_start(struct intel_timeline *tl,
					  struct dma_fence *fence)
{}

/* Record a fence's start point in the timeline sync map (body elided). */
static int intel_timeline_sync_set_start(struct intel_timeline *tl,
					 const struct dma_fence *fence)
{}

/* Order @to's execution after @from begins executing (body elided). */
static int
__i915_request_await_execution(struct i915_request *to,
			       struct i915_request *from)
{}

/* Flag @rq as depending on a foreign (non-i915) fence (body elided;
 * intent inferred from the name). */
static void mark_external(struct i915_request *rq)
{}

/* Await a non-i915 dma_fence (body elided). */
static int
__i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
{}

/* Wrapper around the external await — presumably unrolls
 * dma_fence_chain, given the dma-fence-chain.h include (body elided). */
static int
i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
{}

/* Whether @rq belongs to a parallel (multi-context) submission (body
 * elided). */
static inline bool is_parallel_rq(struct i915_request *rq)
{}

/* Parent intel_context of a parallel request (body elided). */
static inline struct intel_context *request_to_parent(struct i915_request *rq)
{}

/* Whether @to and @from share the same parallel parent (body elided). */
static bool is_same_parallel_context(struct i915_request *to,
				     struct i915_request *from)
{}

/* Await the start of execution of @fence — which may be a composite,
 * given the dma-fence-array include (body elided). */
int
i915_request_await_execution(struct i915_request *rq,
			     struct dma_fence *fence)
{}

/* Order submission of @to after submission of @from (body elided). */
static int
await_request_submit(struct i915_request *to, struct i915_request *from)
{}

/* Core i915-to-i915 request dependency handling (body elided). */
static int
i915_request_await_request(struct i915_request *to, struct i915_request *from)
{}

/* Make @rq wait for completion of @fence (native or foreign; body
 * elided). */
int
i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
{}

/**
 * i915_request_await_deps - set this request to (async) wait upon a struct
 * i915_deps dma_fence collection
 * @rq: request we are wishing to use
 * @deps: The struct i915_deps containing the dependencies.
 *
 * Returns 0 if successful, negative error code on error.
 */
int i915_request_await_deps(struct i915_request *rq, const struct i915_deps *deps)
{}

/**
 * i915_request_await_object - set this request to (async) wait upon a bo
 * @to: request we are wishing to use
 * @obj: object which may be in use on another ring.
 * @write: whether the wait is on behalf of a writer
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Conceptually we serialise writes between engines inside the GPU.
 * We only allow one engine to write into a buffer at any time, but
 * multiple readers. To ensure each has a coherent view of memory, we must:
 *
 * - If there is an outstanding write request to the object, the new
 *   request must wait for it to complete (either CPU or in hw, requests
 *   on the same ring will be naturally ordered).
 *
 * - If we are a write request (pending_write_domain is set), the new
 *   request must wait for outstanding read requests to complete.
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
int
i915_request_await_object(struct i915_request *to,
			  struct drm_i915_gem_object *obj,
			  bool write)
{}

/* Serialise @rq against HuC firmware loading (body elided; intent
 * inferred from the name). */
static void i915_request_await_huc(struct i915_request *rq)
{}

/* Ordering for requests that are part of a parallel submission when
 * adding to @timeline; returns a request, presumably the one to depend
 * on (body elided). */
static struct i915_request *
__i915_request_ensure_parallel_ordering(struct i915_request *rq,
					struct intel_timeline *timeline)
{}

/* Standard (non-parallel) timeline ordering counterpart (body elided). */
static struct i915_request *
__i915_request_ensure_ordering(struct i915_request *rq,
			       struct intel_timeline *timeline)
{}

/* Link @rq into its timeline (body elided; intent inferred from the
 * name). */
static struct i915_request *
__i915_request_add_to_timeline(struct i915_request *rq)
{}

/*
 * NB: This function is not allowed to fail. Doing so would mean the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
 */
struct i915_request *__i915_request_commit(struct i915_request *rq)
{}

/* Queue the request from bottom-half context (body elided). */
void __i915_request_queue_bh(struct i915_request *rq)
{}

/* Queue @rq for execution with scheduling attributes @attr (body
 * elided). */
void __i915_request_queue(struct i915_request *rq,
			  const struct i915_sched_attr *attr)
{}

/* Commit and queue a constructed request — presumably the normal tail of
 * the build-a-request flow (body elided). */
void i915_request_add(struct i915_request *rq)
{}

/* Sample a CPU-local clock in ns and report the CPU in *@cpu (body
 * elided; inferred from the name and the sched/clock.h include). */
static unsigned long local_clock_ns(unsigned int *cpu)
{}

/* Decide whether the busywait should stop (body elided; presumably on
 * timeout or CPU migration — verify). */
static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{}

/* Opportunistically spin for completion before sleeping (body elided). */
static bool __i915_spin_request(struct i915_request * const rq, int state)
{}

/* NOTE(review): struct body elided ("{}") in this extract. */
struct request_wait {};

/* dma_fence callback that wakes the sleeping waiter (body elided). */
static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
{}

/**
 * i915_request_wait_timeout - wait until execution of request has finished
 * @rq: the request to wait upon
 * @flags: how to wait
 * @timeout: how long to wait in jiffies
 *
 * i915_request_wait_timeout() waits for the request to be completed, for a
 * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
 * unbounded wait).
 *
 * Returns the remaining time (in jiffies) if the request completed, which may
 * be zero if the request is unfinished after the timeout expires.
 * If the timeout is 0, it will return 1 if the fence is signaled.
 *
 * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
 * pending before the request completes.
 *
 * NOTE: This function has the same wait semantics as dma-fence.
 */
long i915_request_wait_timeout(struct i915_request *rq,
			       unsigned int flags,
			       long timeout)
{}

/**
 * i915_request_wait - wait until execution of request has finished
 * @rq: the request to wait upon
 * @flags: how to wait
 * @timeout: how long to wait in jiffies
 *
 * i915_request_wait() waits for the request to be completed, for a
 * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
 * unbounded wait).
 *
 * Returns the remaining time (in jiffies) if the request completed, which may
 * be zero or -ETIME if the request is unfinished after the timeout expires.
 * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
 * pending before the request completes.
 *
 * NOTE: This function behaves differently from dma-fence wait semantics for
 * timeout = 0. It returns 0 on success, and -ETIME if not signaled.
 */
long i915_request_wait(struct i915_request *rq,
		       unsigned int flags,
		       long timeout)
{}

/* Format an i915_sched_attr into @buf at offset @x, bounded by @len
 * (body elided; inferred from the signature). */
static int print_sched_attr(const struct i915_sched_attr *attr,
			    char *buf, int x, int len)
{}

/* Single-character queue-state code for debug output (body elided). */
static char queue_status(const struct i915_request *rq)
{}

/* Short run-state string for debug output (body elided). */
static const char *run_status(const struct i915_request *rq)
{}

/* Short fence-state string for debug output (body elided). */
static const char *fence_status(const struct i915_request *rq)
{}

/* Pretty-print @rq to @m, presumably for debugfs/error dumps (body
 * elided). */
void i915_request_show(struct drm_printer *m,
		       const struct i915_request *rq,
		       const char *prefix,
		       int indent)
{}

/* Whether @rq's ring matches what @engine is executing (body elided;
 * inferred from the name). */
static bool engine_match_ring(struct intel_engine_cs *engine, struct i915_request *rq)
{}

/* Ring-match check across candidate engines — presumably via
 * engine_match_ring() (body elided). */
static bool match_ring(struct i915_request *rq)
{}

/* Classify the request's state (body elided; inferred from the return
 * type enum i915_request_state). */
enum i915_request_state i915_test_request_state(struct i915_request *rq)
{}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_request.c"
#include "selftests/i915_request.c"
#endif

/* Module teardown — presumably destroys the slab caches created by
 * i915_request_module_init() (body elided). */
void i915_request_module_exit(void)
{}

/* Module setup — presumably creates slab_requests/slab_execute_cbs;
 * returns 0 on success, negative errno on failure (body elided). */
int __init i915_request_module_init(void)
{}