#include <linux/dma-fence-array.h>
#include <linux/dma-fence-chain.h>
#include <linux/irq_work.h>
#include <linux/prefetch.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include "gem/i915_gem_context.h"
#include "gt/intel_breadcrumbs.h"
#include "gt/intel_context.h"
#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_reset.h"
#include "gt/intel_ring.h"
#include "gt/intel_rps.h"
#include "i915_active.h"
#include "i915_config.h"
#include "i915_deps.h"
#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_trace.h"
struct execute_cb { … };
static struct kmem_cache *slab_requests;
static struct kmem_cache *slab_execute_cbs;
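/*
 * The callbacks below provide the struct dma_fence_ops implementation
 * for i915 requests; they are gathered into i915_fence_ops further down
 * so that an i915_request can be handled as an ordinary dma_fence.
 */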
static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{ … }
static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{ … }
static bool i915_fence_signaled(struct dma_fence *fence)
{ … }
static bool i915_fence_enable_signaling(struct dma_fence *fence)
{ … }
static signed long i915_fence_wait(struct dma_fence *fence,
                                   bool interruptible,
                                   signed long timeout)
{ … }
struct kmem_cache *i915_request_slab_cache(void)
{ … }
static void i915_fence_release(struct dma_fence *fence)
{ … }
const struct dma_fence_ops i915_fence_ops = …;
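/*
 * An execute_cb is attached to a request via __await_execution() and is
 * invoked (from irq_work, or immediately if the signaler is already
 * running) once the signaling request has been submitted to hardware,
 * i.e. when it begins execution rather than when it completes.
 */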
static void irq_execute_cb(struct irq_work *wrk)
{ … }
static __always_inline void
__notify_execute_cb(struct i915_request *rq, bool (*fn)(struct irq_work *wrk))
{ … }
static void __notify_execute_cb_irq(struct i915_request *rq)
{ … }
static bool irq_work_imm(struct irq_work *wrk)
{ … }
void i915_request_notify_execute_cb_imm(struct i915_request *rq)
{ … }
static void __i915_request_fill(struct i915_request *rq, u8 val)
{ … }
bool
i915_request_active_engine(struct i915_request *rq,
                           struct intel_engine_cs **active)
{ … }
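/*
 * Request watchdog: if the context was configured with a watchdog
 * timeout, an hrtimer is armed when the request reaches the hardware
 * and, roughly speaking, cancels the request should it fail to
 * complete within that period.
 */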
static void __rq_init_watchdog(struct i915_request *rq)
{ … }
static enum hrtimer_restart __rq_watchdog_expired(struct hrtimer *hrtimer)
{ … }
static void __rq_arm_watchdog(struct i915_request *rq)
{ … }
static void __rq_cancel_watchdog(struct i915_request *rq)
{ … }
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
void i915_request_free_capture_list(struct i915_capture_list *capture)
{ … }
#define assert_capture_list_is_null(_rq) …
#define clear_capture_list(_rq) …
#else
#define i915_request_free_capture_list …
#define assert_capture_list_is_null …
#define clear_capture_list …
#endif
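/*
 * Retirement: once a request has completed, retiring it releases the
 * resources it held (ring space, context pins, active trackers).
 * Requests on a timeline retire strictly in order, so
 * i915_request_retire_upto() walks from the oldest request up to, and
 * including, the one given.
 */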
bool i915_request_retire(struct i915_request *rq)
{ … }
void i915_request_retire_upto(struct i915_request *rq)
{ … }
static struct i915_request * const *
__engine_active(struct intel_engine_cs *engine)
{ … }
static bool __request_in_flight(const struct i915_request *signal)
{ … }
static int
__await_execution(struct i915_request *rq,
                  struct i915_request *signal,
                  gfp_t gfp)
{ … }
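/*
 * Error propagation: a request that can no longer execute has its
 * payload replaced with no-ops (__i915_request_skip) and its fence
 * error set at most once (i915_request_set_error_once), while
 * fatal_error() filters out benign errors, such as -EAGAIN from an
 * innocent reset victim, that should not poison the context.
 */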
static bool fatal_error(int error)
{ … }
void __i915_request_skip(struct i915_request *rq)
{ … }
bool i915_request_set_error_once(struct i915_request *rq, int error)
{ … }
struct i915_request *i915_request_mark_eio(struct i915_request *rq)
{ … }
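/*
 * __i915_request_submit() and __i915_request_unsubmit() move a request
 * onto and off the engine's hardware execution lists and expect the
 * engine's scheduling lock to be held; the plain variants acquire that
 * lock themselves.
 */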
bool __i915_request_submit(struct i915_request *request)
{ … }
void i915_request_submit(struct i915_request *request)
{ … }
void __i915_request_unsubmit(struct i915_request *request)
{ … }
void i915_request_unsubmit(struct i915_request *request)
{ … }
void i915_request_cancel(struct i915_request *rq, int error)
{ … }
static int
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{ … }
static int
semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{ … }
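/*
 * Allocation slow path: when a nonblocking slab allocation fails, we
 * first retire completed requests on the timeline to recycle their
 * slots before retrying the allocation with a blocking gfp.
 */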
static void retire_requests(struct intel_timeline *tl)
{ … }
static noinline struct i915_request *
request_alloc_slow(struct intel_timeline *tl,
                   struct i915_request **rsvd,
                   gfp_t gfp)
{ … }
static void __i915_request_ctor(void *arg)
{ … }
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#define clear_batch_ptr(_rq) …
#else
#define clear_batch_ptr …
#endif
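/*
 * __i915_request_create() expects the caller to hold the context
 * timeline mutex and exposes the gfp used for allocations, whereas
 * i915_request_create() takes the timeline lock itself and is the
 * convenience entry point for most callers.
 */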
struct i915_request *
__i915_request_create(struct intel_context *ce, gfp_t gfp)
{ … }
struct i915_request *
i915_request_create(struct intel_context *ce)
{ … }
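/*
 * GPU-side synchronisation: where supported, a waiter emits
 * MI_SEMAPHORE_WAIT in polling mode on the signaler's seqno so the
 * hardware busy-polls briefly instead of paying for an interrupt-driven
 * dependency. i915_request_await_start() orders the waiter against the
 * *start* of the signaler, and already_busywaiting() avoids stacking
 * multiple busywaits on the same engine.
 */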
static int
i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
{ … }
static intel_engine_mask_t
already_busywaiting(struct i915_request *rq)
{ … }
static int
__emit_semaphore_wait(struct i915_request *to,
                      struct i915_request *from,
                      u32 seqno)
{ … }
static bool
can_use_semaphore_wait(struct i915_request *to, struct i915_request *from)
{ … }
static int
emit_semaphore_wait(struct i915_request *to,
                    struct i915_request *from,
                    gfp_t gfp)
{ … }
static bool intel_timeline_sync_has_start(struct intel_timeline *tl,
                                          struct dma_fence *fence)
{ … }
static int intel_timeline_sync_set_start(struct intel_timeline *tl,
                                         const struct dma_fence *fence)
{ … }
static int
__i915_request_await_execution(struct i915_request *to,
                               struct i915_request *from)
{ … }
static void mark_external(struct i915_request *rq)
{ … }
static int
__i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
{ … }
static int
i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
{ … }
static inline bool is_parallel_rq(struct i915_request *rq)
{ … }
static inline struct intel_context *request_to_parent(struct i915_request *rq)
{ … }
static bool is_same_parallel_context(struct i915_request *to,
                                     struct i915_request *from)
{ … }
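/*
 * i915_request_await_execution() differs from the usual fence await:
 * instead of waiting for the signaler to complete, the new request
 * only waits until the signaler has been submitted to hardware, the
 * ordering required by parallel (formerly bonded) submission.
 */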
int
i915_request_await_execution(struct i915_request *rq,
                             struct dma_fence *fence)
{ … }
static int
await_request_submit(struct i915_request *to, struct i915_request *from)
{ … }
static int
i915_request_await_request(struct i915_request *to, struct i915_request *from)
{ … }
int
i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
{ … }
int i915_request_await_deps(struct i915_request *rq, const struct i915_deps *deps)
{ … }
int
i915_request_await_object(struct i915_request *to,
                          struct drm_i915_gem_object *obj,
                          bool write)
{ … }
static void i915_request_await_huc(struct i915_request *rq)
{ … }
static struct i915_request *
__i915_request_ensure_parallel_ordering(struct i915_request *rq,
                                        struct intel_timeline *timeline)
{ … }
static struct i915_request *
__i915_request_ensure_ordering(struct i915_request *rq,
                               struct intel_timeline *timeline)
{ … }
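/*
 * __i915_request_add_to_timeline() appends the request to its timeline,
 * taking an implicit dependency on the previous request there so that
 * seqnos retire in order; the parallel variant additionally keeps the
 * children of a multi-engine submission ordered as a single unit.
 */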
static struct i915_request *
__i915_request_add_to_timeline(struct i915_request *rq)
{ … }
struct i915_request *__i915_request_commit(struct i915_request *rq)
{ … }
void __i915_request_queue_bh(struct i915_request *rq)
{ … }
void __i915_request_queue(struct i915_request *rq,
                          const struct i915_sched_attr *attr)
{ … }
void i915_request_add(struct i915_request *rq)
{ … }
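/*
 * Before sleeping on an interrupt, a waiter may briefly spin: many
 * requests complete almost immediately, and skipping the irq and
 * context-switch round-trip is a net win. The spin budget is measured
 * with local_clock() and abandoned if we migrate off the starting CPU
 * (see busywait_stop()).
 */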
static unsigned long local_clock_ns(unsigned int *cpu)
{ … }
static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{ … }
static bool __i915_spin_request(struct i915_request * const rq, int state)
{ … }
struct request_wait { … };
static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
{ … }
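/*
 * i915_request_wait_timeout() follows dma-fence wait semantics,
 * returning the remaining jiffies if the request completed, 0 on
 * timeout, or a negative error code such as -EINTR if interrupted;
 * i915_request_wait() wraps it in the historical i915 convention of
 * returning -ETIME on timeout instead.
 */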
long i915_request_wait_timeout(struct i915_request *rq,
                               unsigned int flags,
                               long timeout)
{ … }
long i915_request_wait(struct i915_request *rq,
                       unsigned int flags,
                       long timeout)
{ … }
static int print_sched_attr(const struct i915_sched_attr *attr,
                            char *buf, int x, int len)
{ … }
static char queue_status(const struct i915_request *rq)
{ … }
static const char *run_status(const struct i915_request *rq)
{ … }
static const char *fence_status(const struct i915_request *rq)
{ … }
void i915_request_show(struct drm_printer *m,
                       const struct i915_request *rq,
                       const char *prefix,
                       int indent)
{ … }
static bool engine_match_ring(struct intel_engine_cs *engine, struct i915_request *rq)
{ … }
static bool match_ring(struct i915_request *rq)
{ … }
enum i915_request_state i915_test_request_state(struct i915_request *rq)
{ … }
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_request.c"
#include "selftests/i915_request.c"
#endif
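/*
 * Module setup: i915_request_module_init() creates the slab caches
 * backing request and execute_cb allocations (slab_requests and
 * slab_execute_cbs); i915_request_module_exit() destroys them again.
 */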
void i915_request_module_exit(void)
{ … }
int __init i915_request_module_init(void)
{ … }