#ifndef __INTEL_CONTEXT_H__
#define __INTEL_CONTEXT_H__
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/types.h>
#include "i915_active.h"
#include "i915_drv.h"
#include "intel_context_types.h"
#include "intel_engine_types.h"
#include "intel_gt_pm.h"
#include "intel_ring_types.h"
#include "intel_timeline_types.h"
#include "i915_trace.h"
/*
 * Context-scoped trace/log helper; expansion elided in this view —
 * presumably routes through the machinery pulled in via i915_trace.h.
 * NOTE(review): confirm expansion before relying on side effects.
 */
#define CE_TRACE(ce, fmt, ...) …
/*
 * Preemption timeout in milliseconds; the name suggests it is applied when a
 * context is banned. Value elided here — confirm against users of
 * intel_context_ban().
 */
#define INTEL_CONTEXT_BANNED_PREEMPT_TIMEOUT_MS …
/*
 * Forward declaration so the *_ww interfaces below can take a pointer
 * without this header including the GEM ww-context definitions.
 */
struct i915_gem_ww_ctx;
/*
 * Initialise a caller-allocated context for @engine; undone by
 * intel_context_fini(). Implemented in intel_context.c.
 */
void intel_context_init(struct intel_context *ce,
struct intel_engine_cs *engine);
void intel_context_fini(struct intel_context *ce);
/* Driver-module-wide setup/teardown for the context code. */
void i915_context_module_exit(void);
int i915_context_module_init(void);
/*
 * Allocate and initialise a new context on @engine. NOTE(review): exact
 * ownership/refcount rules are not visible here — presumably released via
 * intel_context_put(); confirm in intel_context.c.
 */
struct intel_context *
intel_context_create(struct intel_engine_cs *engine);
/*
 * Allocate the state backing @ce. Returns an int — presumably 0 on success
 * or a negative error code per kernel convention (body not visible here).
 */
int intel_context_alloc_state(struct intel_context *ce);
void intel_context_free(struct intel_context *ce);
/* Apply a new SSEU (slice/subslice/EU) configuration to @ce. */
int intel_context_reconfigure_sseu(struct intel_context *ce,
const struct intel_sseu sseu);
/*
 * NOTE(review): value elided — by name, the size of the scratch area owned
 * by a parent context in parallel submission; confirm in
 * intel_context_types.h.
 */
#define PARENT_SCRATCH_SIZE …
/*
 * Parent/child topology queries for parallel (multi-context) submission.
 * Bodies elided in this view.
 */
static inline bool intel_context_is_child(struct intel_context *ce)
{ … }
static inline bool intel_context_is_parent(struct intel_context *ce)
{ … }
/* Forward declaration; the definition appears further down in this header. */
static inline bool intel_context_is_pinned(struct intel_context *ce);
/* Resolve @ce to its parent context (body elided in this view). */
static inline struct intel_context *
intel_context_to_parent(struct intel_context *ce)
{ … }
static inline bool intel_context_is_parallel(struct intel_context *ce)
{ … }
/* Link @child under @parent for parallel submission. */
void intel_context_bind_parent_child(struct intel_context *parent,
struct intel_context *child);
/*
 * Iterate the children of @parent; the _safe variant takes an extra cursor
 * @cn, presumably tolerating removal of the current entry in the manner of
 * list_for_each_entry_safe() — expansion elided, confirm before mutating
 * the list inside the loop.
 */
#define for_each_child(parent, ce) …
#define for_each_child_safe(parent, ce, cn) …
/*
 * Acquire ce->pin_mutex (per the sparse __acquires annotation). Returns an
 * int — presumably 0 or a negative error, so the acquisition may be
 * interruptible; body elided, confirm. Pair with
 * intel_context_unlock_pinned().
 */
static inline int intel_context_lock_pinned(struct intel_context *ce)
__acquires(ce->pin_mutex)
{ … }
/* Query whether @ce currently holds a pin (body elided in this view). */
static inline bool
intel_context_is_pinned(struct intel_context *ce)
{ … }
/* Cancel @rq on @ce; semantics not visible in this view. */
static inline void intel_context_cancel_request(struct intel_context *ce,
struct i915_request *rq)
{ … }
/* Release ce->pin_mutex (per the sparse __releases annotation). */
static inline void intel_context_unlock_pinned(struct intel_context *ce)
__releases(ce->pin_mutex)
{ … }
/*
 * Slow-path pin entry points (implemented in intel_context.c); the _ww
 * variant threads a ww acquire context through, presumably for deadlock-
 * avoiding multi-object locking.
 */
int __intel_context_do_pin(struct intel_context *ce);
int __intel_context_do_pin_ww(struct intel_context *ce,
struct i915_gem_ww_ctx *ww);
/*
 * Conditional pin — by name, succeeds (returns true) only if @ce is
 * already active. Body elided; TODO confirm.
 */
static inline bool intel_context_pin_if_active(struct intel_context *ce)
{ … }
/* Pin @ce for use; int return, presumably 0 or a negative error code. */
static inline int intel_context_pin(struct intel_context *ce)
{ … }
static inline int intel_context_pin_ww(struct intel_context *ce,
struct i915_gem_ww_ctx *ww)
{ … }
/*
 * Void return (cannot report failure) — presumably an extra reference on an
 * already-pinned context; body elided, confirm.
 */
static inline void __intel_context_pin(struct intel_context *ce)
{ … }
/* Drop pin reference(s); the meaning of @sub is not visible here. */
void __intel_context_do_unpin(struct intel_context *ce, int sub);
static inline void intel_context_sched_disable_unpin(struct intel_context *ce)
{ … }
static inline void intel_context_unpin(struct intel_context *ce)
{ … }
/*
 * Active-phase tracking: by name, enter before the context runs on its
 * engine and exit when it idles again — bodies elided in this view, so the
 * exact PM/refcount interplay (intel_gt_pm.h is included above) is
 * unconfirmed.
 */
void intel_context_enter_engine(struct intel_context *ce);
void intel_context_exit_engine(struct intel_context *ce);
static inline void intel_context_enter(struct intel_context *ce)
{ … }
static inline void intel_context_mark_active(struct intel_context *ce)
{ … }
static inline void intel_context_exit(struct intel_context *ce)
{ … }
/*
 * Reference counting: get returns @ce so acquisition can be chained inline;
 * put drops a reference. Bodies elided in this view.
 */
static inline struct intel_context *intel_context_get(struct intel_context *ce)
{ … }
static inline void intel_context_put(struct intel_context *ce)
{ … }
/*
 * Lock @ce's timeline mutex (per __acquires(&ce->timeline->mutex)) and
 * return the timeline. __must_check on the return — NOTE(review): body
 * elided, presumably because it may return an ERR_PTR; confirm the error
 * convention before use.
 */
static inline struct intel_timeline *__must_check
intel_context_timeline_lock(struct intel_context *ce)
__acquires(&ce->timeline->mutex)
{ … }
/* Release the timeline mutex taken by intel_context_timeline_lock(). */
static inline void intel_context_timeline_unlock(struct intel_timeline *tl)
__releases(&tl->mutex)
{ … }
/*
 * Request helpers, implemented in intel_context.c. Ownership of the
 * returned i915_request pointers is not visible here — confirm against the
 * implementations.
 */
int intel_context_prepare_remote_request(struct intel_context *ce,
struct i915_request *rq);
struct i915_request *intel_context_create_request(struct intel_context *ce);
struct i915_request *intel_context_get_active_request(struct intel_context *ce);
/*
 * Per-context state queries/updates — presumably bit flags on the context
 * (bitops.h is included above); bodies elided in this view.
 */
static inline bool intel_context_is_barrier(const struct intel_context *ce)
{ … }
static inline void intel_context_close(struct intel_context *ce)
{ … }
static inline bool intel_context_is_closed(const struct intel_context *ce)
{ … }
static inline bool intel_context_has_inflight(const struct intel_context *ce)
{ … }
/* Semaphore-usage flag: query / set / clear (bodies elided in this view). */
static inline bool intel_context_use_semaphores(const struct intel_context *ce)
{ … }
static inline void intel_context_set_use_semaphores(struct intel_context *ce)
{ … }
static inline void intel_context_clear_use_semaphores(struct intel_context *ce)
{ … }
/*
 * Banning: set_banned returns bool — presumably the previous flag value
 * (test_and_set semantics); body elided, TODO confirm. intel_context_ban()
 * additionally takes the offending request.
 */
static inline bool intel_context_is_banned(const struct intel_context *ce)
{ … }
static inline bool intel_context_set_banned(struct intel_context *ce)
{ … }
bool intel_context_ban(struct intel_context *ce, struct i915_request *rq);
/*
 * Scheduling eligibility and exit state; bodies elided in this view.
 * set_exiting returning bool suggests test_and_set semantics — confirm.
 */
static inline bool intel_context_is_schedulable(const struct intel_context *ce)
{ … }
static inline bool intel_context_is_exiting(const struct intel_context *ce)
{ … }
static inline bool intel_context_set_exiting(struct intel_context *ce)
{ … }
/* Revoke @ce; implemented in intel_context.c, semantics not visible here. */
bool intel_context_revoke(struct intel_context *ce);
/*
 * Single-submission flag: query and set (no clear counterpart is declared
 * in this header). Bodies elided in this view.
 */
static inline bool
intel_context_force_single_submission(const struct intel_context *ce)
{ … }
static inline void
intel_context_set_single_submission(struct intel_context *ce)
{ … }
/* No-preemption flag: query / set / clear (bodies elided in this view). */
static inline bool
intel_context_nopreempt(const struct intel_context *ce)
{ … }
static inline void
intel_context_set_nopreempt(struct intel_context *ce)
{ … }
static inline void
intel_context_clear_nopreempt(struct intel_context *ce)
{ … }
#if IS_ENABLED(CONFIG_DRM_I915_REPLAY_GPU_HANGS_API)
/*
 * CONFIG_DRM_I915_REPLAY_GPU_HANGS_API=y variants — bodies elided in this
 * view; the set_ variant returning bool suggests test_and_set semantics
 * (previous value), mirroring the stubs in the #else branch. TODO confirm.
 */
static inline bool intel_context_has_own_state(const struct intel_context *ce)
{ … }
static inline bool intel_context_set_own_state(struct intel_context *ce)
{ … }
#else
/*
 * Stub for CONFIG_DRM_I915_REPLAY_GPU_HANGS_API=n: without the replay API,
 * a context never carries its own state, so always report false.
 */
static inline bool intel_context_has_own_state(const struct intel_context *ce)
{
	return false;
}
/*
 * Stub for CONFIG_DRM_I915_REPLAY_GPU_HANGS_API=n: reports true
 * unconditionally, as if the flag had already been set, so callers take no
 * further action.
 */
static inline bool intel_context_set_own_state(struct intel_context *ce)
{
	return true;
}
#endif
/*
 * Busyness statistics: total and average runtime of @ce, in nanoseconds per
 * the _ns suffix. Implemented in intel_context.c.
 */
u64 intel_context_get_total_runtime_ns(struct intel_context *ce);
u64 intel_context_get_avg_runtime_ns(struct intel_context *ce);
/*
 * Timestamp source — presumably the clock the runtime statistics above are
 * measured against; body elided in this view.
 */
static inline u64 intel_context_clock(void)
{ … }
#endif