/* linux/drivers/gpu/drm/i915/intel_uncore.c */

/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <drm/drm_managed.h>
#include <linux/pm_runtime.h>

#include "gt/intel_gt.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gt_regs.h"

#include "i915_drv.h"
#include "i915_iosf_mbi.h"
#include "i915_reg.h"
#include "i915_trace.h"
#include "i915_vgpu.h"

/*
 * NOTE(review): this excerpt looks machine-elided — macro values, array
 * initializers and function bodies have been stripped ("{}", "=;").  The
 * comments added below are inferred from the identifiers; confirm against
 * the full upstream source before relying on them.
 */

/* Timeout bounds; values elided in this view. */
#define FORCEWAKE_ACK_TIMEOUT_MS
#define GT_FIFO_TIMEOUT_MS

/* Raw MMIO read used purely for posting (write-flush) semantics; elided. */
#define __raw_posting_read(...)

/* Dispatch a forcewake "get" through the uncore's platform vfuncs. */
static void
fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{}

/* Early init of the unclaimed-MMIO debug bookkeeping (body elided). */
void
intel_uncore_mmio_debug_init_early(struct drm_i915_private *i915)
{}

/* Pause unclaimed-MMIO checking across suspend (body elided). */
static void mmio_debug_suspend(struct intel_uncore *uncore)
{}

/* Forward declaration; definition appears further down in this file. */
static bool check_for_unclaimed_mmio(struct intel_uncore *uncore);

/* Re-arm unclaimed-MMIO checking on resume (body elided). */
static void mmio_debug_resume(struct intel_uncore *uncore)
{}

/* Printable names for the forcewake domains; initializer elided. */
static const char * const forcewake_domain_names[] =;

/* Map a forcewake domain id to its printable name (body elided). */
const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{}

/*
 * NOTE(review): function bodies are elided in this excerpt ("{}"); the
 * comments below are inferred from the identifiers and should be confirmed
 * against the full upstream source.
 */

/* Accessors for a domain's forcewake ack/set/clear registers (elided). */
#define fw_ack(d)
#define fw_set(d, val)
#define fw_clear(d, val)

/* Put a single domain's forcewake request register back to a known state. */
static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{}

/* Arm the domain's release timer so forcewake is dropped after a delay. */
static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{}

/* Poll the domain's ack register for (ack_reg & ack) == value, or time out. */
static inline int
__wait_for_ack(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack,
	       const u32 value)
{}

/* Wait for an ack bit to clear (body elided). */
static inline int
wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack)
{}

/* Wait for an ack bit to become set (body elided). */
static inline int
wait_ack_set(const struct intel_uncore_forcewake_domain *d,
	     const u32 ack)
{}

/* Wait for the hardware to acknowledge release of this domain. */
static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{}

/* Enumerators elided in this view. */
enum ack_type {};

/*
 * Wait for an ack transition, retrying via a fallback sequence when the
 * hardware is slow to respond (body elided).
 */
static int
fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
				 const enum ack_type type)
{}

static inline void
fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
{}

/* Request (wake) a single forcewake domain. */
static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{}

/* Wait for the hardware to acknowledge the wake request. */
static inline void
fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
{}

static inline void
fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
{}

/* Release a single forcewake domain. */
static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{}

/* Plain "get": request each domain in the mask, then wait for the acks. */
static void
fw_domains_get_normal(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{}

/* As above but using the ack-with-fallback dance. */
static void
fw_domains_get_with_fallback(struct intel_uncore *uncore,
			     enum forcewake_domains fw_domains)
{}

/* Drop every forcewake domain in the mask. */
static void
fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{}

/* Reset the request registers of the given domains. */
static void
fw_domains_reset(struct intel_uncore *uncore,
		 enum forcewake_domains fw_domains)
{}

/* Read the GT thread status (gen6-style platforms); body elided. */
static inline u32 gt_thread_status(struct intel_uncore *uncore)
{}

/* Wait for the GT thread status to settle (body elided). */
static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
{}

/* Forcewake get variant that additionally waits on GT thread status. */
static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
					      enum forcewake_domains fw_domains)
{}

/* Number of free entries in the GT write FIFO (body elided). */
static inline u32 fifo_free_entries(struct intel_uncore *uncore)
{}

/* Block until the GT FIFO has room for more posted writes (body elided). */
static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
{}

/* hrtimer callback releasing a domain once its hold-off delay expires. */
static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{}

/* Note callers must have acquired the PUNIT->PMIC bus, before calling this. */
static unsigned int
intel_uncore_forcewake_reset(struct intel_uncore *uncore)
{} /* body elided; return value semantics to be confirmed upstream */

/*
 * NOTE(review): bodies elided in this excerpt; descriptions inferred from
 * the identifiers — confirm against the full source.
 */

/* Check the FPGA debug register for an unclaimed-access indication. */
static bool
fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{}

/* Valleyview-specific unclaimed-MMIO check. */
static bool
vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{}

/* Gen6: check for GT FIFO debug errors. */
static bool
gen6_check_for_fifo_debug(struct intel_uncore *uncore)
{}

/* Platform-dispatching unclaimed-MMIO check; forward-declared earlier. */
static bool
check_for_unclaimed_mmio(struct intel_uncore *uncore)
{}

/* Bring forcewake into a sane state early during init/resume. */
static void forcewake_early_sanitize(struct intel_uncore *uncore,
				     unsigned int restore_forcewake)
{}

/* System/runtime PM hooks for the uncore (bodies elided). */
void intel_uncore_suspend(struct intel_uncore *uncore)
{}

void intel_uncore_resume_early(struct intel_uncore *uncore)
{}

void intel_uncore_runtime_resume(struct intel_uncore *uncore)
{}

/* Lock-held worker for intel_uncore_forcewake_get() (body elided). */
static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains)
{}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However if some sequence requires the GT to not power down a particular
 * forcewake domains this function should be called at the beginning of the
 * sequence. And subsequently the reference should be dropped by symmetric
 * call to intel_uncore_forcewake_put(). Usually caller wants all the domains
 * to be kept awake so the @fw_domains would be then FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
{} /* body elided in this excerpt; see kernel-doc above for the contract */

/**
 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
 * the GT powerwell and in the process disable our debugging for the
 * duration of userspace's bypass.
 */
void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
{} /* body elided in this excerpt; see kernel-doc above for the contract */

/**
 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function complements intel_uncore_forcewake_user_get() and releases
 * the GT powerwell taken on behalf of the userspace bypass.
 */
void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
{} /* body elided in this excerpt; see kernel-doc above for the contract */

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{} /* body elided in this excerpt; see kernel-doc above for the contract */

/*
 * Lock-held worker for the put paths; @delayed presumably defers the actual
 * release via the domain timer (inferred from the name — body elided).
 */
static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains,
					 bool delayed)
{}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
{} /* body elided in this excerpt; see kernel-doc above for the contract */

/*
 * As intel_uncore_forcewake_put(), but presumably releasing via the
 * delayed-release timer path (inferred from the name — body elided).
 */
void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{}

/**
 * intel_uncore_forcewake_flush - flush the delayed release
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to flush
 */
void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
				  enum forcewake_domains fw_domains)
{} /* body elided in this excerpt; see kernel-doc above for the contract */

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{} /* body elided in this excerpt; see kernel-doc above for the contract */

/* Debug assertion: no forcewake references should be held (body elided). */
void assert_forcewakes_inactive(struct intel_uncore *uncore)
{}

/* Debug assertion: @fw_domains are currently held awake (body elided). */
void assert_forcewakes_active(struct intel_uncore *uncore,
			      enum forcewake_domains fw_domains)
{}

/*
 * We give fast paths for the really cool registers.  The second range includes
 * media domains (and the GSC starting from Xe_LPM+)
 */
/* Predicate (elided): which register offsets need forcewake on fast paths. */
#define NEEDS_FORCE_WAKE(reg)

/* Comparator for BSEARCH over sorted intel_forcewake_range tables (elided). */
static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
{}

/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp)

/* Look up the forcewake domains covering @offset in the platform's table. */
static enum forcewake_domains
find_fw_domain(struct intel_uncore *uncore, u32 offset)
{}

/*
 * Shadowed register tables describe special register ranges that i915 is
 * allowed to write to without acquiring forcewake.  If these registers' power
 * wells are down, the hardware will save values written by i915 to a shadow
 * copy and automatically transfer them into the real register the next time
 * the power well is woken up.  Shadowing only applies to writes; forcewake
 * must still be acquired when reading from registers in these ranges.
 *
 * The documentation for shadowed registers is somewhat spotty on older
 * platforms.  However missing registers from these lists is non-fatal; it just
 * means we'll wake up the hardware for some register accesses where we didn't
 * really need to.
 *
 * The ranges listed in these tables must be sorted by offset.
 *
 * When adding new tables here, please also add them to
 * intel_shadow_table_check() in selftests/intel_uncore.c so that they will be
 * scanned for obvious mistakes or typos by the selftests.
 */

/*
 * NOTE(review): all table initializers below are elided ("=;") in this view;
 * function bodies are elided too — comments are inferred from identifiers
 * and the shadowed-register description above.
 */
static const struct i915_range gen8_shadowed_regs[] =;

static const struct i915_range gen11_shadowed_regs[] =;

static const struct i915_range gen12_shadowed_regs[] =;

static const struct i915_range dg2_shadowed_regs[] =;

static const struct i915_range mtl_shadowed_regs[] =;

static const struct i915_range xelpmp_shadowed_regs[] =;

/* Comparator for BSEARCH over sorted i915_range tables. */
static int mmio_range_cmp(u32 key, const struct i915_range *range)
{}

/* True if @offset falls within the platform's shadowed-register table. */
static bool is_shadowed(struct intel_uncore *uncore, u32 offset)
{}

/* Gen6/7: forcewake domains needed to write @reg (none when shadowed). */
static enum forcewake_domains
gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
{}

/* Table-driven read/write domain lookup helpers; expansions elided. */
#define __fwtable_reg_read_fw_domains(uncore, offset)

#define __fwtable_reg_write_fw_domains(uncore, offset)

#define GEN_FW_RANGE(s, e, d)

/*
 * All platforms' forcewake tables below must be sorted by offset ranges.
 * Furthermore, new forcewake tables added should be "watertight" and have
 * no gaps between ranges.
 *
 * When there are multiple consecutive ranges listed in the bspec with
 * the same forcewake domain, it is customary to combine them into a single
 * row in the tables below to keep the tables small and lookups fast.
 * Likewise, reserved/unused ranges may be combined with the preceding and/or
 * following ranges since the driver will never be making MMIO accesses in
 * those ranges.
 *
 * For example, if the bspec were to list:
 *
 *    ...
 *    0x1000 - 0x1fff:  GT
 *    0x2000 - 0x2cff:  GT
 *    0x2d00 - 0x2fff:  unused/reserved
 *    0x3000 - 0xffff:  GT
 *    ...
 *
 * these could all be represented by a single line in the code:
 *
 *   GEN_FW_RANGE(0x1000, 0xffff, FORCEWAKE_GT)
 *
 * When adding new forcewake tables here, please also add them to
 * intel_uncore_mock_selftests in selftests/intel_uncore.c so that they will be
 * scanned for obvious mistakes or typos by the selftests.
 */

/*
 * NOTE(review): all range-table initializers below are elided ("=;") in
 * this view; see the preceding comment for how the tables are constructed.
 */
static const struct intel_forcewake_range __gen6_fw_ranges[] =;

static const struct intel_forcewake_range __vlv_fw_ranges[] =;

static const struct intel_forcewake_range __chv_fw_ranges[] =;

static const struct intel_forcewake_range __gen9_fw_ranges[] =;

static const struct intel_forcewake_range __gen11_fw_ranges[] =;

static const struct intel_forcewake_range __gen12_fw_ranges[] =;

static const struct intel_forcewake_range __dg2_fw_ranges[] =;

static const struct intel_forcewake_range __mtl_fw_ranges[] =;

/*
 * Note that the register ranges here are the final offsets after
 * translation of the GSI block to the 0x380000 offset.
 *
 * NOTE:  There are a couple MCR ranges near the bottom of this table
 * that need to power up either VD0 or VD2 depending on which replicated
 * instance of the register we're trying to access.  Our forcewake logic
 * at the moment doesn't have a good way to take steering into consideration,
 * and the driver doesn't even access any registers in those ranges today,
 * so for now we just mark those ranges as FORCEWAKE_ALL.  That will ensure
 * proper operation if we do start using the ranges in the future, and we
 * can determine at that time whether it's worth adding extra complexity to
 * the forcewake handling to take steering into consideration.
 */
static const struct intel_forcewake_range __xelpmp_fw_ranges[] =;

/*
 * NOTE(review): bodies and macro expansions below are elided; the comments
 * are inferred from identifiers — confirm against the full source.
 */

/* Ironlake workaround: issue a harmless write before the real MMIO access. */
static void
ilk_dummy_write(struct intel_uncore *uncore)
{}

/* Report an unclaimed access detected around a register access. */
static void
__unclaimed_reg_debug(struct intel_uncore *uncore,
		      const i915_reg_t reg,
		      const bool read)
{}

/* Report an unclaimed access attributed to a *previous* register access. */
static void
__unclaimed_previous_reg_debug(struct intel_uncore *uncore,
			       const i915_reg_t reg,
			       const bool read)
{}

/* Paired helpers bracketing an MMIO access for unclaimed-reg debugging. */
static inline bool __must_check
unclaimed_reg_debug_header(struct intel_uncore *uncore,
			   const i915_reg_t reg, const bool read)
{}

static inline void
unclaimed_reg_debug_footer(struct intel_uncore *uncore,
			   const i915_reg_t reg, const bool read)
{}

/*
 * Generator macro for vGPU read accessors; expansion elided.  The empty
 * argument lists on the instantiations below are an artifact of the elision
 * (upstream they generate the different access-width variants).
 */
#define __vgpu_read(x)
__vgpu_read()
__vgpu_read()
__vgpu_read()
__vgpu_read()

/*
 * NOTE(review): the GEN2/GEN6 read-accessor generator macros and their
 * expansions are elided in this excerpt; the instantiations below generate
 * the 8/16/32/64-bit MMIO read functions — confirm against the full source.
 */
#define GEN2_READ_HEADER

#define GEN2_READ_FOOTER

#define __gen2_read

#define __gen5_read

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER

#define GEN6_READ_FOOTER

/* Slow path: acquire the auto-forcewake domains for an access (elided). */
static noinline void ___force_wake_auto(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{}

/* Fast-path wrapper around ___force_wake_auto() (body elided). */
static inline void __force_wake_auto(struct intel_uncore *uncore,
				     enum forcewake_domains fw_domains)
{}

#define __gen_fwtable_read

/* Forcewake domains needed to read @reg, per the platform table (elided). */
static enum forcewake_domains
fwtable_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) {}

__gen_fwtable_read(8)
__gen_fwtable_read(16)
__gen_fwtable_read(32)
__gen_fwtable_read(64)

#undef __gen_fwtable_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

/*
 * NOTE(review): the write-accessor generator macros below are elided.  The
 * stray line-continuation on GEN2_WRITE_HEADER and the missing size
 * arguments on the __gen5_write/__gen2_write instantiations are artifacts
 * of the elision, not the real source — confirm upstream.
 */
#define GEN2_WRITE_HEADER \

#define GEN2_WRITE_FOOTER

#define __gen2_write

#define __gen5_write

__gen5_write
__gen5_write
__gen5_write
__gen2_write
__gen2_write
__gen2_write

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER

#define GEN6_WRITE_FOOTER

#define __gen6_write(x)
__gen6_write()
__gen6_write()
__gen6_write()

#define __gen_fwtable_write

/* Forcewake domains needed to write @reg (none when shadowed — see above). */
static enum forcewake_domains
fwtable_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
{}

__gen_fwtable_write
__gen_fwtable_write
__gen_fwtable_write

#undef __gen_fwtable_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

/* vGPU write accessor generator; instantiations elided as with reads. */
#define __vgpu_write(x)
__vgpu_write()
__vgpu_write()
__vgpu_write()

/* Hook up raw/normal MMIO read+write vfunc tables (expansions elided). */
#define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x)

#define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x)

#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x)

#define ASSIGN_READ_MMIO_VFUNCS(uncore, x)

/*
 * NOTE(review): bodies and initializers below are elided; comments are
 * inferred from identifiers — confirm against the full source.
 */

/* Set up a single forcewake domain (registers, timer, bookkeeping). */
static int __fw_domain_init(struct intel_uncore *uncore,
			    enum forcewake_domain_id domain_id,
			    i915_reg_t reg_set,
			    i915_reg_t reg_ack)
{}

/* Tear down a single forcewake domain. */
static void fw_domain_fini(struct intel_uncore *uncore,
			   enum forcewake_domain_id domain_id)
{}

/* Tear down all forcewake domains on @uncore. */
static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore)
{}

/* Forcewake "get" strategy vtables; initializers elided. */
static const struct intel_uncore_fw_get uncore_get_fallback =;

static const struct intel_uncore_fw_get uncore_get_normal =;

static const struct intel_uncore_fw_get uncore_get_thread_status =;

/* Per-platform creation of the forcewake domains. */
static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
{}

/* Install the platform's forcewake/shadow tables on @uncore (elided). */
#define ASSIGN_FW_DOMAINS_TABLE(uncore, d)

#define ASSIGN_SHADOW_TABLE(uncore, d)

/* iosf-mbi notifier for PUNIT->PMIC bus access events (body elided). */
static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
					 unsigned long action, void *data)
{}

/* drm-managed release action for the MMIO mapping. */
static void uncore_unmap_mmio(struct drm_device *drm, void *regs)
{}

/* Map the MMIO BAR at @phys_addr (body elided). */
int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr)
{}

/* Early, allocation-free initialization of @uncore for @gt. */
void intel_uncore_init_early(struct intel_uncore *uncore,
			     struct intel_gt *gt)
{}

/* Raw (no-forcewake) accessor setup. */
static void uncore_raw_init(struct intel_uncore *uncore)
{}

static int uncore_media_forcewake_init(struct intel_uncore *uncore)
{}

static int uncore_forcewake_init(struct intel_uncore *uncore)
{}

/* Sanity-check that basic MMIO access works (body elided). */
static int sanity_check_mmio_access(struct intel_uncore *uncore)
{}

/* Top-level MMIO/forcewake init entry point; body elided in this excerpt. */
int intel_uncore_init_mmio(struct intel_uncore *uncore)
{}

/*
 * We might have detected that some engines are fused off after we initialized
 * the forcewake domains. Prune them, to make sure they only reference existing
 * engines.
 */
void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
					  struct intel_gt *gt)
{} /* body elided in this excerpt; see the comment above for the rationale */

/*
 * The driver-initiated FLR is the highest level of reset that we can trigger
 * from within the driver. It is different from the PCI FLR in that it doesn't
 * fully reset the SGUnit and doesn't modify the PCI config space and therefore
 * it doesn't require a re-enumeration of the PCI BARs. However, the
 * driver-initiated FLR does still cause a reset of both GT and display and a
 * memory wipe of local and stolen memory, so recovery would require a full HW
 * re-init and saving/restoring (or re-populating) the wiped memory. Since we
 * perform the FLR as the very last action before releasing access to the HW
 * during the driver release flow, we don't attempt recovery at all, because
 * if/when a new instance of i915 is bound to the device it will do a full
 * re-init anyway.
 */
static void driver_initiated_flr(struct intel_uncore *uncore)
{} /* body elided in this excerpt; see the comment above for the semantics */

/* Called via drm-managed action */
void intel_uncore_fini_mmio(struct drm_device *dev, void *data)
{} /* body elided in this excerpt */

/**
 * __intel_wait_for_register_fw - wait until register matches expected state
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
 * @slow_timeout_ms: slow timeout in millisecond
 * @out_value: optional placeholder to hold registry value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (intel_uncore_read_fw(uncore, reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
 * must not be larger than 20,000 microseconds.
 *
 * Note that this routine assumes the caller holds forcewake asserted, it is
 * not suitable for very long waits. See intel_wait_for_register() if you
 * wish to wait without holding forcewake for the duration (i.e. you expect
 * the wait to be slow).
 *
 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register_fw(struct intel_uncore *uncore,
				 i915_reg_t reg,
				 u32 mask,
				 u32 value,
				 unsigned int fast_timeout_us,
				 unsigned int slow_timeout_ms,
				 u32 *out_value)
{} /* body elided in this excerpt; see kernel-doc above for the contract */

/**
 * __intel_wait_for_register - wait until register matches expected state
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
 * @slow_timeout_ms: slow timeout in millisecond
 * @out_value: optional placeholder to hold registry value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (intel_uncore_read(uncore, reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 *
 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register(struct intel_uncore *uncore,
			      i915_reg_t reg,
			      u32 mask,
			      u32 value,
			      unsigned int fast_timeout_us,
			      unsigned int slow_timeout_ms,
			      u32 *out_value)
{} /* body elided in this excerpt; see kernel-doc above for the contract */

/* True if an unclaimed MMIO access has been detected (body elided). */
bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
{}

/* Re-arm unclaimed-MMIO detection (body elided; semantics inferred). */
bool
intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
{}

/**
 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
 * 				    a register
 * @uncore: pointer to struct intel_uncore
 * @reg: register in question
 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
 *
 * Returns a set of forcewake domains required to be taken with for example
 * intel_uncore_forcewake_get for the specified register to be accessible in the
 * specified mode (read, write or read/write) with raw mmio accessors.
 *
 * NOTE: On Gen6 and Gen7 write forcewake domain (FORCEWAKE_RENDER) requires the
 * callers to do FIFO management on their own or risk losing writes.
 */
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
			       i915_reg_t reg, unsigned int op)
{} /* body elided in this excerpt; see kernel-doc above for the contract */

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_uncore.c"
#include "selftests/intel_uncore.c"
#endif