/* linux/drivers/gpu/drm/i915/display/skl_watermark.c */

// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include <drm/drm_blend.h>

#include "i915_drv.h"
#include "i915_reg.h"
#include "i9xx_wm.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_crtc.h"
#include "intel_cursor_regs.h"
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fixed.h"
#include "intel_pcode.h"
#include "intel_wm.h"
#include "skl_universal_plane_regs.h"
#include "skl_watermark.h"
#include "skl_watermark_regs.h"

/*
 * It is expected that DSB can do posted writes to every register in
 * the pipe and planes within 100us. For flip queue use case, the
 * recommended DSB execution time is 100us + one SAGV block time.
 */
/* Budgeted DSB execution time in microseconds (see comment above). */
#define DSB_EXE_TIME 100

static void skl_sagv_disable(struct drm_i915_private *i915);

/* Stores plane specific WM parameters */
/*
 * NOTE(review): the struct members were stripped in this extract — only
 * the tag survives. Restore the field list from the full skl_watermark.c.
 */
struct skl_wm_params {};

/*
 * NOTE(review): body stripped in this extract — only the signature
 * survives. Presumably returns the bitmask of currently enabled DBuf
 * slices read from hardware; confirm against the full source.
 */
u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *i915)
{}

/*
 * FIXME: We still don't have the proper code detect if we need to apply the WA,
 * so assume we'll always need it in order to avoid underruns.
 */
/*
 * NOTE(review): body stripped in this extract. Per the FIXME above it
 * apparently always reports that the memory bandwidth workaround is
 * needed — confirm against the full source.
 */
static bool skl_needs_memory_bw_wa(struct drm_i915_private *i915)
{}

/*
 * NOTE(review): body stripped in this extract. Presumably reports
 * whether the platform supports SAGV and it has not been marked
 * unusable — confirm against the full source.
 */
bool
intel_has_sagv(struct drm_i915_private *i915)
{}

static u32
intel_sagv_block_time(struct drm_i915_private *i915)
{}

static void intel_sagv_init(struct drm_i915_private *i915)
{}

/*
 * SAGV dynamically adjusts the system agent voltage and clock frequencies
 * depending on power and performance requirements. The display engine access
 * to system memory is blocked during the adjustment time. Because of the
 * blocking time, having this enabled can cause full system hangs and/or pipe
 * underruns if we don't meet all of the following requirements:
 *
 *  - <= 1 pipe enabled
 *  - All planes can enable watermarks for latencies >= SAGV engine block time
 *  - We're not using an interlaced display configuration
 */
static void skl_sagv_enable(struct drm_i915_private *i915)
{}

static void skl_sagv_disable(struct drm_i915_private *i915)
{}

static void skl_sagv_pre_plane_update(struct intel_atomic_state *state)
{}

static void skl_sagv_post_plane_update(struct intel_atomic_state *state)
{}

static void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
{}

static void icl_sagv_post_plane_update(struct intel_atomic_state *state)
{}

void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
{}

void intel_sagv_post_plane_update(struct intel_atomic_state *state)
{}

static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
{}

static bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
{}

static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
{}

/*
 * NOTE(review): body stripped in this extract. Given the requirements
 * documented above skl_sagv_enable(), this presumably checks whether
 * the given bw_state permits SAGV — confirm against the full source.
 */
bool intel_can_enable_sagv(struct drm_i915_private *i915,
			   const struct intel_bw_state *bw_state)
{}

static int intel_compute_sagv_mask(struct intel_atomic_state *state)
{}

static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry,
			      u16 start, u16 end)
{}

static int intel_dbuf_slice_size(struct drm_i915_private *i915)
{}

static void
skl_ddb_entry_for_slices(struct drm_i915_private *i915, u8 slice_mask,
			 struct skl_ddb_entry *ddb)
{}

static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask)
{}

u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915,
			    const struct skl_ddb_entry *entry)
{}

static unsigned int intel_crtc_ddb_weight(const struct intel_crtc_state *crtc_state)
{}

static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state,
				    enum pipe for_pipe,
				    unsigned int *weight_start,
				    unsigned int *weight_end,
				    unsigned int *weight_total)
{}

static int
skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
{}

static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
				 int width, const struct drm_format_info *format,
				 u64 modifier, unsigned int rotation,
				 u32 plane_pixel_rate, struct skl_wm_params *wp,
				 int color_plane);

static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
				 struct intel_plane *plane,
				 int level,
				 unsigned int latency,
				 const struct skl_wm_params *wp,
				 const struct skl_wm_level *result_prev,
				 struct skl_wm_level *result /* out */);

static unsigned int skl_wm_latency(struct drm_i915_private *i915, int level,
				   const struct skl_wm_params *wp)
{}

static unsigned int
skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
		      int num_active)
{}

static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
{}

static void
skl_ddb_get_hw_plane_state(struct drm_i915_private *i915,
			   const enum pipe pipe,
			   const enum plane_id plane_id,
			   struct skl_ddb_entry *ddb,
			   struct skl_ddb_entry *ddb_y)
{}

static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
				      struct skl_ddb_entry *ddb,
				      struct skl_ddb_entry *ddb_y)
{}

/*
 * NOTE(review): members stripped in this extract. Judging from the
 * BSpec tables below, an entry presumably maps an active-pipe mask to
 * per-pipe DBuf slice masks (plus an mbus-join flag, see
 * check_mbus_joined()) — confirm against the full source.
 */
struct dbuf_slice_conf_entry {};

/*
 * Table taken from Bspec 12716
 * Pipes do have some preferred DBuf slice affinity,
 * plus there are some hardcoded requirements on how
 * those should be distributed for multipipe scenarios.
 * For more DBuf slices algorithm can get even more messy
 * and less readable, so decided to use a table almost
 * as is from BSpec itself - that way it is at least easier
 * to compare, change and check.
 */
/* Autogenerated with igt/tools/intel_dbuf_map tool: */
static const struct dbuf_slice_conf_entry icl_allowed_dbufs[] =
{};

/*
 * Table taken from Bspec 49255
 * Pipes do have some preferred DBuf slice affinity,
 * plus there are some hardcoded requirements on how
 * those should be distributed for multipipe scenarios.
 * For more DBuf slices algorithm can get even more messy
 * and less readable, so decided to use a table almost
 * as is from BSpec itself - that way it is at least easier
 * to compare, change and check.
 */
/* Autogenerated with igt/tools/intel_dbuf_map tool: */
static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] =
{};

/*
 * NOTE(review): the original line read "... dg2_allowed_dbufs[] =;",
 * which is not valid C — the table contents were stripped in this
 * extract. Initialize empty to match the icl/tgl tables above;
 * TODO restore the DG2 entries from the full source.
 */
static const struct dbuf_slice_conf_entry dg2_allowed_dbufs[] =
{};

/*
 * NOTE(review): the original line read "... adlp_allowed_dbufs[] =;",
 * which is not valid C — the table contents were stripped in this
 * extract. Initialize empty to match the icl/tgl tables above;
 * TODO restore the ADL-P entries from the full source.
 */
static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] =
{};

static bool check_mbus_joined(u8 active_pipes,
			      const struct dbuf_slice_conf_entry *dbuf_slices)
{}

static bool adlp_check_mbus_joined(u8 active_pipes)
{}

static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus,
			      const struct dbuf_slice_conf_entry *dbuf_slices)
{}

/*
 * This function finds an entry with same enabled pipe configuration and
 * returns correspondent DBuf slice mask as stated in BSpec for particular
 * platform.
 */
static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
{}

static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
{}

static u8 adlp_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
{}

static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
{}

static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus)
{}

static bool
use_minimal_wm0_only(const struct intel_crtc_state *crtc_state,
		     struct intel_plane *plane)
{}

static u64
skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
{}

/*
 * NOTE(review): body stripped in this extract. Presumably returns the
 * plane's watermark level entry for @level from @pipe_wm — confirm
 * against the full source.
 */
const struct skl_wm_level *
skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
		   enum plane_id plane_id,
		   int level)
{}

const struct skl_wm_level *
skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm,
		   enum plane_id plane_id)
{}

/*
 * We only disable the watermarks for each plane if
 * they exceed the ddb allocation of said plane. This
 * is done so that we don't end up touching cursor
 * watermarks needlessly when some other plane reduces
 * our max possible watermark level.
 *
 * Bspec has this to say about the PLANE_WM enable bit:
 * "All the watermarks at this level for all enabled
 *  planes must be enabled before the level will be used."
 * So this is actually safe to do.
 */
static void
skl_check_wm_level(struct skl_wm_level *wm, const struct skl_ddb_entry *ddb)
{}

static void
skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm,
			const struct skl_ddb_entry *ddb_y, const struct skl_ddb_entry *ddb)
{}

static bool skl_need_wm_copy_wa(struct drm_i915_private *i915, int level,
				const struct skl_plane_wm *wm)
{}

/*
 * NOTE(review): members stripped in this extract — iterator state for
 * skl_allocate_plane_ddb() below. Restore from the full source.
 */
struct skl_plane_ddb_iter {};

static void
skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter,
		       struct skl_ddb_entry *ddb,
		       const struct skl_wm_level *wm,
		       u64 data_rate)
{}

static int
skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{}

/*
 * The max latency should be 257 (max the punit can code is 255 and we add 2us
 * for the read latency) and cpp should always be <= 8, so that
 * should allow pixel_rate up to ~2 GHz which seems sufficient since max
 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
 */
static uint_fixed_16_16_t
skl_wm_method1(const struct drm_i915_private *i915, u32 pixel_rate,
	       u8 cpp, u32 latency, u32 dbuf_block_size)
{}

static uint_fixed_16_16_t
skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
	       uint_fixed_16_16_t plane_blocks_per_line)
{}

static uint_fixed_16_16_t
intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
{}

static int
skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
		      int width, const struct drm_format_info *format,
		      u64 modifier, unsigned int rotation,
		      u32 plane_pixel_rate, struct skl_wm_params *wp,
		      int color_plane)
{}

static int
skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
			    const struct intel_plane_state *plane_state,
			    struct skl_wm_params *wp, int color_plane)
{}

static bool skl_wm_has_lines(struct drm_i915_private *i915, int level)
{}

static int skl_wm_max_lines(struct drm_i915_private *i915)
{}

static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
				 struct intel_plane *plane,
				 int level,
				 unsigned int latency,
				 const struct skl_wm_params *wp,
				 const struct skl_wm_level *result_prev,
				 struct skl_wm_level *result /* out */)
{}

static void
skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
		      struct intel_plane *plane,
		      const struct skl_wm_params *wm_params,
		      struct skl_wm_level *levels)
{}

static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
				struct intel_plane *plane,
				const struct skl_wm_params *wm_params,
				struct skl_plane_wm *plane_wm)
{}

static void skl_compute_transition_wm(struct drm_i915_private *i915,
				      struct skl_wm_level *trans_wm,
				      const struct skl_wm_level *wm0,
				      const struct skl_wm_params *wp)
{}

static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state,
				     struct intel_plane *plane, int color_plane)
{}

static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
				 const struct intel_plane_state *plane_state,
				 struct intel_plane *plane)
{}

static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{}

static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{}

static bool
skl_is_vblank_too_short(const struct intel_crtc_state *crtc_state,
			int wm0_lines, int latency)
{}

static int skl_max_wm0_lines(const struct intel_crtc_state *crtc_state)
{}

static int skl_max_wm_level_for_vblank(struct intel_crtc_state *crtc_state,
				       int wm0_lines)
{}

static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
{}

static int skl_build_pipe_wm(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{}

static bool skl_wm_level_equals(const struct skl_wm_level *l1,
				const struct skl_wm_level *l2)
{}

static bool skl_plane_wm_equals(struct drm_i915_private *i915,
				const struct skl_plane_wm *wm1,
				const struct skl_plane_wm *wm2)
{}

static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
				    const struct skl_ddb_entry *b)
{}

static void skl_ddb_entry_union(struct skl_ddb_entry *a,
				const struct skl_ddb_entry *b)
{}

/*
 * NOTE(review): body stripped in this extract. Presumably reports
 * whether @ddb overlaps any of the @num_entries entries (skipping
 * @ignore_idx) — confirm against the full source.
 */
bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
				 const struct skl_ddb_entry *entries,
				 int num_entries, int ignore_idx)
{}

static int
skl_ddb_add_affected_planes(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{}

static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
{}

static int
skl_compute_ddb(struct intel_atomic_state *state)
{}

static char enast(bool enable)
{}

static void
skl_print_wm_changes(struct intel_atomic_state *state)
{}

static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
					 const struct skl_pipe_wm *old_pipe_wm,
					 const struct skl_pipe_wm *new_pipe_wm)
{}

/*
 * To make sure the cursor watermark registers are always consistent
 * with our computed state the following scenario needs special
 * treatment:
 *
 * 1. enable cursor
 * 2. move cursor entirely offscreen
 * 3. disable cursor
 *
 * Step 2. does call .disable_plane() but does not zero the watermarks
 * (since we consider an offscreen cursor still active for the purposes
 * of watermarks). Step 3. would not normally call .disable_plane()
 * because the actual plane visibility isn't changing, and we don't
 * deallocate the cursor ddb until the pipe gets disabled. So we must
 * force step 3. to call .disable_plane() to update the watermark
 * registers properly.
 *
 * Other planes do not suffer from this issues as their watermarks are
 * calculated based on the actual plane visibility. The only time this
 * can trigger for the other planes is during the initial readout as the
 * default value of the watermarks registers is not zero.
 */
static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{}

/*
 * If Fixed Refresh Rate or For VRR case Vmin = Vmax = Flipline:
 * Program DEEP PKG_C_LATENCY Pkg C with highest valid latency from
 * watermark level1 and up and above. If watermark level 1 is
 * invalid program it with all 1's.
 * Program PKG_C_LATENCY Added Wake Time = DSB execution time
 * If Variable Refresh Rate where Vmin != Vmax != Flipline:
 * Program DEEP PKG_C_LATENCY Pkg C with all 1's.
 * Program PKG_C_LATENCY Added Wake Time = 0
 */
static void
skl_program_dpkgc_latency(struct drm_i915_private *i915, bool enable_dpkgc)
{}

static int
skl_compute_wm(struct intel_atomic_state *state)
{}

static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level)
{}

static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
				     struct skl_pipe_wm *out)
{}

static void skl_wm_get_hw_state(struct drm_i915_private *i915)
{}

static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
{}

static void skl_wm_sanitize(struct drm_i915_private *i915)
{}

static void skl_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915)
{}

/*
 * NOTE(review): body stripped in this extract. Presumably cross-checks
 * the computed watermark/DDB state against the hardware state for
 * @crtc — confirm against the full source.
 */
void intel_wm_state_verify(struct intel_atomic_state *state,
			   struct intel_crtc *crtc)
{}

bool skl_watermark_ipc_enabled(struct drm_i915_private *i915)
{}

void skl_watermark_ipc_update(struct drm_i915_private *i915)
{}

static bool skl_watermark_ipc_can_enable(struct drm_i915_private *i915)
{}

void skl_watermark_ipc_init(struct drm_i915_private *i915)
{}

static void
adjust_wm_latency(struct drm_i915_private *i915,
		  u16 wm[], int num_levels, int read_latency)
{}

static void mtl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
{}

static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
{}

static void skl_setup_wm_latency(struct drm_i915_private *i915)
{}

static const struct intel_wm_funcs skl_wm_funcs =;

/*
 * NOTE(review): body stripped in this extract. Presumably the SKL+
 * watermark init entry point (latency setup, vfunc registration) —
 * confirm against the full source.
 */
void skl_wm_init(struct drm_i915_private *i915)
{}

static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj)
{}

static void intel_dbuf_destroy_state(struct intel_global_obj *obj,
				     struct intel_global_state *state)
{}

static const struct intel_global_state_funcs intel_dbuf_funcs =;

struct intel_dbuf_state *
intel_atomic_get_dbuf_state(struct intel_atomic_state *state)
{}

/*
 * NOTE(review): body stripped in this extract. Presumably allocates and
 * registers the initial DBuf global state object; returns 0 or a
 * negative errno — confirm against the full source.
 */
int intel_dbuf_init(struct drm_i915_private *i915)
{}

static bool xelpdp_is_only_pipe_per_dbuf_bank(enum pipe pipe, u8 active_pipes)
{}

static void intel_mbus_dbox_update(struct intel_atomic_state *state)
{}

int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state,
					   int ratio)
{}

void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915,
					 int ratio, bool joined_mbus)
{}

static void intel_dbuf_mdclk_min_tracker_update(struct intel_atomic_state *state)
{}

static enum pipe intel_mbus_joined_pipe(struct intel_atomic_state *state,
					const struct intel_dbuf_state *dbuf_state)
{}

static void intel_dbuf_mbus_join_update(struct intel_atomic_state *state,
					enum pipe pipe)
{}

void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state)
{}

void intel_dbuf_mbus_post_ddb_update(struct intel_atomic_state *state)
{}

void intel_dbuf_pre_plane_update(struct intel_atomic_state *state)
{}

void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
{}

static int skl_watermark_ipc_status_show(struct seq_file *m, void *data)
{}

static int skl_watermark_ipc_status_open(struct inode *inode, struct file *file)
{}

static ssize_t skl_watermark_ipc_status_write(struct file *file,
					      const char __user *ubuf,
					      size_t len, loff_t *offp)
{}

static const struct file_operations skl_watermark_ipc_status_fops =;

static int intel_sagv_status_show(struct seq_file *m, void *unused)
{}

/*
 * DEFINE_SHOW_ATTRIBUTE(name) generates name_open/name_fops around
 * name_show; the original invocation had an empty argument list, which
 * does not compile. intel_sagv_status_show() is defined just above.
 */
DEFINE_SHOW_ATTRIBUTE(intel_sagv_status);

void skl_watermark_debugfs_register(struct drm_i915_private *i915)
{}

/*
 * NOTE(review): body stripped in this extract. Presumably returns the
 * highest watermark latency (us) at or above @initial_wm_level —
 * confirm against the full source.
 */
unsigned int skl_watermark_max_latency(struct drm_i915_private *i915, int initial_wm_level)
{}