linux/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c

// SPDX-License-Identifier: MIT
/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "resource.h"
#include "clk_mgr.h"
#include "dchubbub.h"
#include "dcn20/dcn20_resource.h"
#include "dcn21/dcn21_resource.h"
#include "clk_mgr/dcn21/rn_clk_mgr.h"

#include "link.h"
#include "dcn20_fpu.h"
#include "dc_state_priv.h"

#define DC_LOGGER
#define DC_LOGGER_INIT(logger)

#ifndef MAX
#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
#endif
#ifndef MIN
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
#endif

/* Constant */
#define LPDDR_MEM_RETRAIN_LATENCY 4.5

/**
 * DOC: DCN2x FPU manipulation Overview
 *
 * The DCN architecture relies on FPU operations, which require special
 * compilation flags and the use of kernel_fpu_begin/end functions; ideally, we
 * want to avoid spreading FPU access across multiple files. With this idea in
 * mind, this file aims to centralize all DCN20 and DCN2.1 (DCN2x) functions
 * that require FPU access in a single place. Code in this file follows this
 * pattern:
 *
 * 1. Functions that use FPU operations should be isolated in static functions.
 * 2. The FPU functions should have the noinline attribute to ensure that
 *    everything that deals with FP registers is contained within this call.
 * 3. Any function that needs to be accessed outside this file requires a
 *    public interface that does not use any FPU references.
 * 4. Developers **must not** use DC_FP_START/END in this file; instead, they
 *    need to ensure that the caller invokes it before accessing any function
 *    available in this file. For this reason, public functions in this file
 *    must invoke dc_assert_fp_enabled();
 *
 * Let's expand on the idea behind this code pattern a little more. To fully
 * isolate FPU operations in a single place, we must avoid situations where
 * the compiler spills FP values to registers because FP use is enabled in a
 * specific C file. Note that even if we isolate all FPU functions in a
 * single file and only call its interface from other files, the compiler
 * might enable the use of FPU before we call DC_FP_START. Nevertheless, it
 * is the programmer's responsibility to invoke DC_FP_START/END in the
 * correct place. To highlight situations where developers forget to use the
 * FP protection before calling the DC FPU interface functions, we introduce
 * a helper that checks whether the function is invoked under FP protection.
 * If not, it will trigger a kernel warning.
 */
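
/*
 * A minimal sketch of the pattern described above. The helper names
 * example_fpu_math() and example_public_interface() are hypothetical and
 * only illustrate the shape of the code; dc_assert_fp_enabled() and
 * DC_FP_START/END are the existing DC helpers named in the documentation
 * above.
 *
 *	// FPU math stays inside a static noinline helper in this file.
 *	static noinline void example_fpu_math(struct dc_state *context)
 *	{
 *		// ... all double/float arithmetic happens here ...
 *	}
 *
 *	// Public interface: no FP references in the signature, and it
 *	// asserts that the caller already entered an FPU-safe region.
 *	void example_public_interface(struct dc *dc, struct dc_state *context)
 *	{
 *		dc_assert_fp_enabled();
 *		example_fpu_math(context);
 *	}
 *
 *	// Caller side (outside this file): wrap the call in DC_FP_START/END.
 *	DC_FP_START();
 *	example_public_interface(dc, context);
 *	DC_FP_END();
 */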

struct _vcs_dpi_ip_params_st dcn2_0_ip =;

struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip =;

struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc =;

struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc =;

struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc =;

struct _vcs_dpi_ip_params_st dcn2_1_ip =;

struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc =;

struct wm_table ddr4_wm_table_gs =;

struct wm_table lpddr4_wm_table_gs =;

struct wm_table lpddr4_wm_table_with_disabled_ppt =;

struct wm_table ddr4_wm_table_rn =;

struct wm_table ddr4_1R_wm_table_rn =;

struct wm_table lpddr4_wm_table_rn =;

void dcn20_populate_dml_writeback_from_context(struct dc *dc,
					       struct resource_context *res_ctx,
					       display_e2e_pipe_params_st *pipes)
{}

void dcn20_fpu_set_wb_arb_params(struct mcif_arb_params *wb_arb_params,
				 struct dc_state *context,
				 display_e2e_pipe_params_st *pipes,
				 int pipe_cnt, int i)
{}

static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
{}

static enum dcn_zstate_support_state  decide_zstate_support(struct dc *dc, struct dc_state *context)
{}

static void dcn20_adjust_freesync_v_startup(
		const struct dc_crtc_timing *dc_crtc_timing, int *vstartup_start)
{}

void dcn20_calculate_dlg_params(struct dc *dc,
				struct dc_state *context,
				display_e2e_pipe_params_st *pipes,
				int pipe_cnt,
				int vlevel)
{}

static void swizzle_to_dml_params(
		enum swizzle_mode_values swizzle,
		unsigned int *sw_mode)
{}

int dcn20_populate_dml_pipes_from_context(struct dc *dc,
					  struct dc_state *context,
					  display_e2e_pipe_params_st *pipes,
					  bool fast_validate)
{}

void dcn20_calculate_wm(struct dc *dc, struct dc_state *context,
			display_e2e_pipe_params_st *pipes,
			int *out_pipe_cnt,
			int *pipe_split_from,
			int vlevel,
			bool fast_validate)
{}

void dcn20_update_bounding_box(struct dc *dc,
			       struct _vcs_dpi_soc_bounding_box_st *bb,
			       struct pp_smu_nv_clock_table *max_clocks,
			       unsigned int *uclk_states,
			       unsigned int num_states)
{}

void dcn20_cap_soc_clocks(struct _vcs_dpi_soc_bounding_box_st *bb,
			  struct pp_smu_nv_clock_table max_clocks)
{}

void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)
{}

static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *context,
		bool fast_validate, display_e2e_pipe_params_st *pipes)
{}

bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
				 bool fast_validate, display_e2e_pipe_params_st *pipes)
{}

void dcn20_fpu_set_wm_ranges(int i,
			     struct pp_smu_wm_range_sets *ranges,
			     struct _vcs_dpi_soc_bounding_box_st *loaded_bb)
{}

void dcn20_fpu_adjust_dppclk(struct vba_vars_st *v,
			     int vlevel,
			     int max_mpc_comb,
			     int pipe_idx,
			     bool is_validating_bw)
{}

int dcn21_populate_dml_pipes_from_context(struct dc *dc,
					  struct dc_state *context,
					  display_e2e_pipe_params_st *pipes,
					  bool fast_validate)
{}

static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)
{}

static void calculate_wm_set_for_vlevel(int vlevel,
					struct wm_range_table_entry *table_entry,
					struct dcn_watermarks *wm_set,
					struct display_mode_lib *dml,
					display_e2e_pipe_params_st *pipes,
					int pipe_cnt)
{}

static void dcn21_calculate_wm(struct dc *dc, struct dc_state *context,
			display_e2e_pipe_params_st *pipes,
			int *out_pipe_cnt,
			int *pipe_split_from,
			int vlevel_req,
			bool fast_validate)
{}

bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
				 bool fast_validate, display_e2e_pipe_params_st *pipes)
{}

static struct _vcs_dpi_voltage_scaling_st construct_low_pstate_lvl(struct clk_limit_table *clk_table, unsigned int high_voltage_lvl)
{}

void dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{}

void dcn21_clk_mgr_set_bw_params_wm_table(struct clk_bw_params *bw_params)
{}

void dcn201_populate_dml_writeback_from_context_fpu(struct dc *dc,
						    struct resource_context *res_ctx,
						    display_e2e_pipe_params_st *pipes)
{}