linux/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c

/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define SWSMU_CODE_LAYER_L2

#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_atombios.h"
#include "smu_v13_0_6_pmfw.h"
#include "smu13_driver_if_v13_0_6.h"
#include "smu_v13_0_6_ppsmc.h"
#include "soc15_common.h"
#include "atom.h"
#include "power_state.h"
#include "smu_v13_0.h"
#include "smu_v13_0_6_ppt.h"
#include "nbio/nbio_7_4_offset.h"
#include "nbio/nbio_7_4_sh_mask.h"
#include "thm/thm_11_0_2_offset.h"
#include "thm/thm_11_0_2_sh_mask.h"
#include "amdgpu_xgmi.h"
#include <linux/pci.h>
#include "amdgpu_ras.h"
#include "amdgpu_mca.h"
#include "amdgpu_aca.h"
#include "smu_cmn.h"
#include "mp/mp_13_0_6_offset.h"
#include "mp/mp_13_0_6_sh_mask.h"
#include "umc_v12_0.h"

#undef MP1_Public
#undef smnMP1_FIRMWARE_FLAGS

/* TODO: Check final register offsets */
#define MP1_Public
#define smnMP1_FIRMWARE_FLAGS
/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug
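
/*
 * Illustrative example (not part of the driver): a device-aware call such as
 *
 *	dev_warn(smu->adev->dev, "failed to export SMU metrics table\n");
 *
 * prefixes the message with the PCI identity of the GPU that logged it,
 * which is why the plain pr_* helpers are undefined above. The message text
 * is hypothetical.
 */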

MODULE_FIRMWARE();
MODULE_FIRMWARE();

#define to_amdgpu_device(x)

#define SMU_13_0_6_FEA_MAP(smu_feature, smu_13_0_6_feature)

#define FEATURE_MASK(feature)
#define SMC_DPM_FEATURE

/* possible frequency drift (1 MHz) */
#define EPSILON

#define smnPCIE_ESM_CTRL
#define smnPCIE_LC_LINK_WIDTH_CNTL
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT
#define MAX_LINK_WIDTH

#define smnPCIE_LC_SPEED_CNTL
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT
#define LINK_SPEED_MAX

#define SMU_13_0_6_DSCLK_THRESHOLD

#define MCA_BANK_IPID(_ip, _hwid, _type)

struct mca_bank_ipid {};

struct mca_ras_info {};

#define P2S_TABLE_ID_A
#define P2S_TABLE_ID_X

// clang-format off
static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COUNT] =;

// clang-format on
static const struct cmn2asic_mapping smu_v13_0_6_clk_map[SMU_CLK_COUNT] =;

static const struct cmn2asic_mapping smu_v13_0_6_feature_mask_map[SMU_FEATURE_COUNT] =;

#define TABLE_PMSTATUSLOG
#define TABLE_SMU_METRICS
#define TABLE_I2C_COMMANDS
#define TABLE_COUNT

static const struct cmn2asic_mapping smu_v13_0_6_table_map[SMU_TABLE_COUNT] =;

static const uint8_t smu_v13_0_6_throttler_map[] =;

struct PPTable_t {};

#define SMUQ10_TO_UINT(x)
#define SMUQ10_FRAC(x)
#define SMUQ10_ROUND(x)
#define GET_METRIC_FIELD(field)
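
/*
 * Illustrative sketch, not the driver's implementation: the SMUQ10_* helpers
 * above are expected to unpack metrics reported in unsigned Q10 fixed point,
 * i.e. the low 10 bits hold the fraction in units of 1/1024. The function
 * name and the exact rounding rule below are assumptions.
 */
static inline uint32_t smu_q10_round_example(uint64_t q10)
{
	uint32_t ipart = q10 >> 10;	/* integer part */
	uint32_t frac = q10 & 0x3ff;	/* fractional part, in 1/1024ths */

	/* round to nearest: carry when the fraction is at least 0.5 */
	return ipart + (frac >= 0x200 ? 1 : 0);
}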

struct smu_v13_0_6_dpm_map {};

static int smu_v13_0_6_init_microcode(struct smu_context *smu)
{}

static int smu_v13_0_6_tables_init(struct smu_context *smu)
{}

static int smu_v13_0_6_select_policy_soc_pstate(struct smu_context *smu,
						int policy)
{}

static int smu_v13_0_6_select_plpd_policy(struct smu_context *smu, int level)
{}

static int smu_v13_0_6_allocate_dpm_context(struct smu_context *smu)
{}

static int smu_v13_0_6_init_smc_tables(struct smu_context *smu)
{}

static int smu_v13_0_6_get_allowed_feature_mask(struct smu_context *smu,
						uint32_t *feature_mask,
						uint32_t num)
{}

static int smu_v13_0_6_get_metrics_table(struct smu_context *smu,
					 void *metrics_table, bool bypass_cache)
{}

static ssize_t smu_v13_0_6_get_pm_metrics(struct smu_context *smu,
					  void *metrics, size_t max_size)
{}

static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
{}

static int smu_v13_0_6_get_dpm_ultimate_freq(struct smu_context *smu,
					     enum smu_clk_type clk_type,
					     uint32_t *min, uint32_t *max)
{}

static int smu_v13_0_6_get_dpm_level_count(struct smu_context *smu,
					  enum smu_clk_type clk_type,
					  uint32_t *levels)
{}

static void smu_v13_0_6_pm_policy_init(struct smu_context *smu)
{}

static int smu_v13_0_6_set_default_dpm_table(struct smu_context *smu)
{}

static int smu_v13_0_6_setup_pptable(struct smu_context *smu)
{}

static int smu_v13_0_6_check_fw_status(struct smu_context *smu)
{}

static int smu_v13_0_6_populate_umd_state_clk(struct smu_context *smu)
{}

static int smu_v13_0_6_get_clk_table(struct smu_context *smu,
				     struct pp_clock_levels_with_latency *clocks,
				     struct smu_13_0_dpm_table *dpm_table)
{}

static int smu_v13_0_6_freqs_in_same_level(int32_t frequency1,
					   int32_t frequency2)
{}
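
/*
 * Illustrative sketch, not the driver's implementation: the helper above is
 * expected to treat two clocks as the same DPM level when they differ by no
 * more than the 1 MHz drift allowed for by EPSILON. The literal tolerance
 * below is an assumption standing in for the (elided) EPSILON value.
 */
static inline bool freqs_in_same_level_example(int32_t f1_mhz, int32_t f2_mhz)
{
	int32_t delta = f1_mhz - f2_mhz;

	if (delta < 0)
		delta = -delta;

	return delta <= 1;	/* assumed 1 MHz tolerance */
}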

static uint32_t smu_v13_0_6_get_throttler_status(struct smu_context *smu)
{}

static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
					    MetricsMember_t member,
					    uint32_t *value)
{}

static int smu_v13_0_6_get_current_clk_freq_by_table(struct smu_context *smu,
						     enum smu_clk_type clk_type,
						     uint32_t *value)
{}

static int smu_v13_0_6_print_clks(struct smu_context *smu, char *buf, int size,
				  struct smu_13_0_dpm_table *single_dpm_table,
				  uint32_t curr_clk, const char *clk_name)
{}

static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
					enum smu_clk_type type, char *buf)
{}

static int smu_v13_0_6_upload_dpm_level(struct smu_context *smu, bool max,
					uint32_t feature_mask, uint32_t level)
{}

static int smu_v13_0_6_force_clk_levels(struct smu_context *smu,
					enum smu_clk_type type, uint32_t mask)
{}

static int smu_v13_0_6_get_current_activity_percent(struct smu_context *smu,
						    enum amd_pp_sensors sensor,
						    uint32_t *value)
{}

static int smu_v13_0_6_thermal_get_temperature(struct smu_context *smu,
					       enum amd_pp_sensors sensor,
					       uint32_t *value)
{}

static int smu_v13_0_6_read_sensor(struct smu_context *smu,
				   enum amd_pp_sensors sensor, void *data,
				   uint32_t *size)
{}

static int smu_v13_0_6_get_power_limit(struct smu_context *smu,
						uint32_t *current_power_limit,
						uint32_t *default_power_limit,
						uint32_t *max_power_limit,
						uint32_t *min_power_limit)
{}

static int smu_v13_0_6_set_power_limit(struct smu_context *smu,
				       enum smu_ppt_limit_type limit_type,
				       uint32_t limit)
{}

static int smu_v13_0_6_irq_process(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{}

static int smu_v13_0_6_set_irq_state(struct amdgpu_device *adev,
			      struct amdgpu_irq_src *source,
			      unsigned type,
			      enum amdgpu_interrupt_state state)
{}

static const struct amdgpu_irq_src_funcs smu_v13_0_6_irq_funcs =;

static int smu_v13_0_6_register_irq_handler(struct smu_context *smu)
{}

static int smu_v13_0_6_notify_unload(struct smu_context *smu)
{}

static int smu_v13_0_6_mca_set_debug_mode(struct smu_context *smu, bool enable)
{}

static int smu_v13_0_6_system_features_control(struct smu_context *smu,
					       bool enable)
{}

static int smu_v13_0_6_set_gfx_soft_freq_limited_range(struct smu_context *smu,
						       uint32_t min,
						       uint32_t max)
{}

static int smu_v13_0_6_set_performance_level(struct smu_context *smu,
					     enum amd_dpm_forced_level level)
{}

static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
						   enum smu_clk_type clk_type,
						   uint32_t min, uint32_t max)
{}

static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
					  enum PP_OD_DPM_TABLE_COMMAND type,
					  long input[], uint32_t size)
{}

static int smu_v13_0_6_get_enabled_mask(struct smu_context *smu,
					uint64_t *feature_mask)
{}

static bool smu_v13_0_6_is_dpm_running(struct smu_context *smu)
{}

static int smu_v13_0_6_request_i2c_xfer(struct smu_context *smu,
					void *table_data)
{}

static int smu_v13_0_6_i2c_xfer(struct i2c_adapter *i2c_adap,
				struct i2c_msg *msg, int num_msgs)
{}

static u32 smu_v13_0_6_i2c_func(struct i2c_adapter *adap)
{}

static const struct i2c_algorithm smu_v13_0_6_i2c_algo =;

static const struct i2c_adapter_quirks smu_v13_0_6_i2c_control_quirks =;

static int smu_v13_0_6_i2c_control_init(struct smu_context *smu)
{}

static void smu_v13_0_6_i2c_control_fini(struct smu_context *smu)
{}

static void smu_v13_0_6_get_unique_id(struct smu_context *smu)
{}

static int smu_v13_0_6_get_bamaco_support(struct smu_context *smu)
{}

static const char *const throttling_logging_label[] =;

static void smu_v13_0_6_log_thermal_throttling_event(struct smu_context *smu)
{}

static int
smu_v13_0_6_get_current_pcie_link_width_level(struct smu_context *smu)
{}

static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu)
{}
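
/*
 * Illustrative sketch of the mask/shift pattern implied by the
 * PCIE_LC_LINK_WIDTH_CNTL / PCIE_LC_SPEED_CNTL field macros above: a raw
 * register value is reduced to its field with a read mask and shift. The
 * function and parameter names are hypothetical; the real helpers obtain the
 * register contents through the driver's PCIe/NBIO register accessors.
 */
static inline uint32_t pcie_reg_field_example(uint32_t reg_val, uint32_t mask,
					      uint32_t shift)
{
	return (reg_val & mask) >> shift;
}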

static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
{}

static void smu_v13_0_6_restore_pci_config(struct smu_context *smu)
{}

static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
{}

static int smu_v13_0_6_get_thermal_temperature_range(struct smu_context *smu,
						     struct smu_temperature_range *range)
{}

static int smu_v13_0_6_mode1_reset(struct smu_context *smu)
{}

static bool smu_v13_0_6_is_mode1_reset_supported(struct smu_context *smu)
{}

static bool smu_v13_0_6_is_mode2_reset_supported(struct smu_context *smu)
{}

static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu,
						 uint32_t size)
{}

static int smu_v13_0_6_send_rma_reason(struct smu_context *smu)
{}

static int mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
{}

static int smu_v13_0_6_get_valid_mca_count(struct smu_context *smu, enum amdgpu_mca_error_type type, uint32_t *count)
{}

static int __smu_v13_0_6_mca_dump_bank(struct smu_context *smu, enum amdgpu_mca_error_type type,
				       int idx, int offset, uint32_t *val)
{}

static int smu_v13_0_6_mca_dump_bank(struct smu_context *smu, enum amdgpu_mca_error_type type,
				     int idx, int offset, uint32_t *val, int count)
{}

static const struct mca_bank_ipid smu_v13_0_6_mca_ipid_table[AMDGPU_MCA_IP_COUNT] =;

static void mca_bank_entry_info_decode(struct mca_bank_entry *entry, struct mca_bank_info *info)
{}

static int mca_bank_read_reg(struct amdgpu_device *adev, enum amdgpu_mca_error_type type,
			     int idx, int reg_idx, uint64_t *val)
{}

static int mca_get_mca_entry(struct amdgpu_device *adev, enum amdgpu_mca_error_type type,
			     int idx, struct mca_bank_entry *entry)
{}

static int mca_decode_ipid_to_hwip(uint64_t val)
{}

static int mca_umc_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
				     enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count)
{}

static int mca_pcs_xgmi_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
					  enum amdgpu_mca_error_type type, struct mca_bank_entry *entry,
					  uint32_t *count)
{}

static bool mca_smu_check_error_code(struct amdgpu_device *adev, const struct mca_ras_info *mca_ras,
				     uint32_t errcode)
{}

static int mca_gfx_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
				     enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count)
{}

static int mca_smu_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
				     enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count)
{}

static bool mca_gfx_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
				      enum amdgpu_mca_error_type type, struct mca_bank_entry *entry)
{
	uint32_t instlo;

	instlo = REG_GET_FIELD(entry->regs[MCA_REG_IDX_IPID], MCMP1_IPIDT0, InstanceIdLo);
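	/* drop bit 0 of InstanceIdLo before matching the instance below */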
	instlo &= GENMASK(31, 1);
	switch (instlo) {
	case 0x36430400: /* SMNAID XCD 0 */
	case 0x38430400: /* SMNAID XCD 1 */
	case 0x40430400: /* SMNXCD XCD 0, NOTE: FIXME: fix this error later */
		return true;
	default:
		return false;
	}
}

static bool mca_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
				  enum amdgpu_mca_error_type type, struct mca_bank_entry *entry)
{}

static int sdma_err_codes[] =;
static int mmhub_err_codes[] =;

static const struct mca_ras_info mca_ras_table[] =;

static const struct mca_ras_info *mca_get_mca_ras_info(struct amdgpu_device *adev, enum amdgpu_ras_block blkid)
{}

static int mca_get_valid_mca_count(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, uint32_t *count)
{}

static bool mca_bank_is_valid(struct amdgpu_device *adev, const struct mca_ras_info *mca_ras,
			      enum amdgpu_mca_error_type type, struct mca_bank_entry *entry)
{}

static int mca_smu_parse_mca_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type,
					 struct mca_bank_entry *entry, uint32_t *count)
{}

static int mca_smu_get_mca_entry(struct amdgpu_device *adev,
				 enum amdgpu_mca_error_type type, int idx, struct mca_bank_entry *entry)
{}

static int mca_smu_get_valid_mca_count(struct amdgpu_device *adev,
				       enum amdgpu_mca_error_type type, uint32_t *count)
{}

static const struct amdgpu_mca_smu_funcs smu_v13_0_6_mca_smu_funcs =;

static int aca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
{}

static int smu_v13_0_6_get_valid_aca_count(struct smu_context *smu, enum aca_smu_type type, u32 *count)
{}

static int aca_smu_get_valid_aca_count(struct amdgpu_device *adev,
				       enum aca_smu_type type, u32 *count)
{}

static int __smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_smu_type type,
				       int idx, int offset, u32 *val)
{}

static int smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_smu_type type,
				     int idx, int offset, u32 *val, int count)
{}

static int aca_bank_read_reg(struct amdgpu_device *adev, enum aca_smu_type type,
			     int idx, int reg_idx, u64 *val)
{}

static int aca_smu_get_valid_aca_bank(struct amdgpu_device *adev,
				      enum aca_smu_type type, int idx, struct aca_bank *bank)
{}

static int aca_smu_parse_error_code(struct amdgpu_device *adev, struct aca_bank *bank)
{}

static const struct aca_smu_funcs smu_v13_0_6_aca_smu_funcs =;

static const struct pptable_funcs smu_v13_0_6_ppt_funcs =;

void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
{}