/* linux/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c */

/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define SWSMU_CODE_LAYER_L1

#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/power_supply.h>
#include <linux/reboot.h>

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "atom.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"
#include "sienna_cichlid_ppt.h"
#include "renoir_ppt.h"
#include "vangogh_ppt.h"
#include "aldebaran_ppt.h"
#include "yellow_carp_ppt.h"
#include "cyan_skillfish_ppt.h"
#include "smu_v13_0_0_ppt.h"
#include "smu_v13_0_4_ppt.h"
#include "smu_v13_0_5_ppt.h"
#include "smu_v13_0_6_ppt.h"
#include "smu_v13_0_7_ppt.h"
#include "smu_v14_0_0_ppt.h"
#include "smu_v14_0_2_ppt.h"
#include "amd_pcie.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

/* Callback table handed to amdgpu_pm; defined near the end of this file. */
static const struct amd_pm_funcs swsmu_pm_funcs;
/* Forward declarations for helpers referenced before their definitions. */
static int smu_force_smuclk_levels(struct smu_context *smu,
				   enum smu_clk_type clk_type,
				   uint32_t mask);
static int smu_handle_task(struct smu_context *smu,
			   enum amd_dpm_forced_level level,
			   enum amd_pp_task task_id);
static int smu_reset(struct smu_context *smu);
static int smu_set_fan_speed_pwm(void *handle, u32 speed);
static int smu_set_fan_control_mode(void *handle, u32 value);
static int smu_set_power_limit(void *handle, uint32_t limit);
static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);

/*
 * NOTE(review): function bodies in this file are elided ("{}") in this
 * skeleton.  The short comments below are inferred from names and
 * signatures only — confirm against the full implementation.
 */

/* sysfs pp_features read: format the SMU feature mask into @buf. */
static int smu_sys_get_pp_feature_mask(void *handle,
				       char *buf)
{}

/* sysfs pp_features write: apply @new_mask as the enabled-feature set. */
static int smu_sys_set_pp_feature_mask(void *handle,
				       uint64_t new_mask)
{}

/* Enable/disable gfxoff residency accounting. */
int smu_set_residency_gfxoff(struct smu_context *smu, bool value)
{}

/* Query accumulated gfxoff residency into *value. */
int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value)
{}

/* Query the number of gfxoff entries into *value. */
int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value)
{}

/* Query current gfxoff status into *value. */
int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
{}

/* Request a soft [min, max] frequency window for @clk_type. */
int smu_set_soft_freq_range(struct smu_context *smu,
			    enum smu_clk_type clk_type,
			    uint32_t min,
			    uint32_t max)
{}

/* Retrieve the DPM frequency range of @clk_type into *min/*max. */
int smu_get_dpm_freq_range(struct smu_context *smu,
			   enum smu_clk_type clk_type,
			   uint32_t *min,
			   uint32_t *max)
{}

/* Ask the IMU to bring up the gfx block. */
int smu_set_gfx_power_up_by_imu(struct smu_context *smu)
{}

/* amd_pm_funcs callback: memory clock; @low selects low vs. high level. */
static u32 smu_get_mclk(void *handle, bool low)
{}

/* amd_pm_funcs callback: shader clock; @low selects low vs. high level. */
static u32 smu_get_sclk(void *handle, bool low)
{}

static int smu_set_gfx_imu_enable(struct smu_context *smu)
{}

/* Whether a VCN instance is present/enabled on @adev. */
static bool is_vcn_enabled(struct amdgpu_device *adev)
{}

/* Power up (@enable) or gate the VCN block via DPM. */
static int smu_dpm_set_vcn_enable(struct smu_context *smu,
				  bool enable)
{}

/* Power up (@enable) or gate the JPEG block via DPM. */
static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
				   bool enable)
{}

/* Power up (@enable) or gate the VPE block via DPM. */
static int smu_dpm_set_vpe_enable(struct smu_context *smu,
				   bool enable)
{}

/* Power up (@enable) or gate the UMSCH-MM block via DPM. */
static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu,
				   bool enable)
{}

/* Enable the MALL (memory access at last level) cache feature. */
static int smu_set_mall_enable(struct smu_context *smu)
{}

/**
 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
 *
 * @handle:        smu_context pointer
 * @block_type: the IP block to power gate/ungate
 * @gate:       to power gate if true, ungate otherwise
 *
 * This API uses no smu->mutex lock protection due to:
 * 1. It is either called by other IP block(gfx/sdma/vcn/uvd/vce).
 *    This is guarded to be race condition free by the caller.
 * 2. Or get called on user setting request of power_dpm_force_performance_level.
 *    Under this case, the smu->mutex lock protection is already enforced on
 *    the parent API smu_force_performance_level of the call path.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int smu_dpm_set_power_gate(void *handle,
				  uint32_t block_type,
				  bool gate)
{}

/**
 * smu_set_user_clk_dependencies - set user profile clock dependencies
 *
 * @smu:	smu_context pointer
 * @clk:	enum smu_clk_type type
 *
 * Enable/Disable the clock dependency for the @clk type.
 */
static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk)
{}

/**
 * smu_restore_dpm_user_profile - reinstate user dpm profile
 *
 * @smu:	smu_context pointer
 *
 * Restore the saved user power configurations include power limit,
 * clock frequencies, fan control mode and fan speed.
 */
static void smu_restore_dpm_user_profile(struct smu_context *smu)
{}

/*
 * NOTE(review): bodies elided in this skeleton; comments inferred from
 * names/signatures — confirm against the full implementation.
 */

/* Fill @state_info with the number/kinds of supported power states. */
static int smu_get_power_num_states(void *handle,
				    struct pp_states_info *state_info)
{}

/* True when @adev is driven by the software SMU (this) stack. */
bool is_support_sw_smu(struct amdgpu_device *adev)
{}

/* True when CPU core clock (cclk) DPM is supported on @adev. */
bool is_support_cclk_dpm(struct amdgpu_device *adev)
{}


/* sysfs pp_table read: expose the current PPTable; returns its size. */
static int smu_sys_get_pp_table(void *handle,
				char **table)
{}

/* sysfs pp_table write: load a user-supplied PPTable of @size bytes. */
static int smu_sys_set_pp_table(void *handle,
				const char *buf,
				size_t size)
{}

/* Compute the feature mask the driver allows the PMFW to enable. */
static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
{}

/* Select the ASIC-specific ppt_funcs implementation for @adev. */
static int smu_set_funcs(struct amdgpu_device *adev)
{}

/* amd_ip_funcs .early_init: allocate/seed the smu context. */
static int smu_early_init(void *handle)
{}

/* Build the default DPM tables from PMFW-reported data. */
static int smu_set_default_dpm_table(struct smu_context *smu)
{}

static int smu_apply_default_config_table_settings(struct smu_context *smu)
{}

/* amd_ip_funcs .late_init: post-hw-init SMU configuration. */
static int smu_late_init(void *handle)
{}

/* Allocate framebuffer (VRAM) backing for SMU tables. */
static int smu_init_fb_allocations(struct smu_context *smu)
{}

/* Release the framebuffer allocations made by smu_init_fb_allocations(). */
static int smu_fini_fb_allocations(struct smu_context *smu)
{}

/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool will be used for SMC use and msg SetSystemVirtualDramAddr
 * and DramLogSetDramAddr can notify it changed.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{}

/* Free the system-memory pool allocated by smu_alloc_memory_pool(). */
static int smu_free_memory_pool(struct smu_context *smu)
{}

/* Allocate the dummy-read BO (workaround table) for the SMU. */
static int smu_alloc_dummy_read_table(struct smu_context *smu)
{}

/* Release the dummy-read BO. */
static void smu_free_dummy_read_table(struct smu_context *smu)
{}

/* Software-side init of SMC table bookkeeping (no hw access). */
static int smu_smc_table_sw_init(struct smu_context *smu)
{}

/* Software-side teardown matching smu_smc_table_sw_init(). */
static int smu_smc_table_sw_fini(struct smu_context *smu)
{}

/* Deferred work: log PMFW throttling events outside interrupt context. */
static void smu_throttling_logging_work_fn(struct work_struct *work)
{}

/* Deferred work: service SMU interrupt follow-up actions. */
static void smu_interrupt_work_fn(struct work_struct *work)
{}

/* Delayed work: handle a software CTF (thermal) event. */
static void smu_swctf_delayed_work_handler(struct work_struct *work)
{}

/* Initialise the default XGMI per-link power-down (PLPD) mode. */
static void smu_init_xgmi_plpd_mode(struct smu_context *smu)
{}

/* amd_ip_funcs .sw_init: software-only initialisation of the SMU IP. */
static int smu_sw_init(void *handle)
{}

/* amd_ip_funcs .sw_fini: software-only teardown of the SMU IP. */
static int smu_sw_fini(void *handle)
{}

/* Query the ASIC thermal temperature ranges from the ppt backend. */
static int smu_get_thermal_temperature_range(struct smu_context *smu)
{}

/**
 * smu_wbrf_handle_exclusion_ranges - consume the wbrf exclusion ranges
 *
 * @smu: smu_context pointer
 *
 * Retrieve the wbrf exclusion ranges and send them to PMFW for proper handling.
 * Returns 0 on success, error on failure.
 */
static int smu_wbrf_handle_exclusion_ranges(struct smu_context *smu)
{}

/**
 * smu_wbrf_event_handler - handle notify events
 *
 * @nb: notifier block
 * @action: event type
 * @_arg: event data
 *
 * Calls relevant amdgpu function in response to wbrf event
 * notification from kernel.
 */
static int smu_wbrf_event_handler(struct notifier_block *nb,
				  unsigned long action, void *_arg)
{}

/**
 * smu_wbrf_delayed_work_handler - callback on delayed work timer expired
 *
 * @work: struct work_struct pointer
 *
 * Flood is over and driver will consume the latest exclusion ranges.
 */
static void smu_wbrf_delayed_work_handler(struct work_struct *work)
{}

/**
 * smu_wbrf_support_check - check wbrf support
 *
 * @smu: smu_context pointer
 *
 * Verifies the ACPI interface whether wbrf is supported.
 */
static void smu_wbrf_support_check(struct smu_context *smu)
{}

/**
 * smu_wbrf_init - init driver wbrf support
 *
 * @smu: smu_context pointer
 *
 * Verifies the AMD ACPI interfaces and registers with the wbrf
 * notifier chain if wbrf feature is supported.
 * Returns 0 on success, error on failure.
 */
static int smu_wbrf_init(struct smu_context *smu)
{}

/**
 * smu_wbrf_fini - tear down driver wbrf support
 *
 * @smu: smu_context pointer
 *
 * Unregisters with the wbrf notifier chain.
 */
static void smu_wbrf_fini(struct smu_context *smu)
{}

/*
 * NOTE(review): bodies elided in this skeleton; comments inferred from
 * names/signatures — confirm against the full implementation.
 */

/* Hardware-touching part of SMU bring-up (tables, features, clocks). */
static int smu_smc_hw_setup(struct smu_context *smu)
{}

/* Start (or confirm running) the SMC firmware engine. */
static int smu_start_smc_engine(struct smu_context *smu)
{}

/* amd_ip_funcs .hw_init: full hardware initialisation of the SMU IP. */
static int smu_hw_init(void *handle)
{}

/* Disable DPM features ahead of teardown/suspend/reset. */
static int smu_disable_dpms(struct smu_context *smu)
{}

/* Hardware-touching part of SMU teardown, inverse of smu_smc_hw_setup(). */
static int smu_smc_hw_cleanup(struct smu_context *smu)
{}

/* Restore the MP1 state machine during teardown/reset. */
static int smu_reset_mp1_state(struct smu_context *smu)
{}

/* amd_ip_funcs .hw_fini: hardware teardown of the SMU IP. */
static int smu_hw_fini(void *handle)
{}

/* amd_ip_funcs .late_fini: last-stage cleanup. */
static void smu_late_fini(void *handle)
{}

/* Full SMU reset: hw_fini followed by hw_init. */
static int smu_reset(struct smu_context *smu)
{}

/* amd_ip_funcs .suspend. */
static int smu_suspend(void *handle)
{}

/* amd_ip_funcs .resume. */
static int smu_resume(void *handle)
{}

/* Notify PMFW of a display configuration change (e.g. display count). */
static int smu_display_configuration_change(void *handle,
					    const struct amd_pp_display_configuration *display_config)
{}

/* amd_ip_funcs .set_clockgating_state. */
static int smu_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{}

/* amd_ip_funcs .set_powergating_state. */
static int smu_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{}

/* Enter/exit the UMD pstate when crossing profiling levels. */
static int smu_enable_umd_pstate(void *handle,
		      enum amd_dpm_forced_level *level)
{}

/* Push a power profile mode change down to the ppt backend. */
static int smu_bump_power_profile_mode(struct smu_context *smu,
					   long *param,
					   uint32_t param_size)
{}

/* Re-evaluate the power state for @level; may skip display settings. */
static int smu_adjust_power_state_dynamic(struct smu_context *smu,
				   enum amd_dpm_forced_level level,
				   bool skip_display_settings)
{}

/* Dispatch an amd_pp_task at the given forced level. */
static int smu_handle_task(struct smu_context *smu,
			   enum amd_dpm_forced_level level,
			   enum amd_pp_task task_id)
{}

/* amd_pm_funcs wrapper around smu_handle_task(). */
static int smu_handle_dpm_task(void *handle,
			       enum amd_pp_task task_id,
			       enum amd_pm_state_type *user_state)
{}

/*
 * NOTE(review): bodies elided in this skeleton; comments inferred from
 * names/signatures — confirm against the full implementation.
 */

/* Enable (@en) or disable workload power profile @type. */
static int smu_switch_power_profile(void *handle,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{}

/* Current forced performance level. */
static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
{}

/* sysfs power_dpm_force_performance_level write path. */
static int smu_force_performance_level(void *handle,
				       enum amd_dpm_forced_level level)
{}

/* Tell PMFW how many displays are active. */
static int smu_set_display_count(void *handle, uint32_t count)
{}

/* Restrict @clk_type to the DPM levels selected in @mask. */
static int smu_force_smuclk_levels(struct smu_context *smu,
			 enum smu_clk_type clk_type,
			 uint32_t mask)
{}

/* pp_clock_type wrapper around smu_force_smuclk_levels(). */
static int smu_force_ppclk_levels(void *handle,
				  enum pp_clock_type type,
				  uint32_t mask)
{}

/*
 * On system suspending or resetting, the dpm_enabled
 * flag will be cleared. So that those SMU services which
 * are not supported will be gated.
 * However, the mp1 state setting should still be granted
 * even if the dpm_enabled cleared.
 */
static int smu_set_mp1_state(void *handle,
			     enum pp_mp1_state mp1_state)
{}

/* Select the data-fabric C-state policy. */
static int smu_set_df_cstate(void *handle,
			     enum pp_df_cstate state)
{}

/* Push the current watermarks table to PMFW. */
int smu_write_watermarks_table(struct smu_context *smu)
{}

/* Record display watermark ranges supplied by DC. */
static int smu_set_watermarks_for_clock_ranges(void *handle,
					       struct pp_smu_wm_range_sets *clock_ranges)
{}

/* Notify PMFW of an AC <-> DC power-source change. */
int smu_set_ac_dc(struct smu_context *smu)
{}

/* IP-block callback table for the SMU (initializer elided in this skeleton). */
const struct amd_ip_funcs smu_ip_funcs =;

/* Version descriptors binding smu_ip_funcs to each SMU generation. */
const struct amdgpu_ip_block_version smu_v11_0_ip_block =;

const struct amdgpu_ip_block_version smu_v12_0_ip_block =;

const struct amdgpu_ip_block_version smu_v13_0_ip_block =;

const struct amdgpu_ip_block_version smu_v14_0_ip_block =;

/*
 * NOTE(review): bodies elided in this skeleton; comments inferred from
 * names/signatures — confirm against the full implementation.
 */

/* amd_pm_funcs callback: (re)load the SMU microcode. */
static int smu_load_microcode(void *handle)
{}

/* Enable/disable gfx clock/power gating via PMFW. */
static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
{}

/* hwmon fan write path, RPM units. */
static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
{}

/**
 * smu_get_power_limit - Request one of the SMU Power Limits
 *
 * @handle: pointer to smu context
 * @limit: requested limit is written back to this variable
 * @pp_limit_level: &pp_power_limit_level selecting which power limit to return
 * @pp_power_type: &pp_power_type type of power
 * Return:  0 on success, <0 on error
 *
 */
int smu_get_power_limit(void *handle,
			uint32_t *limit,
			enum pp_power_limit_level pp_limit_level,
			enum pp_power_type pp_power_type)
{}

/* hwmon/sysfs write path for the sustained power limit. */
static int smu_set_power_limit(void *handle, uint32_t limit)
{}

/* Format the DPM levels of @clk_type into @buf (sysfs pp_dpm_* read). */
static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
{}

/* Map the generic pp_clock_type onto the internal smu_clk_type. */
static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)
{}

/* pp_clock_type wrapper around smu_print_smuclk_levels(). */
static int smu_print_ppclk_levels(void *handle,
				  enum pp_clock_type type,
				  char *buf)
{}

/* As smu_print_ppclk_levels() but appends at *offset (sysfs_emit_at style). */
static int smu_emit_ppclk_levels(void *handle, enum pp_clock_type type, char *buf, int *offset)
{}

/* sysfs pp_od_clk_voltage write: edit the overdrive DPM table. */
static int smu_od_edit_dpm_table(void *handle,
				 enum PP_OD_DPM_TABLE_COMMAND type,
				 long *input, uint32_t size)
{}

/* amd_pm_funcs sensor read (temperature, power, clocks, ...). */
static int smu_read_sensor(void *handle,
			   int sensor,
			   void *data,
			   int *size_arg)
{}

/* Query the APU thermal limit into *limit. */
static int smu_get_apu_thermal_limit(void *handle, uint32_t *limit)
{}

/* Program a new APU thermal limit. */
static int smu_set_apu_thermal_limit(void *handle, uint32_t limit)
{}

/* sysfs pp_power_profile_mode read. */
static int smu_get_power_profile_mode(void *handle, char *buf)
{}

/* sysfs pp_power_profile_mode write. */
static int smu_set_power_profile_mode(void *handle,
				      long *param,
				      uint32_t param_size)
{}

/* hwmon pwm1_enable read. */
static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
{}

/* hwmon pwm1_enable write. */
static int smu_set_fan_control_mode(void *handle, u32 value)
{}

/* hwmon fan read path, PWM units. */
static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
{}

/* hwmon fan write path, PWM units. */
static int smu_set_fan_speed_pwm(void *handle, u32 speed)
{}

/* hwmon fan read path, RPM units. */
static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
{}

/* Program the deep-sleep DCEF clock threshold. */
static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
{}

/*
 * NOTE(review): bodies elided in this skeleton; comments inferred from
 * names/signatures — confirm against the full implementation.
 */

/* DC interface: report clock levels with latency for @type. */
static int smu_get_clock_by_type_with_latency(void *handle,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_latency *clocks)
{}

/* DC interface: honour a display clock/voltage request. */
static int smu_display_clock_voltage_request(void *handle,
					     struct pp_display_clock_request *clock_req)
{}


/* DC interface: allow/forbid memory clock switching. */
static int smu_display_disable_memory_clock_switch(void *handle,
						   bool disable_memory_clock_switch)
{}

/* Set the XGMI link pstate. */
static int smu_set_xgmi_pstate(void *handle,
			       uint32_t pstate)
{}

/* Whether BACO (bus-active, chip-off) is supported. */
static int smu_get_baco_capability(void *handle)
{}

/* Enter/exit BACO according to @state. */
static int smu_baco_set_state(void *handle, int state)
{}

/* Whether mode-1 (whole-ASIC) reset is supported. */
bool smu_mode1_reset_is_support(struct smu_context *smu)
{}

/* Whether mode-2 (engine-level) reset is supported. */
bool smu_mode2_reset_is_support(struct smu_context *smu)
{}

/* Perform a mode-1 reset through PMFW. */
int smu_mode1_reset(struct smu_context *smu)
{}

/* Perform a mode-2 reset through PMFW. */
static int smu_mode2_reset(void *handle)
{}

/* Re-enable gfx features after a reset. */
static int smu_enable_gfx_features(void *handle)
{}

/* DC interface: maximum sustainable clocks table. */
static int smu_get_max_sustainable_clocks_by_dc(void *handle,
						struct pp_smu_nv_clock_table *max_clocks)
{}

/* DC interface: enumerate UCLK DPM states in kHz. */
static int smu_get_uclk_dpm_states(void *handle,
				   unsigned int *clock_values_in_khz,
				   unsigned int *num_states)
{}

/* Current high-level power state (performance/battery/...). */
static enum amd_pm_state_type smu_get_current_power_state(void *handle)
{}

/* DC interface: full DPM clock table. */
static int smu_get_dpm_clock_table(void *handle,
				   struct dpm_clocks *clock_table)
{}

/* sysfs gpu_metrics read: returns table size, sets *table. */
static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
{}

/* Copy up to @size bytes of PM metrics into @pm_metrics. */
static ssize_t smu_sys_get_pm_metrics(void *handle, void *pm_metrics,
				      size_t size)
{}

/* Enable multi-GPU fan boost. */
static int smu_enable_mgpu_fan_boost(void *handle)
{}

/* Notify PMFW of a gfx state change (s0ix-related on some ASICs). */
static int smu_gfx_state_change_set(void *handle,
				    uint32_t state)
{}

/* Enable/disable light-SBR (side-band reset) passthrough handling. */
int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
{}

/* RAS interface: fetch ECC error info into @umc_ecc. */
int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc)
{}

/* Expose the PMFW private log buffer address and size. */
static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
{}

/* Append a formatted description of @policy to @sysbuf. */
static void smu_print_dpm_policy(struct smu_dpm_policy *policy, char *sysbuf,
				 size_t *size)
{}

/* sysfs pm_policy read for policy type @p_type. */
ssize_t smu_get_pm_policy_info(struct smu_context *smu,
			       enum pp_pm_policy p_type, char *sysbuf)
{}

/* Look up the policy object for @p_type, or NULL if unsupported. */
struct smu_dpm_policy *smu_get_pm_policy(struct smu_context *smu,
					 enum pp_pm_policy p_type)
{}

/* sysfs pm_policy write: set @p_type to @level. */
int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
		      int level)
{}

/* Power-management callback table (initializer elided in this skeleton). */
static const struct amd_pm_funcs swsmu_pm_funcs =;

/* Block until PMFW signals @event (with @event_arg), or time out. */
int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
		       uint64_t event_arg)
{}

/* Copy up to @size bytes of the Smart Trace Buffer into @buf. */
int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size)
{}

#if defined(CONFIG_DEBUG_FS)

static int smu_stb_debugfs_open(struct inode *inode, struct file *filp)
{}

static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t size,
				loff_t *pos)
{}

static int smu_stb_debugfs_release(struct inode *inode, struct file *filp)
{}

/*
 * We have to define not only the read method but also the open and
 * release methods: .read returns at most PAGE_SIZE of data per call
 * and so is invoked multiple times.
 * We allocate the STB buffer in .open and release it in .release.
 */
static const struct file_operations smu_stb_debugfs_fops =;

#endif

/* Create the STB debugfs entry for @adev (no-op without CONFIG_DEBUG_FS). */
void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev)
{}

/* RAS interface: report the retired HBM bad-page count to PMFW. */
int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size)
{}

/* RAS interface: report the HBM bad-channel flag to PMFW. */
int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size)
{}

/* RAS interface: notify PMFW of an RMA (return material authorization) reason. */
int smu_send_rma_reason(struct smu_context *smu)
{}