#include "amdgpu.h"
#include "df_v3_6.h"
#include "df/df_3_6_default.h"
#include "df/df_3_6_offset.h"
#include "df/df_3_6_sh_mask.h"
/*
 * Perfmon-related constants (values elided in this excerpt — confirm
 * against the full source):
 * - DF_3_6_SMN_REG_INST_DIST / DF_3_6_INST_CNT: presumably the SMN
 *   register address stride between DF instances and the instance count.
 * - DF_V3_6_MAX_COUNTERS: number of perfmon counter slots the driver
 *   tracks (see the pmc_* helpers below).
 * - DF_V3_6_GET_EVENT/INSTANCE/UNITMASK(x): presumably extract fields
 *   from a packed 64-bit counter config value.
 * - DF_V3_6_PERFMON_OVERFLOW: presumably the counter overflow point
 *   used when accumulating counts in df_v3_6_pmc_get_count().
 */
#define DF_3_6_SMN_REG_INST_DIST …
#define DF_3_6_INST_CNT …
#define DF_V3_6_MAX_COUNTERS …
#define DF_V3_6_GET_EVENT(x) …
#define DF_V3_6_GET_INSTANCE(x) …
#define DF_V3_6_GET_UNITMASK(x) …
#define DF_V3_6_PERFMON_OVERFLOW …
static u32 df_v3_6_channel_number[] = …;
/*
 * Read a 64-bit value through the DF indirect access interface.
 * @ficaa_val: indirect access address/selector value.
 * NOTE(review): body elided in this excerpt — semantics inferred from
 * the FICA naming (Fabric Indirect Config Access); confirm in full source.
 */
static uint64_t df_v3_6_get_fica(struct amdgpu_device *adev,
uint32_t ficaa_val)
{ … }
/*
 * Write through the DF indirect access interface: @ficaa_val selects the
 * target, @ficadl_val/@ficadh_val carry the low/high data dwords.
 * NOTE(review): body elided — inferred from parameter names; verify.
 */
static void df_v3_6_set_fica(struct amdgpu_device *adev, uint32_t ficaa_val,
uint32_t ficadl_val, uint32_t ficadh_val)
{ … }
/*
 * Read a lo/hi pair of perfmon registers, returning the values through
 * @lo_val and @hi_val. Presumably performed under a lock so the pair is
 * read atomically — body elided, confirm in full source.
 */
static void df_v3_6_perfmon_rreg(struct amdgpu_device *adev,
uint32_t lo_addr, uint32_t *lo_val,
uint32_t hi_addr, uint32_t *hi_val)
{ … }
/*
 * Write a lo/hi pair of perfmon registers. Counterpart to
 * df_v3_6_perfmon_rreg(); body elided in this excerpt.
 */
static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr,
uint32_t lo_val, uint32_t hi_addr, uint32_t hi_val)
{ … }
/*
 * Single attempt to arm a perfmon counter by writing the lo/hi control
 * pair; the int return presumably reports whether the arm took effect
 * (used by the retry wrapper below). Body elided — confirm return
 * convention (0 on success vs. status flag) in full source.
 */
static int df_v3_6_perfmon_arm_with_status(struct amdgpu_device *adev,
uint32_t lo_addr, uint32_t lo_val,
uint32_t hi_addr, uint32_t hi_val)
{ … }
/*
 * Retry budget for df_v3_6_perfmon_arm_with_retry(): total timeout and
 * per-attempt polling interval, in microseconds (values elided).
 */
#define ARM_RETRY_USEC_TIMEOUT …
#define ARM_RETRY_USEC_INTERVAL …
/*
 * Arm a perfmon counter, retrying df_v3_6_perfmon_arm_with_status()
 * presumably up to ARM_RETRY_USEC_TIMEOUT at ARM_RETRY_USEC_INTERVAL
 * spacing. Returns an int status — body elided, confirm error value
 * (likely negative errno or -ETIMEDOUT-style) in full source.
 */
static int df_v3_6_perfmon_arm_with_retry(struct amdgpu_device *adev,
uint32_t lo_addr, uint32_t lo_val,
uint32_t hi_addr, uint32_t hi_val)
{ … }
/*
 * sysfs "show" callback for the df_cntr_avail attribute declared just
 * below: formats the number of available DF perfmon counters into @buf
 * and returns the byte count. Body elided — confirm exact output format.
 */
static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev,
struct device_attribute *attr,
char *buf)
{ … }
static DEVICE_ATTR(df_cntr_avail, S_IRUGO, df_v3_6_get_df_cntr_avail, NULL);
/*
 * Query DF address-hashing configuration and presumably cache it in
 * @adev for later address translation. Body elided — confirm which
 * hash registers/fields are read in full source.
 */
static void df_v3_6_query_hashes(struct amdgpu_device *adev)
{ … }
/*
 * Software-side init for the DF block; presumably initializes perfmon
 * counter bookkeeping and creates the df_cntr_avail sysfs file.
 * Body elided — verify against full source. Paired with df_v3_6_sw_fini().
 */
static void df_v3_6_sw_init(struct amdgpu_device *adev)
{ … }
/*
 * Software-side teardown; presumably removes the sysfs attribute added
 * in df_v3_6_sw_init(). Body elided — verify in full source.
 */
static void df_v3_6_sw_fini(struct amdgpu_device *adev)
{ … }
/*
 * Toggle DF register broadcast mode (@enable): presumably switches
 * register accesses between broadcast-to-all-instances and
 * per-instance addressing. Body elided — confirm in full source.
 */
static void df_v3_6_enable_broadcast_mode(struct amdgpu_device *adev,
bool enable)
{ … }
/*
 * Return the framebuffer channel-number encoding; presumably a raw
 * register field later decoded via df_v3_6_channel_number[].
 * Body elided — confirm in full source.
 */
static u32 df_v3_6_get_fb_channel_number(struct amdgpu_device *adev)
{ … }
/*
 * Return the HBM channel count; presumably derived from the fb channel
 * encoding / df_v3_6_channel_number[] table. Body elided — verify.
 */
static u32 df_v3_6_get_hbm_channel_number(struct amdgpu_device *adev)
{ … }
/*
 * Enable/disable DF medium-grain clock gating (MGCG) per @enable.
 * Body elided — confirm the exact register sequence in full source.
 */
static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{ … }
/*
 * Report current DF clock-gating state by OR-ing flag bits into
 * @flags (standard amdgpu get_clockgating_state convention —
 * presumed; body elided).
 */
static void df_v3_6_get_clockgating_state(struct amdgpu_device *adev,
u64 *flags)
{ … }
/*
 * Return true if counter slot @counter_idx is currently assigned to
 * @config — presumed from the name; body elided, verify comparison
 * semantics (e.g. whether deferred bits are masked) in full source.
 */
static bool df_v3_6_pmc_has_counter(struct amdgpu_device *adev,
uint64_t config,
int counter_idx)
{ … }
/*
 * Compute the lo/hi register addresses for counter @counter_idx.
 * @is_ctrl presumably selects the control register pair instead of the
 * count register pair. Outputs via @lo_base_addr/@hi_base_addr.
 * Body elided — confirm address derivation in full source.
 */
static void df_v3_6_pmc_get_addr(struct amdgpu_device *adev,
uint64_t config,
int counter_idx,
int is_ctrl,
uint32_t *lo_base_addr,
uint32_t *hi_base_addr)
{ … }
/*
 * Resolve the register addresses used to READ a counter; presumably a
 * thin wrapper over df_v3_6_pmc_get_addr() with is_ctrl = 0.
 * Body elided — verify in full source.
 */
static void df_v3_6_pmc_get_read_settings(struct amdgpu_device *adev,
uint64_t config,
int counter_idx,
uint32_t *lo_base_addr,
uint32_t *hi_base_addr)
{ … }
/*
 * Resolve the CONTROL register addresses and the lo/hi control values
 * to program for @config at @counter_idx; @is_enable presumably selects
 * enable vs. disable encodings (event/instance/unitmask fields likely
 * come from the DF_V3_6_GET_* macros). Returns int status — body
 * elided, confirm error cases in full source.
 */
static int df_v3_6_pmc_get_ctrl_settings(struct amdgpu_device *adev,
uint64_t config,
int counter_idx,
uint32_t *lo_base_addr,
uint32_t *hi_base_addr,
uint32_t *lo_val,
uint32_t *hi_val,
bool is_enable)
{ … }
/*
 * Claim a free counter slot (out of DF_V3_6_MAX_COUNTERS) for @config.
 * Return presumably the slot index on success or a negative errno when
 * none are free — body elided, confirm convention in full source.
 */
static int df_v3_6_pmc_add_cntr(struct amdgpu_device *adev,
uint64_t config)
{ … }
#define DEFERRED_ARM_MASK …
/*
 * Set or clear (per @is_deferred) the deferred-arm marker on the
 * counter slot holding @config — presumably via DEFERRED_ARM_MASK.
 * Returns int status; body elided, verify in full source.
 */
static int df_v3_6_pmc_set_deferred(struct amdgpu_device *adev,
uint64_t config, int counter_idx,
bool is_deferred)
{ … }
/*
 * Return true if the counter slot for @config is marked deferred
 * (presumably by testing DEFERRED_ARM_MASK). Body elided — verify.
 */
static bool df_v3_6_pmc_is_deferred(struct amdgpu_device *adev,
uint64_t config,
int counter_idx)
{ … }
/*
 * Release the counter slot held by @config so it can be reassigned.
 * Inverse of df_v3_6_pmc_add_cntr() — presumed; body elided.
 */
static void df_v3_6_pmc_release_cntr(struct amdgpu_device *adev,
uint64_t config,
int counter_idx)
{ … }
/*
 * Zero the hardware count registers for @config at @counter_idx —
 * presumably via pmc_get_read_settings() + perfmon_wreg(). Body
 * elided; confirm in full source.
 */
static void df_v3_6_reset_perfmon_cntr(struct amdgpu_device *adev,
uint64_t config,
int counter_idx)
{ … }
/*
 * Start counting for @config. @is_add presumably distinguishes the
 * initial add (allocate a slot, reset counter) from a plain restart
 * (arm the control registers). Returns int status — body elided,
 * confirm the is_add semantics in full source.
 */
static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
int counter_idx, int is_add)
{ … }
/*
 * Stop counting for @config; @is_remove presumably additionally
 * releases the counter slot (counterpart of pmc_start's is_add).
 * Returns int status — body elided, verify in full source.
 */
static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
int counter_idx, int is_remove)
{ … }
/*
 * Read the current 64-bit value of the counter for @config into
 * @count — presumably combining the lo/hi reads from perfmon_rreg(),
 * possibly accounting for DF_V3_6_PERFMON_OVERFLOW. Body elided;
 * confirm overflow handling in full source.
 */
static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
uint64_t config,
int counter_idx,
uint64_t *count)
{ … }
/*
 * Return whether the DF has RAS poison (data-poisoning) mode enabled —
 * presumed from the name; body elided, confirm which register/field is
 * consulted in full source.
 */
static bool df_v3_6_query_ras_poison_mode(struct amdgpu_device *adev)
{ … }
const struct amdgpu_df_funcs df_v3_6_funcs = …;