/* linux/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c */

/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "soc15.h"
#include "soc15d.h"

#include "gc/gc_9_4_2_offset.h"
#include "gc/gc_9_4_2_sh_mask.h"
#include "gfx_v9_0.h"

#include "gfx_v9_4_2.h"
#include "amdgpu_ras.h"
#include "amdgpu_gfx.h"

/*
 * NOTE(review): every definition in this extract has had its value/body
 * elided; only identifiers and signatures remain.  Comments below describe
 * intent suggested by the names -- confirm against the complete file.
 */

/* Iteration bounds for shader-engine / CU / SIMD / wave walks (values elided). */
#define SE_ID_MAX
#define CU_ID_MAX
#define SIMD_ID_MAX
#define WAVE_ID_MAX

/* Enumerates UTC sub-block types for EDC error reporting (members elided). */
enum gfx_v9_4_2_utc_type {};

/* Describes one UTC block instance for error counting (fields elided). */
struct gfx_v9_4_2_utc_block {};

/* Golden register overrides applied to Aldebaran die 0 (entries elided). */
static const struct soc15_reg_golden golden_settings_gc_9_4_2_alde_die_0[] =;

/* Golden register overrides applied to Aldebaran die 1 (entries elided). */
static const struct soc15_reg_golden golden_settings_gc_9_4_2_alde_die_1[] =;

/* Golden register overrides common to all Aldebaran dies (entries elided). */
static const struct soc15_reg_golden golden_settings_gc_9_4_2_alde[] =;

/*
 * This shader is used to clear VGPRs and LDS, and also write the input
 * pattern into the write back buffer, which will be used by the driver to
 * check whether all SIMDs have been covered.
 */
static const u32 vgpr_init_compute_shader_aldebaran[] =;

/* Registers programmed before dispatching the VGPR-init shader
 * (entries elided; presumably register/value pairs for the dispatch). */
const struct soc15_reg_entry vgpr_init_regs_aldebaran[] =;

/*
 * The below shaders are used to clear SGPRs, and also write the input
 * pattern into the write back buffer.  The first two dispatches should be
 * scheduled simultaneously, which makes sure that all SGPRs can be
 * allocated; so dispatch 1 needs to check the write back buffer before
 * being scheduled, to make sure the waves of dispatch 0 are all dispatched
 * to all SIMDs in a balanced way.  Both dispatch 0 and dispatch 1 should be
 * halted until all waves are dispatched, and then the driver writes a
 * pattern to the shared memory to make all waves continue.
 */
static const u32 sgpr112_init_compute_shader_aldebaran[] =;

const struct soc15_reg_entry sgpr112_init_regs_aldebaran[] =;

static const u32 sgpr96_init_compute_shader_aldebaran[] =;

const struct soc15_reg_entry sgpr96_init_regs_aldebaran[] =;

/*
 * This shader is used to clear the uninitialized SGPRs after the above
 * two dispatches; because of a hardware feature, dispatch 0 cannot clear
 * the top-hole SGPRs.  Therefore 4 waves per SIMD are needed to cover
 * these SGPRs.
 */
static const u32 sgpr64_init_compute_shader_aldebaran[] =;

const struct soc15_reg_entry sgpr64_init_regs_aldebaran[] =;

/*
 * Submit @shader_ptr (@shader_size dwords) as a compute dispatch on @ring
 * via @ib, programming the @init_regs/@regs_size register list first;
 * @wb_gpu_addr and @pattern describe the write back buffer the shader
 * reports into, and *@fence_ptr receives the completion fence.
 * NOTE(review): body elided in this extract -- description inferred from
 * the signature and the shader comments above; presumably returns 0 on
 * success or a negative errno.  Confirm against the full source.
 */
static int gfx_v9_4_2_run_shader(struct amdgpu_device *adev,
				 struct amdgpu_ring *ring,
				 struct amdgpu_ib *ib,
				 const u32 *shader_ptr, u32 shader_size,
				 const struct soc15_reg_entry *init_regs, u32 regs_size,
				 u32 compute_dim_x, u64 wb_gpu_addr, u32 pattern,
				 struct dma_fence **fence_ptr)
{}

/* Dump the wave assignment recorded in the write back buffer @wb_ptr
 * (body elided). */
static void gfx_v9_4_2_log_wave_assignment(struct amdgpu_device *adev, uint32_t *wb_ptr)
{}

/*
 * Check (or, when @wait is true, presumably poll) the write back buffer
 * for @num_wave entries matching @pattern under @mask.
 * NOTE(review): body elided -- behavior inferred from name/signature.
 */
static int gfx_v9_4_2_wait_for_waves_assigned(struct amdgpu_device *adev,
					      uint32_t *wb_ptr, uint32_t mask,
					      uint32_t pattern, uint32_t num_wave, bool wait)
{}

/* Run the SGPR-init shaders defined above to scrub SGPRs (body elided). */
static int gfx_v9_4_2_do_sgprs_init(struct amdgpu_device *adev)
{}

/* Run the VGPR-init shader defined above to scrub VGPRs/LDS (body elided). */
static int gfx_v9_4_2_do_vgprs_init(struct amdgpu_device *adev)
{}

/*
 * Public entry point for the EDC GPR workarounds: presumably runs the
 * SGPR and VGPR init dispatches above.  NOTE(review): body elided --
 * inferred from the name and the shader comments; confirm in full source.
 */
int gfx_v9_4_2_do_edc_gpr_workarounds(struct amdgpu_device *adev)
{}

/* Forward declarations; the definitions appear near the end of this file. */
static void gfx_v9_4_2_query_sq_timeout_status(struct amdgpu_device *adev);
static void gfx_v9_4_2_reset_sq_timeout_status(struct amdgpu_device *adev);

/* Apply the golden register settings for @die_id (body elided; presumably
 * selects between the per-die and common tables above). */
void gfx_v9_4_2_init_golden_registers(struct amdgpu_device *adev,
				      uint32_t die_id)
{}

/* Initialize debug-trap configuration for VMIDs from @first_vmid to
 * @last_vmid (body elided; NOTE(review): range inclusivity cannot be
 * confirmed from this extract). */
void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev,
				uint32_t first_vmid,
				uint32_t last_vmid)
{}

/* Program the power brake sequence (body elided). */
void gfx_v9_4_2_set_power_brake_sequence(struct amdgpu_device *adev)
{}

/* Registers holding EDC (error detect and correct) counters (entries elided). */
static const struct soc15_reg_entry gfx_v9_4_2_edc_counter_regs[] =;

/* Select the shader engine / shader array / instance targeted by
 * subsequent indexed register accesses (body elided). */
static void gfx_v9_4_2_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				  u32 sh_num, u32 instance)
{}

/* Maps EDC counter register bitfields to RAS field descriptions
 * (entries elided). */
static const struct soc15_ras_field_entry gfx_v9_4_2_ras_fields[] =;

/* Names of the VML2 walker memories (entries elided). */
static const char * const vml2_walker_mems[] =;

/* UTC blocks scanned for EDC errors (entries elided; NOTE(review): table
 * is non-const -- check in the full source whether it is mutated). */
static struct gfx_v9_4_2_utc_block gfx_v9_4_2_utc_blocks[] =;

/* EA error status register descriptor (initializer elided). */
static const struct soc15_reg_entry gfx_v9_4_2_ea_err_status_regs =;

/*
 * Decode @value read from EDC counter register @reg at @se_id/@inst_id and
 * accumulate into *@sec_count / *@ded_count.
 * NOTE(review): body elided -- inferred from the signature; SEC/DED
 * presumably stand for single-error-corrected / double-error-detected.
 */
static int gfx_v9_4_2_get_reg_error_count(struct amdgpu_device *adev,
					  const struct soc15_reg_entry *reg,
					  uint32_t se_id, uint32_t inst_id,
					  uint32_t value, uint32_t *sec_count,
					  uint32_t *ded_count)
{}

/* Tally SRAM EDC errors into *@sec_count / *@ded_count (body elided;
 * presumably walks gfx_v9_4_2_edc_counter_regs). */
static int gfx_v9_4_2_query_sram_edc_count(struct amdgpu_device *adev,
				uint32_t *sec_count, uint32_t *ded_count)
{}

/* Log EDC counts for one instance of UTC block @blk (body elided). */
static void gfx_v9_4_2_log_utc_edc_count(struct amdgpu_device *adev,
					 struct gfx_v9_4_2_utc_block *blk,
					 uint32_t instance, uint32_t sec_cnt,
					 uint32_t ded_cnt)
{}

/* Tally UTC EDC errors into *@sec_count / *@ded_count (body elided;
 * presumably walks gfx_v9_4_2_utc_blocks). */
static int gfx_v9_4_2_query_utc_edc_count(struct amdgpu_device *adev,
					  uint32_t *sec_count,
					  uint32_t *ded_count)
{}

/* Fill *@ras_error_status with the accumulated error counts (body elided;
 * presumably combines the SRAM and UTC queries above). */
static void gfx_v9_4_2_query_ras_error_count(struct amdgpu_device *adev,
					    void *ras_error_status)
{}

/* Clear latched UTC error status (body elided). */
static void gfx_v9_4_2_reset_utc_err_status(struct amdgpu_device *adev)
{}

/* Clear latched EA error status (body elided). */
static void gfx_v9_4_2_reset_ea_err_status(struct amdgpu_device *adev)
{}

/* Zero the RAS error counters (body elided). */
static void gfx_v9_4_2_reset_ras_error_count(struct amdgpu_device *adev)
{}

/* Read and report EA error status (body elided). */
static void gfx_v9_4_2_query_ea_err_status(struct amdgpu_device *adev)
{}

/* Read and report UTC error status (body elided). */
static void gfx_v9_4_2_query_utc_err_status(struct amdgpu_device *adev)
{}

/* Aggregate RAS error status query (body elided; presumably calls the
 * EA/UTC/SQ-timeout query helpers). */
static void gfx_v9_4_2_query_ras_error_status(struct amdgpu_device *adev)
{}

/* Aggregate RAS error status reset (body elided; presumably calls the
 * EA/UTC/SQ-timeout reset helpers). */
static void gfx_v9_4_2_reset_ras_error_status(struct amdgpu_device *adev)
{}

/* Enable the SQ watchdog timer (body elided). */
static void gfx_v9_4_2_enable_watchdog_timer(struct amdgpu_device *adev)
{}

/* Indirect read of per-wave state at @address for @simd/@wave (body elided). */
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{}

/* Log CU timeout details decoded from @status (body elided). */
static void gfx_v9_4_2_log_cu_timeout_status(struct amdgpu_device *adev,
					uint32_t status)
{}

/* Query SQ timeout status (body elided; forward-declared above). */
static void gfx_v9_4_2_query_sq_timeout_status(struct amdgpu_device *adev)
{}

/* Reset SQ timeout status (body elided; forward-declared above). */
static void gfx_v9_4_2_reset_sq_timeout_status(struct amdgpu_device *adev)
{}



/* RAS hardware-ops table (initializer elided; presumably wires up the
 * query/reset helpers defined above). */
struct amdgpu_ras_block_hw_ops  gfx_v9_4_2_ras_ops =;

/* GFX RAS object exposed to the rest of the driver (initializer elided). */
struct amdgpu_gfx_ras gfx_v9_4_2_ras =;