
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/module.h>

#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "amdgpu_gfx.h"
#include "amdgpu_ucode.h"
#include "clearstate_si.h"
#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
#include "gca/gfx_6_0_d.h"
#include "gca/gfx_6_0_sh_mask.h"
#include "gmc/gmc_6_0_d.h"
#include "gmc/gmc_6_0_sh_mask.h"
#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"
#include "gca/gfx_7_2_enum.h"
#include "si_enums.h"
#include "si.h"

static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev);

MODULE_FIRMWARE("amdgpu/tahiti_pfp.bin");
MODULE_FIRMWARE("amdgpu/tahiti_me.bin");
MODULE_FIRMWARE("amdgpu/tahiti_ce.bin");
MODULE_FIRMWARE("amdgpu/tahiti_rlc.bin");

MODULE_FIRMWARE("amdgpu/pitcairn_pfp.bin");
MODULE_FIRMWARE("amdgpu/pitcairn_me.bin");
MODULE_FIRMWARE("amdgpu/pitcairn_ce.bin");
MODULE_FIRMWARE("amdgpu/pitcairn_rlc.bin");

MODULE_FIRMWARE("amdgpu/verde_pfp.bin");
MODULE_FIRMWARE("amdgpu/verde_me.bin");
MODULE_FIRMWARE("amdgpu/verde_ce.bin");
MODULE_FIRMWARE("amdgpu/verde_rlc.bin");

MODULE_FIRMWARE("amdgpu/oland_pfp.bin");
MODULE_FIRMWARE("amdgpu/oland_me.bin");
MODULE_FIRMWARE("amdgpu/oland_ce.bin");
MODULE_FIRMWARE("amdgpu/oland_rlc.bin");

MODULE_FIRMWARE("amdgpu/hainan_pfp.bin");
MODULE_FIRMWARE("amdgpu/hainan_me.bin");
MODULE_FIRMWARE("amdgpu/hainan_ce.bin");
MODULE_FIRMWARE("amdgpu/hainan_rlc.bin");

static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev);
static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
//static void gfx_v6_0_init_cp_pg_table(struct amdgpu_device *adev);
static void gfx_v6_0_init_pg(struct amdgpu_device *adev);

#define ARRAY_MODE(x)				((x) << GB_TILE_MODE0__ARRAY_MODE__SHIFT)
#define PIPE_CONFIG(x)				((x) << GB_TILE_MODE0__PIPE_CONFIG__SHIFT)
#define TILE_SPLIT(x)				((x) << GB_TILE_MODE0__TILE_SPLIT__SHIFT)
#define MICRO_TILE_MODE(x)			((x) << GB_TILE_MODE0__MICRO_TILE_MODE__SHIFT)
#define SAMPLE_SPLIT(x)				((x) << GB_TILE_MODE0__SAMPLE_SPLIT__SHIFT)
#define BANK_WIDTH(x)				((x) << GB_TILE_MODE0__BANK_WIDTH__SHIFT)
#define BANK_HEIGHT(x)				((x) << GB_TILE_MODE0__BANK_HEIGHT__SHIFT)
#define MACRO_TILE_ASPECT(x)			((x) << GB_TILE_MODE0__MACRO_TILE_ASPECT__SHIFT)
#define NUM_BANKS(x)				((x) << GB_TILE_MODE0__NUM_BANKS__SHIFT)

static const u32 verde_rlc_save_restore_register_list[] =;

static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
{}
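
/*
 * Hedged sketch of the usual SI microcode load flow (illustrative only, hence
 * the _sketch suffix; the real body above is elided). The chip name is really
 * derived from adev->asic_type, and amdgpu_ucode_request()'s signature varies
 * by kernel version; the three-argument form below is an assumption.
static int gfx_v6_0_init_microcode_sketch(struct amdgpu_device *adev)
{
	static const char * const suffix[] = { "pfp", "me", "ce", "rlc" };
	const struct firmware **fw[] = {
		&adev->gfx.pfp_fw, &adev->gfx.me_fw,
		&adev->gfx.ce_fw, &adev->gfx.rlc_fw,
	};
	const char *chip_name = "verde";	// assumption: picked from adev->asic_type
	char fw_name[30];
	int i, err;

	for (i = 0; i < 4; i++) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_%s.bin",
			 chip_name, suffix[i]);
		err = amdgpu_ucode_request(adev, fw[i], fw_name);
		if (err) {
			dev_err(adev->dev, "gfx6: failed to load %s\n", fw_name);
			return err;	// firmware is released on teardown
		}
	}
	return 0;
}
*/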

static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
{}

static void gfx_v6_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				  u32 sh_num, u32 instance, int xcc_id)
{}
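
/*
 * Hedged sketch of the SE/SH selection above: on SI this is a single
 * GRBM_GFX_INDEX write, with 0xffffffff meaning "broadcast" for a field,
 * and xcc_id unused on this generation. The SE_INDEX/SH_INDEX/broadcast
 * macro names follow si_enums.h conventions and are assumptions here.
static void gfx_v6_0_select_se_sh_sketch(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 instance, int xcc_id)
{
	u32 data;

	if (instance == 0xffffffff)
		data = INSTANCE_BROADCAST_WRITES;
	else
		data = INSTANCE_INDEX(instance);

	if (se_num == 0xffffffff && sh_num == 0xffffffff)
		data |= SE_BROADCAST_WRITES | SH_BROADCAST_WRITES;
	else if (se_num == 0xffffffff)
		data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
	else if (sh_num == 0xffffffff)
		data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
	else
		data |= SE_INDEX(se_num) | SH_INDEX(sh_num);
	WREG32(mmGRBM_GFX_INDEX, data);
}
*/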

static u32 gfx_v6_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{}

static void gfx_v6_0_raster_config(struct amdgpu_device *adev, u32 *rconf)
{}

static void gfx_v6_0_write_harvested_raster_configs(struct amdgpu_device *adev,
						    u32 raster_config, unsigned rb_mask,
						    unsigned num_rb)
{}

static void gfx_v6_0_setup_rb(struct amdgpu_device *adev)
{}

static void gfx_v6_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
						 u32 bitmap)
{}

static u32 gfx_v6_0_get_cu_enabled(struct amdgpu_device *adev)
{}

static void gfx_v6_0_setup_spi(struct amdgpu_device *adev)
{}

static void gfx_v6_0_config_init(struct amdgpu_device *adev)
{}

static void gfx_v6_0_constants_init(struct amdgpu_device *adev)
{}

static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{}
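
/*
 * Hedged sketch of the classic scratch-register ring test (illustrative only):
 * poison a scratch register from the CPU, ask the CP to overwrite it with a
 * SET_CONFIG_REG packet, then poll until the new value lands.
static int gfx_v6_0_ring_test_ring_sketch(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	int r;

	WREG32(mmSCRATCH_REG0, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	amdgpu_ring_write(ring, mmSCRATCH_REG0 - PACKET3_SET_CONFIG_REG_START);
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmSCRATCH_REG0) == 0xDEADBEEF)
			break;
		udelay(1);
	}
	return (i < adev->usec_timeout) ? 0 : -ETIMEDOUT;
}
*/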

static void gfx_v6_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
{}

static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
				     u64 seq, unsigned flags)
{}
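
/*
 * Hedged sketch of the fence emission above: SI first flushes read caches
 * with SURFACE_SYNC, then emits an EVENT_WRITE_EOP packet that writes @seq
 * to @addr and optionally raises an interrupt. The CP_EOP_DONE_DATA_CNTL
 * field names are assumptions here.
static void gfx_v6_0_ring_emit_fence_sketch(struct amdgpu_ring *ring, u64 addr,
					    u64 seq, unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;

	// flush read caches over GART before signaling
	amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
			  PACKET3_TC_ACTION_ENA |
			  PACKET3_SH_KCACHE_ACTION_ENA |
			  PACKET3_SH_ICACHE_ACTION_ENA);
	amdgpu_ring_write(ring, 0xFFFFFFFF);	// CP_COHER_SIZE
	amdgpu_ring_write(ring, 0);		// CP_COHER_BASE
	amdgpu_ring_write(ring, 10);		// poll interval

	// EVENT_WRITE_EOP - flush caches, send int
	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	amdgpu_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
			  ((write64bit ? 2 : 1) << CP_EOP_DONE_DATA_CNTL__DATA_SEL__SHIFT) |
			  ((int_sel ? 2 : 0) << CP_EOP_DONE_DATA_CNTL__INT_SEL__SHIFT));
	amdgpu_ring_write(ring, lower_32_bits(seq));
	amdgpu_ring_write(ring, upper_32_bits(seq));
}
*/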

static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{}

/**
 * gfx_v6_0_ring_test_ib - basic ring IB test
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Allocate an IB and execute it on the gfx ring (SI).
 * Provides a basic gfx ring test to verify that IBs are working.
 * Returns 0 on success, error on failure.
 */
static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{}
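
/*
 * Hedged sketch matching the kernel-doc above: allocate a 3-dword IB that
 * rewrites a poisoned scratch register, schedule it, wait on its fence, then
 * check the register. Helper signatures follow recent amdgpu kernels and are
 * assumptions (e.g. amdgpu_ib_free() dropped its adev argument at some point).
static int gfx_v6_0_ring_test_ib_sketch(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	long r;

	WREG32(mmSCRATCH_REG0, 0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
	if (r)
		return r;

	ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
	ib.ptr[1] = mmSCRATCH_REG0 - PACKET3_SET_CONFIG_REG_START;
	ib.ptr[2] = 0xDEADBEEF;
	ib.length_dw = 3;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;		// fence never signaled
	else if (r > 0)
		r = (RREG32(mmSCRATCH_REG0) == 0xDEADBEEF) ? 0 : -EINVAL;
err:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
	return r;
}
*/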

static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{}

static int gfx_v6_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
{}

static int gfx_v6_0_cp_gfx_start(struct amdgpu_device *adev)
{}

static int gfx_v6_0_cp_gfx_resume(struct amdgpu_device *adev)
{}

static u64 gfx_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{}

static u64 gfx_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{}

static void gfx_v6_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
{}

static void gfx_v6_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{}
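
/*
 * Hedged sketch of the ring pointer accessors above: the read pointer comes
 * from a writeback slot the CP updates in memory, while the write pointer is
 * pushed to the per-ring CP_RB0_WPTR register. The rptr_cpu_addr field name
 * follows recent kernels and is an assumption (older trees index wb.wb[]).
static u64 gfx_v6_0_ring_get_rptr_sketch(struct amdgpu_ring *ring)
{
	return *ring->rptr_cpu_addr;	// CP writes the rptr back to memory
}

static void gfx_v6_0_ring_set_wptr_gfx_sketch(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
	(void)RREG32(mmCP_RB0_WPTR);	// read back to flush the write
}
*/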

static int gfx_v6_0_cp_compute_resume(struct amdgpu_device *adev)
{}

static void gfx_v6_0_cp_enable(struct amdgpu_device *adev, bool enable)
{}

static int gfx_v6_0_cp_load_microcode(struct amdgpu_device *adev)
{}

static void gfx_v6_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
					       bool enable)
{}

static int gfx_v6_0_cp_resume(struct amdgpu_device *adev)
{}

static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{}

static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{}

static void gfx_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
				    uint32_t reg, uint32_t val)
{}
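
/*
 * Hedged sketch of the wreg emission above: register writes from the ring go
 * through a WRITE_DATA packet with DST_SEL = memory-mapped register, and the
 * engine select depends on whether this is the gfx ring (PFP) or a compute
 * ring (ME). Packet macro names follow si_enums.h and are assumptions.
static void gfx_v6_0_ring_emit_wreg_sketch(struct amdgpu_ring *ring,
					   uint32_t reg, uint32_t val)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);

	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
				 WRITE_DATA_DST_SEL(0)));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);	// upper reg address, unused here
	amdgpu_ring_write(ring, val);
}
*/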

static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
{}

static void gfx_v6_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
{}

static void gfx_v6_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
{}

static void gfx_v6_0_update_rlc(struct amdgpu_device *adev, u32 rlc)
{}

static u32 gfx_v6_0_halt_rlc(struct amdgpu_device *adev)
{}

static void gfx_v6_0_rlc_stop(struct amdgpu_device *adev)
{}

static void gfx_v6_0_rlc_start(struct amdgpu_device *adev)
{}

static void gfx_v6_0_rlc_reset(struct amdgpu_device *adev)
{}

static bool gfx_v6_0_lbpw_supported(struct amdgpu_device *adev)
{}

static void gfx_v6_0_init_cg(struct amdgpu_device *adev)
{}

static int gfx_v6_0_rlc_resume(struct amdgpu_device *adev)
{}

static void gfx_v6_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
{}

static void gfx_v6_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
{}
/*
static void gfx_v6_0_update_cg(struct amdgpu_device *adev,
			       bool enable)
{
	gfx_v6_0_enable_gui_idle_interrupt(adev, false);
	if (enable) {
		gfx_v6_0_enable_mgcg(adev, true);
		gfx_v6_0_enable_cgcg(adev, true);
	} else {
		gfx_v6_0_enable_cgcg(adev, false);
		gfx_v6_0_enable_mgcg(adev, false);
	}
	gfx_v6_0_enable_gui_idle_interrupt(adev, true);
}
*/

static void gfx_v6_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev,
						bool enable)
{}

static void gfx_v6_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev,
						bool enable)
{}

static void gfx_v6_0_enable_cp_pg(struct amdgpu_device *adev, bool enable)
{}

static void gfx_v6_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
{}
/*
static void gfx_v6_0_init_cp_pg_table(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	volatile u32 *dst_ptr;
	int me, i, max_me = 4;
	u32 bo_offset = 0;
	u32 table_offset, table_size;

	if (adev->asic_type == CHIP_KAVERI)
		max_me = 5;

	if (adev->gfx.rlc.cp_table_ptr == NULL)
		return;

	dst_ptr = adev->gfx.rlc.cp_table_ptr;
	for (me = 0; me < max_me; me++) {
		if (me == 0) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.ce_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 1) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.pfp_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 2) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.me_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 3) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec2_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		}

		for (i = 0; i < table_size; i++) {
			dst_ptr[bo_offset + i] =
				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
		}

		bo_offset += table_size;
	}
}
*/
static void gfx_v6_0_enable_gfx_cgpg(struct amdgpu_device *adev,
				     bool enable)
{}

static void gfx_v6_0_init_ao_cu_mask(struct amdgpu_device *adev)
{}

static void gfx_v6_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
					    bool enable)
{}

static void gfx_v6_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev,
					     bool enable)
{}

static void gfx_v6_0_init_gfx_cgpg(struct amdgpu_device *adev)
{}

static void gfx_v6_0_update_gfx_pg(struct amdgpu_device *adev, bool enable)
{}

static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev)
{}

static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev,
				    volatile u32 *buffer)
{}

static void gfx_v6_0_init_pg(struct amdgpu_device *adev)
{}

static void gfx_v6_0_fini_pg(struct amdgpu_device *adev)
{}

static uint64_t gfx_v6_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{}
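
/*
 * Hedged sketch of the clock counter read above: latch the counter by
 * writing RLC_CAPTURE_GPU_CLOCK_COUNT, then assemble the 64-bit value from
 * the LSB/MSB pair under the shared gpu_clock_mutex.
static uint64_t gfx_v6_0_get_gpu_clock_counter_sketch(struct amdgpu_device *adev)
{
	uint64_t clock;

	mutex_lock(&adev->gfx.gpu_clock_mutex);
	WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&adev->gfx.gpu_clock_mutex);
	return clock;
}
*/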

static void gfx_v6_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
{}

static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{}
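
/*
 * Hedged sketch of the indirect wave read above: select wave, SIMD and
 * register address in SQ_IND_INDEX (with FORCE_READ set), then fetch the
 * value from SQ_IND_DATA.
static uint32_t wave_read_ind_sketch(struct amdgpu_device *adev, uint32_t simd,
				     uint32_t wave, uint32_t address)
{
	WREG32(mmSQ_IND_INDEX,
	       (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
	       (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
	       (address << SQ_IND_INDEX__INDEX__SHIFT) |
	       (SQ_IND_INDEX__FORCE_READ_MASK));
	return RREG32(mmSQ_IND_DATA);
}
*/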

static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
			   uint32_t wave, uint32_t thread,
			   uint32_t regno, uint32_t num, uint32_t *out)
{}

static void gfx_v6_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{}

static void gfx_v6_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				     uint32_t wave, uint32_t start,
				     uint32_t size, uint32_t *dst)
{}

static void gfx_v6_0_select_me_pipe_q(struct amdgpu_device *adev,
				  u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
{}

static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs =;

static const struct amdgpu_rlc_funcs gfx_v6_0_rlc_funcs =;

static int gfx_v6_0_early_init(void *handle)
{}

static int gfx_v6_0_sw_init(void *handle)
{}

static int gfx_v6_0_sw_fini(void *handle)
{}

static int gfx_v6_0_hw_init(void *handle)
{}

static int gfx_v6_0_hw_fini(void *handle)
{}

static int gfx_v6_0_suspend(void *handle)
{}

static int gfx_v6_0_resume(void *handle)
{}

static bool gfx_v6_0_is_idle(void *handle)
{}

static int gfx_v6_0_wait_for_idle(void *handle)
{}

static int gfx_v6_0_soft_reset(void *handle)
{}

static void gfx_v6_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
						 enum amdgpu_interrupt_state state)
{}

static void gfx_v6_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
						     int ring,
						     enum amdgpu_interrupt_state state)
{}

static int gfx_v6_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{}

static int gfx_v6_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *src,
					      unsigned type,
					      enum amdgpu_interrupt_state state)
{}

static int gfx_v6_0_set_eop_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{}

static int gfx_v6_0_eop_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{}

static void gfx_v6_0_fault(struct amdgpu_device *adev,
			   struct amdgpu_iv_entry *entry)
{}

static int gfx_v6_0_priv_reg_irq(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{}

static int gfx_v6_0_priv_inst_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{}

static int gfx_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{}

static int gfx_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{}

static void gfx_v6_0_emit_mem_sync(struct amdgpu_ring *ring)
{}
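
/*
 * Hedged sketch of the mem_sync callback above: emit a SURFACE_SYNC packet
 * that invalidates the TC and shader caches, with coherency base/size set to
 * cover the whole address space. The ACTION_ENA macro names are assumptions.
static void gfx_v6_0_emit_mem_sync_sketch(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
			  PACKET3_TC_ACTION_ENA |
			  PACKET3_SH_KCACHE_ACTION_ENA |
			  PACKET3_SH_ICACHE_ACTION_ENA);	// CP_COHER_CNTL
	amdgpu_ring_write(ring, 0xffffffff);	// CP_COHER_SIZE
	amdgpu_ring_write(ring, 0);		// CP_COHER_BASE
	amdgpu_ring_write(ring, 0x0000000A);	// poll interval
}
*/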

static const struct amd_ip_funcs gfx_v6_0_ip_funcs =;

static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx =;

static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute =;

static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{}

static const struct amdgpu_irq_src_funcs gfx_v6_0_eop_irq_funcs =;

static const struct amdgpu_irq_src_funcs gfx_v6_0_priv_reg_irq_funcs =;

static const struct amdgpu_irq_src_funcs gfx_v6_0_priv_inst_irq_funcs =;

static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{}

static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
{}

const struct amdgpu_ip_block_version gfx_v6_0_ip_block =;