linux/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c

/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"

#include "sdma0/sdma0_4_2_offset.h"
#include "sdma0/sdma0_4_2_sh_mask.h"
#include "sdma1/sdma1_4_2_offset.h"
#include "sdma1/sdma1_4_2_sh_mask.h"
#include "sdma2/sdma2_4_2_2_offset.h"
#include "sdma2/sdma2_4_2_2_sh_mask.h"
#include "sdma3/sdma3_4_2_2_offset.h"
#include "sdma3/sdma3_4_2_2_sh_mask.h"
#include "sdma4/sdma4_4_2_2_offset.h"
#include "sdma4/sdma4_4_2_2_sh_mask.h"
#include "sdma5/sdma5_4_2_2_offset.h"
#include "sdma5/sdma5_4_2_2_sh_mask.h"
#include "sdma6/sdma6_4_2_2_offset.h"
#include "sdma6/sdma6_4_2_2_sh_mask.h"
#include "sdma7/sdma7_4_2_2_offset.h"
#include "sdma7/sdma7_4_2_2_sh_mask.h"
#include "sdma0/sdma0_4_1_default.h"

#include "soc15_common.h"
#include "soc15.h"
#include "vega10_sdma_pkt_open.h"

#include "ivsrcid/sdma0/irqsrcs_sdma0_4_0.h"
#include "ivsrcid/sdma1/irqsrcs_sdma1_4_0.h"

#include "amdgpu_ras.h"
#include "sdma_v4_4.h"

MODULE_FIRMWARE();
MODULE_FIRMWARE();
MODULE_FIRMWARE();
MODULE_FIRMWARE();
MODULE_FIRMWARE();
MODULE_FIRMWARE();
MODULE_FIRMWARE();
MODULE_FIRMWARE();
MODULE_FIRMWARE();
MODULE_FIRMWARE();
MODULE_FIRMWARE();
MODULE_FIRMWARE();
MODULE_FIRMWARE();

#define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK
#define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK

#define WREG32_SDMA(instance, offset, value)
#define RREG32_SDMA(instance, offset)

static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev);

static const struct soc15_reg_golden golden_settings_sdma_4[] =;

static const struct soc15_reg_golden golden_settings_sdma_vg10[] =;

static const struct soc15_reg_golden golden_settings_sdma_vg12[] =;

static const struct soc15_reg_golden golden_settings_sdma_4_1[] =;

static const struct soc15_reg_golden golden_settings_sdma0_4_2_init[] =;

static const struct soc15_reg_golden golden_settings_sdma0_4_2[] =;

static const struct soc15_reg_golden golden_settings_sdma1_4_2[] =;

static const struct soc15_reg_golden golden_settings_sdma_rv1[] =;

static const struct soc15_reg_golden golden_settings_sdma_rv2[] =;

static const struct soc15_reg_golden golden_settings_sdma_arct[] =;

static const struct soc15_reg_golden golden_settings_sdma_aldebaran[] =;

static const struct soc15_reg_golden golden_settings_sdma_4_3[] =;

static const struct soc15_ras_field_entry sdma_v4_0_ras_fields[] =;

static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
		u32 instance, u32 offset)
{}
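
/*
 * Minimal sketch of the per-instance register lookup, assuming the usual
 * soc15 bookkeeping where adev->reg_offset[SDMAn_HWIP][0][0] holds each
 * engine's register segment base (Arcturus-class parts extend the switch
 * through SDMA7_HWIP). The WREG32_SDMA()/RREG32_SDMA() accessors above are
 * assumed to wrap WREG32()/RREG32() with this lookup. Hedged reconstruction,
 * not necessarily the exact upstream body.
 */
static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
		u32 instance, u32 offset)
{
	u32 base;

	switch (instance) {
	case 0:
		base = adev->reg_offset[SDMA0_HWIP][0][0];
		break;
	case 1:
		base = adev->reg_offset[SDMA1_HWIP][0][0];
		break;
	default:
		/* SDMA2..7 follow the same pattern on multi-instance parts */
		base = adev->reg_offset[SDMA0_HWIP][0][0];
		break;
	}

	return base + offset;
}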

static unsigned sdma_v4_0_seq_to_irq_id(int seq_num)
{}

static int sdma_v4_0_irq_id_to_seq(unsigned client_id)
{}

static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
{}

static void sdma_v4_0_setup_ulv(struct amdgpu_device *adev)
{}

/**
 * sdma_v4_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */

// emulation only, won't work on a real chip
// real Vega10 chips need to use the PSP to load firmware
static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
{}

/**
 * sdma_v4_0_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (VEGA10+).
 */
static uint64_t sdma_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
{}
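
/*
 * Sketch of the rptr read, assuming the read pointer is mirrored into a
 * write-back slot in system memory and counted in DWs (hence the shift);
 * hedged, not the verbatim upstream body.
 */
static uint64_t sdma_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	u64 *rptr;

	/* XXX check if swapping is necessary on BE */
	rptr = ((u64 *)ring->rptr_cpu_addr);

	return ((*rptr) >> 2);
}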

/**
 * sdma_v4_0_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (VEGA10+).
 */
static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
{}

/**
 * sdma_v4_0_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (VEGA10+).
 */
static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
{}

/**
 * sdma_v4_0_page_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (VEGA10+).
 */
static uint64_t sdma_v4_0_page_ring_get_wptr(struct amdgpu_ring *ring)
{}

/**
 * sdma_v4_0_page_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (VEGA10+).
 */
static void sdma_v4_0_page_ring_set_wptr(struct amdgpu_ring *ring)
{}

static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{}
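
/*
 * A sketch of NOP insertion, assuming the engine supports burst NOPs (a
 * single NOP header whose COUNT field covers the following padding DWs);
 * hedged reconstruction rather than a guaranteed match of the upstream body.
 */
static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
	int i;

	for (i = 0; i < count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			amdgpu_ring_write(ring, ring->funcs->nop |
				SDMA_PKT_NOP_HEADER_COUNT(count - 1));
		else
			amdgpu_ring_write(ring, ring->funcs->nop);
}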

/**
 * sdma_v4_0_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @job: job to retrieve vmid from
 * @ib: IB object to schedule
 * @flags: unused
 *
 * Schedule an IB in the DMA ring (VEGA10).
 */
static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
				   struct amdgpu_job *job,
				   struct amdgpu_ib *ib,
				   uint32_t flags)
{}
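
/*
 * Sketch of IB scheduling: pad so the INDIRECT_BUFFER packet ends on an
 * 8-DW boundary, then emit the IB address (32-byte aligned), size and VMID.
 * Hedged; assumes the packet layout from vega10_sdma_pkt_open.h.
 */
static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
				   struct amdgpu_job *job,
				   struct amdgpu_ib *ib,
				   uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	/* IB packet must end on a 8 DW boundary */
	sdma_v4_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
	/* base must be 32 byte aligned */
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0);
}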

static void sdma_v4_0_wait_reg_mem(struct amdgpu_ring *ring,
				   int mem_space, int hdp,
				   uint32_t addr0, uint32_t addr1,
				   uint32_t ref, uint32_t mask,
				   uint32_t inv)
{}
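
/*
 * Sketch of the shared POLL_REGMEM helper used by the HDP-flush, pipeline
 * sync and reg-wait emitters below; assumes compare function 3 (equal) and
 * the DW5 retry/interval encoding. Hedged reconstruction.
 */
static void sdma_v4_0_wait_reg_mem(struct amdgpu_ring *ring,
				   int mem_space, int hdp,
				   uint32_t addr0, uint32_t addr1,
				   uint32_t ref, uint32_t mask,
				   uint32_t inv)
{
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(hdp) |
			  SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(mem_space) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
	if (mem_space) {
		/* memory address */
		amdgpu_ring_write(ring, addr0);
		amdgpu_ring_write(ring, addr1);
	} else {
		/* registers: convert DW offsets to byte addresses */
		amdgpu_ring_write(ring, addr0 << 2);
		amdgpu_ring_write(ring, addr1 << 2);
	}
	amdgpu_ring_write(ring, ref);  /* reference */
	amdgpu_ring_write(ring, mask); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(inv));
}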

/**
 * sdma_v4_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{}

/**
 * sdma_v4_0_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (VEGA10).
 */
static void sdma_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				      unsigned flags)
{}
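
/*
 * Hedged sketch of the fence path: a FENCE packet writes the (lower, and
 * optionally upper) sequence number to memory, followed by a TRAP packet so
 * the engine can raise an interrupt. Details such as MTYPE bits are omitted
 * here and may differ from the upstream body.
 */
static void sdma_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				      unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;

	/* write the fence */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
	/* fence address must be DW aligned */
	BUG_ON(addr & 0x3);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
		BUG_ON(addr & 0x3);
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}

	/* generate an interrupt */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
	amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}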


/**
 * sdma_v4_0_gfx_enable - enable the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable SDMA RB/IB
 *
 * Control the gfx async dma ring buffers (VEGA10).
 */
static void sdma_v4_0_gfx_enable(struct amdgpu_device *adev, bool enable)
{}

/**
 * sdma_v4_0_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues (VEGA10).
 */
static void sdma_v4_0_rlc_stop(struct amdgpu_device *adev)
{}

/**
 * sdma_v4_0_page_stop - stop the page async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the page async dma ring buffers (VEGA10).
 */
static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
{}

/**
 * sdma_v4_0_ctx_switch_enable - stop the async dma engines context switch
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs context switch.
 *
 * Halt or unhalt the async dma engines context switch (VEGA10).
 */
static void sdma_v4_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{}

/**
 * sdma_v4_0_enable - stop the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (VEGA10).
 */
static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable)
{}

/*
 * sdma_v4_0_rb_cntl - get parameters for rb_cntl
 */
static uint32_t sdma_v4_0_rb_cntl(struct amdgpu_ring *ring, uint32_t rb_cntl)
{}
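
/*
 * Sketch of the RB_CNTL helper: encode the ring size (log2 of DWs) and, on
 * big-endian builds, the byte-swap bits. Assumes the generic REG_SET_FIELD()
 * macro and the SDMA0_GFX_RB_CNTL field definitions; hedged reconstruction.
 */
static uint32_t sdma_v4_0_rb_cntl(struct amdgpu_ring *ring, uint32_t rb_cntl)
{
	/* Set ring buffer size in dwords */
	uint32_t rb_bufsz = order_base_2(ring->ring_size / 4);

	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
				RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
	return rb_cntl;
}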

/**
 * sdma_v4_0_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @i: instance to resume
 *
 * Set up the gfx DMA ring buffers and enable them (VEGA10).
 */
static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i)
{}

/**
 * sdma_v4_0_page_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @i: instance to resume
 *
 * Set up the page DMA ring buffers and enable them (VEGA10).
 */
static void sdma_v4_0_page_resume(struct amdgpu_device *adev, unsigned int i)
{}

static void
sdma_v4_1_update_power_gating(struct amdgpu_device *adev, bool enable)
{}

static void sdma_v4_1_init_power_gating(struct amdgpu_device *adev)
{}

static void sdma_v4_0_init_pg(struct amdgpu_device *adev)
{}

/**
 * sdma_v4_0_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them (VEGA10).
 * Returns 0 for success, error for failure.
 */
static int sdma_v4_0_rlc_resume(struct amdgpu_device *adev)
{}

/**
 * sdma_v4_0_load_microcode - load the sDMA ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
{}

/**
 * sdma_v4_0_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them (VEGA10).
 * Returns 0 for success, error for failure.
 */
static int sdma_v4_0_start(struct amdgpu_device *adev)
{}

/**
 * sdma_v4_0_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to
 * memory (VEGA10).
 * Returns 0 for success, error for failure.
 */
static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring)
{}

/**
 * sdma_v4_0_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Test a simple IB in the DMA ring (VEGA10).
 * Returns 0 on success, error on failure.
 */
static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{}


/**
 * sdma_v4_0_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (VEGA10).
 */
static void sdma_v4_0_vm_copy_pte(struct amdgpu_ib *ib,
				  uint64_t pe, uint64_t src,
				  unsigned count)
{}
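
/*
 * Sketch of the PTE copy: one COPY_LINEAR packet moving count * 8 bytes from
 * the GART source to the page-table destination. Hedged; assumes the
 * vega10_sdma_pkt_open.h encoding.
 */
static void sdma_v4_0_vm_copy_pte(struct amdgpu_ib *ib,
				  uint64_t pe, uint64_t src,
				  unsigned count)
{
	unsigned bytes = count * 8;

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = bytes - 1;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src);
	ib->ptr[ib->length_dw++] = upper_32_bits(src);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}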

/**
 * sdma_v4_0_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using sDMA (VEGA10).
 */
static void sdma_v4_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				   uint64_t value, unsigned count,
				   uint32_t incr)
{}
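
/*
 * Sketch of the manual PTE write: one WRITE_LINEAR packet whose payload is
 * the 64-bit entries, each advanced by incr bytes. Hedged reconstruction.
 */
static void sdma_v4_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				   uint64_t value, unsigned count,
				   uint32_t incr)
{
	unsigned ndw = count * 2;

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = ndw - 1;
	for (; ndw > 0; ndw -= 2) {
		ib->ptr[ib->length_dw++] = lower_32_bits(value);
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		value += incr;
	}
}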

/**
 * sdma_v4_0_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (VEGA10).
 */
static void sdma_v4_0_vm_set_pte_pde(struct amdgpu_ib *ib,
				     uint64_t pe,
				     uint64_t addr, unsigned count,
				     uint32_t incr, uint64_t flags)
{}
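
/*
 * Sketch of the PTE/PDE generate packet: destination, flags, starting value,
 * increment and entry count are encoded in a single SDMA_OP_PTEPDE command.
 * Hedged; the field order is assumed from the packet header definitions.
 */
static void sdma_v4_0_vm_set_pte_pde(struct amdgpu_ib *ib,
				     uint64_t pe,
				     uint64_t addr, unsigned count,
				     uint32_t incr, uint64_t flags)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_PTEPDE);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
	ib->ptr[ib->length_dw++] = upper_32_bits(flags);
	ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = incr; /* increment size */
	ib->ptr[ib->length_dw++] = 0;
	ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
}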

/**
 * sdma_v4_0_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: indirect buffer to fill with padding
 */
static void sdma_v4_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{}
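
/*
 * Sketch of IB padding to an 8-DW multiple, using a burst NOP when the
 * instance supports it; hedged, assumes struct amdgpu_sdma_instance exposes
 * a burst_nop flag as on other SDMA generations.
 */
static void sdma_v4_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
	u32 pad_count;
	int i;

	pad_count = (-ib->length_dw) & 7;
	for (i = 0; i < pad_count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
		else
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}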


/**
 * sdma_v4_0_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (VEGA10).
 */
static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{}
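
/*
 * Sketch of the pipeline sync: poll the ring's fence address in memory until
 * the latest emitted sequence number appears, via the shared wait_reg_mem
 * helper above. Hedged reconstruction.
 */
static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	/* wait for idle */
	sdma_v4_0_wait_reg_mem(ring, 1, 0,
			       addr & 0xfffffffc,
			       upper_32_bits(addr) & 0xffffffff,
			       seq, 0xffffffff, 4);
}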


/**
 * sdma_v4_0_ring_emit_vm_flush - vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid number to use
 * @pd_addr: address
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (VEGA10).
 */
static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vmid, uint64_t pd_addr)
{}
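
/*
 * Sketch: the heavy lifting is delegated to the GMC emit helper, which
 * writes the page-table base and emits the TLB invalidation on this ring;
 * hedged, assuming the generic amdgpu_gmc_emit_flush_gpu_tlb() path.
 */
static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
}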

static void sdma_v4_0_ring_emit_wreg(struct amdgpu_ring *ring,
				     uint32_t reg, uint32_t val)
{}
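
/* Hedged sketch: register writes from the ring go through an SRBM_WRITE packet. */
static void sdma_v4_0_ring_emit_wreg(struct amdgpu_ring *ring,
				     uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, val);
}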

static void sdma_v4_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					 uint32_t val, uint32_t mask)
{}
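
/* Hedged sketch: a register wait is simply a POLL_REGMEM in register space. */
static void sdma_v4_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					 uint32_t val, uint32_t mask)
{
	sdma_v4_0_wait_reg_mem(ring, 0, 0, reg, 0, val, mask, 10);
}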

static bool sdma_v4_0_fw_support_paging_queue(struct amdgpu_device *adev)
{}

static int sdma_v4_0_early_init(void *handle)
{}

static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev,
		void *err_data,
		struct amdgpu_iv_entry *entry);

static int sdma_v4_0_late_init(void *handle)
{}

static int sdma_v4_0_sw_init(void *handle)
{}

static int sdma_v4_0_sw_fini(void *handle)
{}

static int sdma_v4_0_hw_init(void *handle)
{}

static int sdma_v4_0_hw_fini(void *handle)
{}

static int sdma_v4_0_suspend(void *handle)
{}

static int sdma_v4_0_resume(void *handle)
{}

static bool sdma_v4_0_is_idle(void *handle)
{}

static int sdma_v4_0_wait_for_idle(void *handle)
{}

static int sdma_v4_0_soft_reset(void *handle)
{}

static int sdma_v4_0_set_trap_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{}

static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{}

static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev,
		void *err_data,
		struct amdgpu_iv_entry *entry)
{}

static int sdma_v4_0_process_illegal_inst_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{}

static int sdma_v4_0_set_ecc_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{}

static int sdma_v4_0_print_iv_entry(struct amdgpu_device *adev,
					      struct amdgpu_iv_entry *entry)
{}

static int sdma_v4_0_process_vm_hole_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{}

static int sdma_v4_0_process_doorbell_invalid_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{}

static int sdma_v4_0_process_pool_timeout_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{}

static int sdma_v4_0_process_srbm_write_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{}

static void sdma_v4_0_update_medium_grain_clock_gating(
		struct amdgpu_device *adev,
		bool enable)
{}


static void sdma_v4_0_update_medium_grain_light_sleep(
		struct amdgpu_device *adev,
		bool enable)
{}

static int sdma_v4_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{}

static int sdma_v4_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{}

static void sdma_v4_0_get_clockgating_state(void *handle, u64 *flags)
{}

const struct amd_ip_funcs sdma_v4_0_ip_funcs =;

static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs =;

static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs =;

static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
{}

static const struct amdgpu_irq_src_funcs sdma_v4_0_trap_irq_funcs =;

static const struct amdgpu_irq_src_funcs sdma_v4_0_illegal_inst_irq_funcs =;

static const struct amdgpu_irq_src_funcs sdma_v4_0_ecc_irq_funcs =;

static const struct amdgpu_irq_src_funcs sdma_v4_0_vm_hole_irq_funcs =;

static const struct amdgpu_irq_src_funcs sdma_v4_0_doorbell_invalid_irq_funcs =;

static const struct amdgpu_irq_src_funcs sdma_v4_0_pool_timeout_irq_funcs =;

static const struct amdgpu_irq_src_funcs sdma_v4_0_srbm_write_irq_funcs =;

static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev)
{}

/**
 * sdma_v4_0_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to copy to
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 * @copy_flags: copy flags for the buffers
 *
 * Copy GPU buffers using the DMA engine (VEGA10/12).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void sdma_v4_0_emit_copy_buffer(struct amdgpu_ib *ib,
				       uint64_t src_offset,
				       uint64_t dst_offset,
				       uint32_t byte_count,
				       uint32_t copy_flags)
{}
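
/*
 * Sketch of the TTM copy callback: one COPY_LINEAR packet per call, with the
 * TMZ header bit driven by copy_flags. Hedged; AMDGPU_COPY_FLAGS_TMZ is
 * assumed to be the flag carrying the secure-copy request.
 */
static void sdma_v4_0_emit_copy_buffer(struct amdgpu_ib *ib,
				       uint64_t src_offset,
				       uint64_t dst_offset,
				       uint32_t byte_count,
				       uint32_t copy_flags)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
		SDMA_PKT_COPY_LINEAR_HEADER_TMZ((copy_flags & AMDGPU_COPY_FLAGS_TMZ) ? 1 : 0);
	ib->ptr[ib->length_dw++] = byte_count - 1;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}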

/**
 * sdma_v4_0_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to copy to
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (VEGA10/12).
 */
static void sdma_v4_0_emit_fill_buffer(struct amdgpu_ib *ib,
				       uint32_t src_data,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{}
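
/*
 * Hedged sketch of the fill callback: a single CONST_FILL packet carrying
 * the destination, the 32-bit pattern and the byte count.
 */
static void sdma_v4_0_emit_fill_buffer(struct amdgpu_ib *ib,
				       uint32_t src_data,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = byte_count - 1;
}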

static const struct amdgpu_buffer_funcs sdma_v4_0_buffer_funcs =;

static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev)
{}

static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs =;

static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
{}

static void sdma_v4_0_get_ras_error_count(uint32_t value,
					uint32_t instance,
					uint32_t *sec_count)
{}

static int sdma_v4_0_query_ras_error_count_by_instance(struct amdgpu_device *adev,
			uint32_t instance, void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	uint32_t sec_count = 0;
	uint32_t reg_value = 0;

	reg_value = RREG32_SDMA(instance, mmSDMA0_EDC_COUNTER);
	/* double bit error is not supported */
	if (reg_value)
		sdma_v4_0_get_ras_error_count(reg_value,
				instance, &sec_count);
	/* err_data->ce_count should be initialized to 0
	 * before calling into this function */
	err_data->ce_count += sec_count;
	/* double bit error is not supported
	 * set ue count to 0 */
	err_data->ue_count = 0;

	return 0;
}

static void sdma_v4_0_query_ras_error_count(struct amdgpu_device *adev,  void *ras_error_status)
{}

static void sdma_v4_0_reset_ras_error_count(struct amdgpu_device *adev)
{}

const struct amdgpu_ras_block_hw_ops sdma_v4_0_ras_hw_ops =;

static struct amdgpu_sdma_ras sdma_v4_0_ras =;

static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev)
{}

const struct amdgpu_ip_block_version sdma_v4_0_ip_block =;