linux/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c

/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_xcp.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"

#include "sdma/sdma_4_4_2_offset.h"
#include "sdma/sdma_4_4_2_sh_mask.h"

#include "soc15_common.h"
#include "soc15.h"
#include "vega10_sdma_pkt_open.h"

#include "ivsrcid/sdma0/irqsrcs_sdma0_4_0.h"
#include "ivsrcid/sdma1/irqsrcs_sdma1_4_0.h"

#include "amdgpu_ras.h"

MODULE_FIRMWARE();
MODULE_FIRMWARE();

#define mmSMNAID_AID0_MCA_SMU

#define WREG32_SDMA(instance, offset, value)
#define RREG32_SDMA(instance, offset)

static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_ras_funcs(struct amdgpu_device *adev);

static u32 sdma_v4_4_2_get_reg_offset(struct amdgpu_device *adev,
		u32 instance, u32 offset)
{}

static unsigned sdma_v4_4_2_seq_to_irq_id(int seq_num)
{}

static int sdma_v4_4_2_irq_id_to_seq(struct amdgpu_device *adev, unsigned client_id)
{}

static void sdma_v4_4_2_inst_init_golden_registers(struct amdgpu_device *adev,
						   uint32_t inst_mask)
{}

/**
 * sdma_v4_4_2_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int sdma_v4_4_2_init_microcode(struct amdgpu_device *adev)
{}

/**
 * sdma_v4_4_2_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware.
 */
static uint64_t sdma_v4_4_2_ring_get_rptr(struct amdgpu_ring *ring)
{}
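
/*
 * Illustrative sketch of the usual SDMA v4-family read pointer helper,
 * assuming the generic ring->rptr_cpu_addr writeback slot; the elided body
 * above may differ in detail. The stored value is kept shifted left by 2
 * (byte units), so it is shifted back down to dwords here.
 */
#if 0
static uint64_t sdma_v4_4_2_ring_get_rptr(struct amdgpu_ring *ring)
{
	u64 rptr;

	/* XXX check if swapping is necessary on BE */
	rptr = READ_ONCE(*((u64 *)ring->rptr_cpu_addr));

	return rptr >> 2;
}
#endif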

/**
 * sdma_v4_4_2_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware.
 */
static uint64_t sdma_v4_4_2_ring_get_wptr(struct amdgpu_ring *ring)
{}

/**
 * sdma_v4_4_2_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware.
 */
static void sdma_v4_4_2_ring_set_wptr(struct amdgpu_ring *ring)
{}
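
/*
 * Sketch of the typical doorbell/register split used by amdgpu SDMA rings,
 * assuming this engine follows the sdma_v4_0 pattern; the register names
 * (regSDMA_GFX_RB_WPTR*) and the WREG32_SDMA wrapper usage are assumptions,
 * not taken from the elided body above.
 */
#if 0
static void sdma_v4_4_2_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		u64 *wb = (u64 *)ring->wptr_cpu_addr;

		/* publish the new wptr in the writeback slot, then ring the doorbell */
		WRITE_ONCE(*wb, ring->wptr << 2);
		WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
	} else {
		/* no doorbell: program the ring buffer wptr registers directly */
		WREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR,
			    lower_32_bits(ring->wptr << 2));
		WREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR_HI,
			    upper_32_bits(ring->wptr << 2));
	}
}
#endif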

/**
 * sdma_v4_4_2_page_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware.
 */
static uint64_t sdma_v4_4_2_page_ring_get_wptr(struct amdgpu_ring *ring)
{}

/**
 * sdma_v4_4_2_page_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware.
 */
static void sdma_v4_4_2_page_ring_set_wptr(struct amdgpu_ring *ring)
{}

static void sdma_v4_4_2_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{}

/**
 * sdma_v4_4_2_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @job: job to retrieve vmid from
 * @ib: IB object to schedule
 * @flags: unused
 *
 * Schedule an IB in the DMA ring.
 */
static void sdma_v4_4_2_ring_emit_ib(struct amdgpu_ring *ring,
				   struct amdgpu_job *job,
				   struct amdgpu_ib *ib,
				   uint32_t flags)
{}
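
/*
 * Sketch of the SDMA INDIRECT_BUFFER packet emission, following the common
 * sdma_v4_0-style encoding from vega10_sdma_pkt_open.h; treat it as an
 * illustration of the packet layout rather than the exact body above.
 */
#if 0
static void sdma_v4_4_2_ring_emit_ib(struct amdgpu_ring *ring,
				     struct amdgpu_job *job,
				     struct amdgpu_ib *ib,
				     uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	/* An IB packet must end on an 8 DW boundary */
	sdma_v4_4_2_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
	/* base must be 32 byte aligned */
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0);
}
#endif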

static void sdma_v4_4_2_wait_reg_mem(struct amdgpu_ring *ring,
				   int mem_space, int hdp,
				   uint32_t addr0, uint32_t addr1,
				   uint32_t ref, uint32_t mask,
				   uint32_t inv)
{}

/**
 * sdma_v4_4_2_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void sdma_v4_4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{}

/**
 * sdma_v4_4_2_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: GPU address to write the fence sequence number to
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed.
 */
static void sdma_v4_4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				      unsigned flags)
{}
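
/*
 * Sketch of the fence + trap sequence: write the low 32 bits of the fence
 * sequence number (optionally the high bits for 64-bit fences), then emit a
 * TRAP packet so the engine raises an interrupt. Packet macros follow the
 * vega10_sdma_pkt_open.h naming; the elided body above may set extra header
 * bits (e.g. cache policy) not shown here.
 */
#if 0
static void sdma_v4_4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
					unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;

	/* write the fence */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
	/* fence address must be dword aligned */
	BUG_ON(addr & 0x3);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}

	/* generate an interrupt */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
	amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}
#endif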


/**
 * sdma_v4_4_2_inst_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 * @inst_mask: mask of dma engine instances to be disabled
 *
 * Stop the gfx async dma ring buffers.
 */
static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev,
				      uint32_t inst_mask)
{}

/**
 * sdma_v4_4_2_inst_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 * @inst_mask: mask of dma engine instances to be disabled
 *
 * Stop the compute async dma queues.
 */
static void sdma_v4_4_2_inst_rlc_stop(struct amdgpu_device *adev,
				      uint32_t inst_mask)
{}

/**
 * sdma_v4_4_2_inst_page_stop - stop the page async dma engines
 *
 * @adev: amdgpu_device pointer
 * @inst_mask: mask of dma engine instances to be disabled
 *
 * Stop the page async dma ring buffers.
 */
static void sdma_v4_4_2_inst_page_stop(struct amdgpu_device *adev,
				       uint32_t inst_mask)
{}

/**
 * sdma_v4_4_2_inst_ctx_switch_enable - enable/disable the async dma engines context switch
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs context switch.
 * @inst_mask: mask of dma engine instances to be enabled
 *
 * Halt or unhalt the async dma engines context switch.
 */
static void sdma_v4_4_2_inst_ctx_switch_enable(struct amdgpu_device *adev,
					       bool enable, uint32_t inst_mask)
{}

/**
 * sdma_v4_4_2_inst_enable - enable/disable the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 * @inst_mask: mask of dma engine instances to be enabled
 *
 * Halt or unhalt the async dma engines.
 */
static void sdma_v4_4_2_inst_enable(struct amdgpu_device *adev, bool enable,
				    uint32_t inst_mask)
{}

/*
 * sdma_v4_4_2_rb_cntl - get parameters for rb_cntl
 */
static uint32_t sdma_v4_4_2_rb_cntl(struct amdgpu_ring *ring, uint32_t rb_cntl)
{}

/**
 * sdma_v4_4_2_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @i: instance to resume
 *
 * Set up the gfx DMA ring buffers and enable them.
 */
static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i)
{}

/**
 * sdma_v4_4_2_page_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @i: instance to resume
 *
 * Set up the page DMA ring buffers and enable them.
 */
static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i)
{}

static void sdma_v4_4_2_init_pg(struct amdgpu_device *adev)
{}

/**
 * sdma_v4_4_2_inst_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @inst_mask: mask of dma engine instances to be enabled
 *
 * Set up the compute DMA queues and enable them.
 * Returns 0 for success, error for failure.
 */
static int sdma_v4_4_2_inst_rlc_resume(struct amdgpu_device *adev,
				       uint32_t inst_mask)
{}

/**
 * sdma_v4_4_2_inst_load_microcode - load the sDMA ME ucode
 *
 * @adev: amdgpu_device pointer
 * @inst_mask: mask of dma engine instances to be enabled
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int sdma_v4_4_2_inst_load_microcode(struct amdgpu_device *adev,
					   uint32_t inst_mask)
{}

/**
 * sdma_v4_4_2_inst_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @inst_mask: mask of dma engine instances to be enabled
 *
 * Set up the DMA engines and enable them.
 * Returns 0 for success, error for failure.
 */
static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev,
				  uint32_t inst_mask)
{}

/**
 * sdma_v4_4_2_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory.
 * Returns 0 for success, error for failure.
 */
static int sdma_v4_4_2_ring_test_ring(struct amdgpu_ring *ring)
{}
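
/*
 * Sketch of the standard amdgpu SDMA ring test: seed a writeback slot with
 * 0xCAFEDEAD, emit a WRITE_LINEAR packet that stores 0xDEADBEEF to it, then
 * poll until the value shows up or the timeout expires. The helpers used
 * (amdgpu_device_wb_get/free, amdgpu_ring_alloc/commit) are generic amdgpu
 * APIs; the exact packet count and error handling above may differ.
 */
#if 0
static int sdma_v4_4_2_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned int i, index;
	u64 gpu_addr;
	u32 tmp;
	int r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);

	r = amdgpu_ring_alloc(ring, 5);
	if (r)
		goto error_free_wb;

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

error_free_wb:
	amdgpu_device_wb_free(adev, index);
	return r;
}
#endif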

/**
 * sdma_v4_4_2_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Test a simple IB in the DMA ring.
 * Returns 0 on success, error on failure.
 */
static int sdma_v4_4_2_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{}


/**
 * sdma_v4_4_2_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA.
 */
static void sdma_v4_4_2_vm_copy_pte(struct amdgpu_ib *ib,
				  uint64_t pe, uint64_t src,
				  unsigned count)
{}
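
/*
 * Sketch of a COPY_LINEAR based PTE update: each PTE is 8 bytes, so the copy
 * length is count * 8. The layout follows the sdma_v4 packet format; the
 * elided body above may differ.
 */
#if 0
static void sdma_v4_4_2_vm_copy_pte(struct amdgpu_ib *ib,
				    uint64_t pe, uint64_t src,
				    unsigned count)
{
	unsigned bytes = count * 8;

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = bytes - 1;
	ib->ptr[ib->length_dw++] = 0;	/* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src);
	ib->ptr[ib->length_dw++] = upper_32_bits(src);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}
#endif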

/**
 * sdma_v4_4_2_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using sDMA.
 */
static void sdma_v4_4_2_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				   uint64_t value, unsigned count,
				   uint32_t incr)
{}
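
/*
 * Sketch of a WRITE_LINEAR based PTE update: one header plus the destination
 * address, followed by count 64-bit values, each advanced by incr bytes.
 * Illustrative only.
 */
#if 0
static void sdma_v4_4_2_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				     uint64_t value, unsigned count,
				     uint32_t incr)
{
	unsigned ndw = count * 2;

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = ndw - 1;
	for (; ndw > 0; ndw -= 2) {
		ib->ptr[ib->length_dw++] = lower_32_bits(value);
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		value += incr;
	}
}
#endif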

/**
 * sdma_v4_4_2_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA.
 */
static void sdma_v4_4_2_vm_set_pte_pde(struct amdgpu_ib *ib,
				     uint64_t pe,
				     uint64_t addr, unsigned count,
				     uint32_t incr, uint64_t flags)
{}
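
/*
 * Sketch of the PTEPDE (generate PTE/PDE) packet: the engine expands a base
 * address, flags, increment and count into a run of page table entries.
 * Field order follows the sdma_v4 packet definition; illustrative only.
 */
#if 0
static void sdma_v4_4_2_vm_set_pte_pde(struct amdgpu_ib *ib,
				       uint64_t pe,
				       uint64_t addr, unsigned count,
				       uint32_t incr, uint64_t flags)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_PTEPDE);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);	/* dst addr */
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = lower_32_bits(flags);	/* mask */
	ib->ptr[ib->length_dw++] = upper_32_bits(flags);
	ib->ptr[ib->length_dw++] = lower_32_bits(addr);	/* value */
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = incr;	/* increment size */
	ib->ptr[ib->length_dw++] = 0;
	ib->ptr[ib->length_dw++] = count - 1;	/* number of entries */
}
#endif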

/**
 * sdma_v4_4_2_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: indirect buffer to fill with padding
 */
static void sdma_v4_4_2_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{}
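
/*
 * Sketch of IB padding: SDMA IBs must be padded to a multiple of 8 dwords;
 * if the engine supports burst NOPs the first NOP carries the remaining
 * count, otherwise plain NOPs are used. amdgpu_sdma_get_instance_from_ring()
 * and the burst_nop flag are generic amdgpu_sdma fields.
 */
#if 0
static void sdma_v4_4_2_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
	u32 pad_count;
	int i;

	pad_count = (-ib->length_dw) & 7;
	for (i = 0; i < pad_count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
		else
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}
#endif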


/**
 * sdma_v4_4_2_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed.
 */
static void sdma_v4_4_2_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{}
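
/*
 * Sketch of the pipeline sync: poll the fence memory location until the
 * latest synced sequence number appears, using the wait_reg_mem helper in
 * memory space (mem_space = 1). Illustrative of the common sdma_v4 pattern.
 */
#if 0
static void sdma_v4_4_2_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	/* wait for idle */
	sdma_v4_4_2_wait_reg_mem(ring, 1, 0,
				 addr & 0xfffffffc,
				 upper_32_bits(addr) & 0xffffffff,
				 seq, 0xffffffff, 4);
}
#endif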


/**
 * sdma_v4_4_2_ring_emit_vm_flush - vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid number to use
 * @pd_addr: page table base address
 *
 * Update the page table base and flush the VM TLB
 * using sDMA.
 */
static void sdma_v4_4_2_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vmid, uint64_t pd_addr)
{}
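
/*
 * Sketch: the VM flush itself is typically delegated to the GMC helper,
 * which emits the page table base update and TLB invalidation through this
 * ring's emit_wreg/emit_reg_wait callbacks.
 */
#if 0
static void sdma_v4_4_2_ring_emit_vm_flush(struct amdgpu_ring *ring,
					   unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
}
#endif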

static void sdma_v4_4_2_ring_emit_wreg(struct amdgpu_ring *ring,
				     uint32_t reg, uint32_t val)
{}
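
/*
 * Sketch of a register write from the ring: an SRBM_WRITE packet carries the
 * register offset and value; BYTE_EN 0xf writes all four bytes. Macro names
 * follow vega10_sdma_pkt_open.h.
 */
#if 0
static void sdma_v4_4_2_ring_emit_wreg(struct amdgpu_ring *ring,
				       uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, val);
}
#endif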

static void sdma_v4_4_2_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					 uint32_t val, uint32_t mask)
{}
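
/*
 * Sketch: register polling reuses the generic wait_reg_mem helper in
 * register space (mem_space = 0) with a retry interval, mirroring the other
 * sdma_v4 implementations.
 */
#if 0
static void sdma_v4_4_2_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					   uint32_t val, uint32_t mask)
{
	sdma_v4_4_2_wait_reg_mem(ring, 0, 0, reg, 0, val, mask, 10);
}
#endif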

static bool sdma_v4_4_2_fw_support_paging_queue(struct amdgpu_device *adev)
{}

static int sdma_v4_4_2_early_init(void *handle)
{}

#if 0
static int sdma_v4_4_2_process_ras_data_cb(struct amdgpu_device *adev,
		void *err_data,
		struct amdgpu_iv_entry *entry);
#endif

static int sdma_v4_4_2_late_init(void *handle)
{}

static int sdma_v4_4_2_sw_init(void *handle)
{}

static int sdma_v4_4_2_sw_fini(void *handle)
{}

static int sdma_v4_4_2_hw_init(void *handle)
{}

static int sdma_v4_4_2_hw_fini(void *handle)
{}

static int sdma_v4_4_2_set_clockgating_state(void *handle,
					     enum amd_clockgating_state state);

static int sdma_v4_4_2_suspend(void *handle)
{}

static int sdma_v4_4_2_resume(void *handle)
{}

static bool sdma_v4_4_2_is_idle(void *handle)
{}

static int sdma_v4_4_2_wait_for_idle(void *handle)
{}

static int sdma_v4_4_2_soft_reset(void *handle)
{}

static int sdma_v4_4_2_set_trap_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{}

static int sdma_v4_4_2_process_trap_irq(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{}

#if 0
static int sdma_v4_4_2_process_ras_data_cb(struct amdgpu_device *adev,
		void *err_data,
		struct amdgpu_iv_entry *entry)
{
	int instance;

	/* When "Full RAS" is enabled, the per-IP interrupt sources should
	 * be disabled and the driver should only look for the aggregated
	 * interrupt via sync flood
	 */
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA))
		goto out;

	instance = sdma_v4_4_2_irq_id_to_seq(adev, entry->client_id);
	if (instance < 0)
		goto out;

	amdgpu_sdma_process_ras_data_cb(adev, err_data, entry);

out:
	return AMDGPU_RAS_SUCCESS;
}
#endif

static int sdma_v4_4_2_process_illegal_inst_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{}

static int sdma_v4_4_2_set_ecc_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{}

static int sdma_v4_4_2_print_iv_entry(struct amdgpu_device *adev,
					      struct amdgpu_iv_entry *entry)
{}

static int sdma_v4_4_2_process_vm_hole_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{}

static int sdma_v4_4_2_process_doorbell_invalid_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{}

static int sdma_v4_4_2_process_pool_timeout_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{}

static int sdma_v4_4_2_process_srbm_write_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{}

static void sdma_v4_4_2_inst_update_medium_grain_light_sleep(
	struct amdgpu_device *adev, bool enable, uint32_t inst_mask)
{}

static void sdma_v4_4_2_inst_update_medium_grain_clock_gating(
	struct amdgpu_device *adev, bool enable, uint32_t inst_mask)
{}

static int sdma_v4_4_2_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{}

static int sdma_v4_4_2_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{}

static void sdma_v4_4_2_get_clockgating_state(void *handle, u64 *flags)
{}

const struct amd_ip_funcs sdma_v4_4_2_ip_funcs =;

static const struct amdgpu_ring_funcs sdma_v4_4_2_ring_funcs =;

static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs =;

static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev)
{}

static const struct amdgpu_irq_src_funcs sdma_v4_4_2_trap_irq_funcs =;

static const struct amdgpu_irq_src_funcs sdma_v4_4_2_illegal_inst_irq_funcs =;

static const struct amdgpu_irq_src_funcs sdma_v4_4_2_ecc_irq_funcs =;

static const struct amdgpu_irq_src_funcs sdma_v4_4_2_vm_hole_irq_funcs =;

static const struct amdgpu_irq_src_funcs sdma_v4_4_2_doorbell_invalid_irq_funcs =;

static const struct amdgpu_irq_src_funcs sdma_v4_4_2_pool_timeout_irq_funcs =;

static const struct amdgpu_irq_src_funcs sdma_v4_4_2_srbm_write_irq_funcs =;

static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev)
{}

/**
 * sdma_v4_4_2_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to copy to
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 * @copy_flags: copy flags for the buffers
 *
 * Copy GPU buffers using the DMA engine.
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void sdma_v4_4_2_emit_copy_buffer(struct amdgpu_ib *ib,
				       uint64_t src_offset,
				       uint64_t dst_offset,
				       uint32_t byte_count,
				       uint32_t copy_flags)
{}
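
/*
 * Sketch of the COPY_LINEAR packet used by the buffer_funcs copy callback;
 * handling of copy_flags (e.g. TMZ) is omitted here, and the elided body
 * above may set extra header bits for those cases.
 */
#if 0
static void sdma_v4_4_2_emit_copy_buffer(struct amdgpu_ib *ib,
					 uint64_t src_offset,
					 uint64_t dst_offset,
					 uint32_t byte_count,
					 uint32_t copy_flags)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = byte_count - 1;
	ib->ptr[ib->length_dw++] = 0;	/* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}
#endif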

/**
 * sdma_v4_4_2_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to copy to
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine.
 */
static void sdma_v4_4_2_emit_fill_buffer(struct amdgpu_ib *ib,
				       uint32_t src_data,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{}
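
/*
 * Sketch of the CONST_FILL packet used by the buffer_funcs fill callback:
 * destination address, 32-bit fill pattern and byte count - 1.
 */
#if 0
static void sdma_v4_4_2_emit_fill_buffer(struct amdgpu_ib *ib,
					 uint32_t src_data,
					 uint64_t dst_offset,
					 uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = byte_count - 1;
}
#endif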

static const struct amdgpu_buffer_funcs sdma_v4_4_2_buffer_funcs =;

static void sdma_v4_4_2_set_buffer_funcs(struct amdgpu_device *adev)
{}

static const struct amdgpu_vm_pte_funcs sdma_v4_4_2_vm_pte_funcs =;

static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev)
{}

const struct amdgpu_ip_block_version sdma_v4_4_2_ip_block =;

static int sdma_v4_4_2_xcp_resume(void *handle, uint32_t inst_mask)
{}

static int sdma_v4_4_2_xcp_suspend(void *handle, uint32_t inst_mask)
{}

struct amdgpu_xcp_ip_funcs sdma_v4_4_2_xcp_funcs =;

static const struct amdgpu_ras_err_status_reg_entry sdma_v4_2_2_ue_reg_list[] =;

static const struct amdgpu_ras_memory_id_entry sdma_v4_4_2_ras_memory_list[] =;

static void sdma_v4_4_2_inst_query_ras_error_count(struct amdgpu_device *adev,
						   uint32_t sdma_inst,
						   void *ras_err_status)
{}

static void sdma_v4_4_2_query_ras_error_count(struct amdgpu_device *adev,
					      void *ras_err_status)
{}

static void sdma_v4_4_2_inst_reset_ras_error_count(struct amdgpu_device *adev,
						   uint32_t sdma_inst)
{}

static void sdma_v4_4_2_reset_ras_error_count(struct amdgpu_device *adev)
{}

static const struct amdgpu_ras_block_hw_ops sdma_v4_4_2_ras_hw_ops =;

static int sdma_v4_4_2_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
				       enum aca_smu_type type, void *data)
{}

/* CODE_SDMA0 - CODE_SDMA4, see the SMU driver_if header file */
static int sdma_v4_4_2_err_codes[] =;

static bool sdma_v4_4_2_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
					  enum aca_smu_type type, void *data)
{}

static const struct aca_bank_ops sdma_v4_4_2_aca_bank_ops =;

static const struct aca_info sdma_v4_4_2_aca_info =;

static int sdma_v4_4_2_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{}

static struct amdgpu_sdma_ras sdma_v4_4_2_ras =;

static void sdma_v4_4_2_set_ras_funcs(struct amdgpu_device *adev)
{}