linux/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c

/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"

#include "gc/gc_10_3_0_offset.h"
#include "gc/gc_10_3_0_sh_mask.h"
#include "ivsrcid/sdma0/irqsrcs_sdma0_5_0.h"
#include "ivsrcid/sdma1/irqsrcs_sdma1_5_0.h"
#include "ivsrcid/sdma2/irqsrcs_sdma2_5_0.h"
#include "ivsrcid/sdma3/irqsrcs_sdma3_5_0.h"

#include "soc15_common.h"
#include "soc15.h"
#include "navi10_sdma_pkt_open.h"
#include "nbio_v2_3.h"
#include "sdma_common.h"
#include "sdma_v5_2.h"

MODULE_FIRMWARE();
MODULE_FIRMWARE();
MODULE_FIRMWARE();
MODULE_FIRMWARE();

MODULE_FIRMWARE();
MODULE_FIRMWARE();
MODULE_FIRMWARE();
MODULE_FIRMWARE();

#define SDMA1_REG_OFFSET
#define SDMA3_REG_OFFSET
#define SDMA0_HYP_DEC_REG_START
#define SDMA0_HYP_DEC_REG_END
#define SDMA1_HYP_DEC_REG_OFFSET

static void sdma_v5_2_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v5_2_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v5_2_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v5_2_set_irq_funcs(struct amdgpu_device *adev);

static u32 sdma_v5_2_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset)
{}
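
/*
 * Illustrative sketch only, not the driver's actual body: one plausible way a
 * helper like sdma_v5_2_get_reg_offset() could translate an SDMA instance
 * number plus an internal register offset into an absolute offset, assuming
 * each instance sits at a fixed stride from instance 0. The stride macro is
 * the (elided) SDMA1_REG_OFFSET define above; the GC_HWIP base lookup and the
 * handling of the HYP_DEC register window are simplifications.
 */
static u32 sdma_v5_2_get_reg_offset_sketch(struct amdgpu_device *adev,
					   u32 instance, u32 internal_offset)
{
	/* SDMA 5.2 registers live in the GC aperture on this generation */
	u32 base = adev->reg_offset[GC_HWIP][0][0];

	/* instances beyond 0 are a fixed stride away from instance 0 */
	return base + internal_offset + instance * SDMA1_REG_OFFSET;
}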

static unsigned sdma_v5_2_ring_init_cond_exec(struct amdgpu_ring *ring,
					      uint64_t addr)
{}

/**
 * sdma_v5_2_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (NAVI10+).
 */
static uint64_t sdma_v5_2_ring_get_rptr(struct amdgpu_ring *ring)
{}
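
/*
 * Illustrative sketch, not the actual body: on recent SDMA generations the
 * hardware mirrors the read pointer into a writeback slot in system memory,
 * so the driver can fetch it without a register read. The rptr_cpu_addr
 * field and the dword shift are assumptions based on common amdgpu ring
 * conventions.
 */
static uint64_t sdma_v5_2_ring_get_rptr_sketch(struct amdgpu_ring *ring)
{
	/* the engine DMA-writes its current rptr (in bytes) to the wb buffer */
	u64 *rptr = (u64 *)ring->rptr_cpu_addr;

	/* ring bookkeeping is in dwords, hence the shift */
	return *rptr >> 2;
}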

/**
 * sdma_v5_2_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (NAVI10+).
 */
static uint64_t sdma_v5_2_ring_get_wptr(struct amdgpu_ring *ring)
{}

/**
 * sdma_v5_2_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (NAVI10+).
 */
static void sdma_v5_2_ring_set_wptr(struct amdgpu_ring *ring)
{}
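
/*
 * Illustrative sketch, not the actual body: committing the write pointer
 * either through the doorbell (preferred when enabled) or by programming the
 * ring-buffer WPTR registers directly. The register names and the byte/dword
 * shift are assumptions for illustration.
 */
static void sdma_v5_2_ring_set_wptr_sketch(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* publish the new wptr in the writeback slot, then ring the doorbell */
		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, ring->wptr << 2);
		WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
	} else {
		WREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR),
		       lower_32_bits(ring->wptr << 2));
		WREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI),
		       upper_32_bits(ring->wptr << 2));
	}
}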

static void sdma_v5_2_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{}

/**
 * sdma_v5_2_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @job: job to retrieve vmid from
 * @ib: IB object to schedule
 * @flags: unused
 *
 * Schedule an IB in the DMA ring.
 */
static void sdma_v5_2_ring_emit_ib(struct amdgpu_ring *ring,
				   struct amdgpu_job *job,
				   struct amdgpu_ib *ib,
				   uint32_t flags)
{}
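
/*
 * Illustrative sketch, not the actual body: the core of scheduling an IB is
 * an SDMA INDIRECT packet carrying the VMID, the (32-byte aligned) IB GPU
 * address and length, and a CSA address for context save. The packet-encoding
 * macros are assumed to come from navi10_sdma_pkt_open.h; the real routine
 * also pads the ring so the packet ends on an 8-dword boundary.
 */
static void sdma_v5_2_ring_emit_ib_sketch(struct amdgpu_ring *ring,
					  struct amdgpu_job *job,
					  struct amdgpu_ib *ib,
					  uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
	uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
	/* IB base must be 32-byte aligned */
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
	amdgpu_ring_write(ring, lower_32_bits(csa_mc_addr));
	amdgpu_ring_write(ring, upper_32_bits(csa_mc_addr));
}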

/**
 * sdma_v5_2_ring_emit_mem_sync - flush the IB by graphics cache rinse
 *
 * @ring: amdgpu ring pointer
 *
 * Flush the IB with a graphics cache rinse.
 */
static void sdma_v5_2_ring_emit_mem_sync(struct amdgpu_ring *ring)
{}

/**
 * sdma_v5_2_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void sdma_v5_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{}

/**
 * sdma_v5_2_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: GPU address to write the fence sequence number to
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed.
 */
static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				      unsigned flags)
{}
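
/*
 * Illustrative sketch, not the actual body: a fence is a FENCE packet that
 * writes the sequence number to @addr, optionally followed by a TRAP packet
 * to raise an interrupt. Packet macros are assumed from
 * navi10_sdma_pkt_open.h; the 64-bit fence variant (a second FENCE packet for
 * the high dword) is omitted for brevity.
 */
static void sdma_v5_2_ring_emit_fence_sketch(struct amdgpu_ring *ring, u64 addr,
					     u64 seq, unsigned flags)
{
	/* write the low 32 bits of the sequence number to the fence address */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	if (flags & AMDGPU_FENCE_FLAG_INT) {
		/* raise an interrupt once the fence value has landed */
		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
		amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
	}
}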


/**
 * sdma_v5_2_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers.
 */
static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev)
{}

/**
 * sdma_v5_2_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues.
 */
static void sdma_v5_2_rlc_stop(struct amdgpu_device *adev)
{}

/**
 * sdma_v5_2_ctx_switch_enable - enable/disable the async dma engines context switch
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs context switch.
 *
 * Halt or unhalt the async dma engines context switch.
 */
static void sdma_v5_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{}

/**
 * sdma_v5_2_enable - enable/disable the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines.
 */
static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable)
{}
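
/*
 * Illustrative sketch, not the actual body: halting or unhalting the engines
 * typically means toggling the HALT bit in each instance's F32_CNTL register,
 * after quiescing the gfx/rlc queues when disabling. Register and field names
 * are assumptions based on other SDMA generations.
 */
static void sdma_v5_2_enable_sketch(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl;
	int i;

	if (!enable) {
		/* stop the queues before halting the microcode engine */
		sdma_v5_2_gfx_stop(adev);
		sdma_v5_2_rlc_stop(adev);
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
		WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
	}
}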

/**
 * sdma_v5_2_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them.
 * Returns 0 for success, error for failure.
 */
static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
{}

/**
 * sdma_v5_2_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them.
 * Returns 0 for success, error for failure.
 */
static int sdma_v5_2_rlc_resume(struct amdgpu_device *adev)
{}

/**
 * sdma_v5_2_load_microcode - load the sDMA ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the sDMA0/1/2/3 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int sdma_v5_2_load_microcode(struct amdgpu_device *adev)
{}

static int sdma_v5_2_soft_reset(void *handle)
{}

/**
 * sdma_v5_2_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them.
 * Returns 0 for success, error for failure.
 */
static int sdma_v5_2_start(struct amdgpu_device *adev)
{}

static int sdma_v5_2_mqd_init(struct amdgpu_device *adev, void *mqd,
			      struct amdgpu_mqd_prop *prop)
{}

static void sdma_v5_2_set_mqd_funcs(struct amdgpu_device *adev)
{}

/**
 * sdma_v5_2_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory.
 * Returns 0 for success, error for failure.
 */
static int sdma_v5_2_ring_test_ring(struct amdgpu_ring *ring)
{}
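
/*
 * Illustrative sketch, not the actual body: the classic ring test allocates a
 * writeback slot, seeds it with a poison pattern, asks the engine to DMA a
 * known value over it, and then polls until the value appears or a timeout
 * expires. Packet macros and the exact dword layout of the WRITE packet are
 * assumptions for illustration.
 */
static int sdma_v5_2_ring_test_ring_sketch(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	u32 index;
	u64 gpu_addr;
	int r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);	/* poison pattern */

	r = amdgpu_ring_alloc(ring, 8);
	if (r)
		goto err_free_wb;

	/* single-dword linear WRITE of 0xDEADBEEF to the writeback slot */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
	amdgpu_ring_write(ring, 0);		/* dword count - 1 */
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (le32_to_cpu(adev->wb.wb[index]) == 0xDEADBEEF)
			break;
		udelay(1);
	}
	r = (i < adev->usec_timeout) ? 0 : -ETIMEDOUT;

err_free_wb:
	amdgpu_device_wb_free(adev, index);
	return r;
}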

/**
 * sdma_v5_2_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Test a simple IB in the DMA ring.
 * Returns 0 on success, error on failure.
 */
static int sdma_v5_2_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{}


/**
 * sdma_v5_2_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA.
 */
static void sdma_v5_2_vm_copy_pte(struct amdgpu_ib *ib,
				  uint64_t pe, uint64_t src,
				  unsigned count)
{}

/**
 * sdma_v5_2_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using sDMA.
 */
static void sdma_v5_2_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				   uint64_t value, unsigned count,
				   uint32_t incr)
{}
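
/*
 * Illustrative sketch, not the actual body: manual PTE updates are a single
 * linear WRITE packet whose payload is @count 64-bit entries, each one @incr
 * bytes further along than the last. Packet macros are assumed from
 * navi10_sdma_pkt_open.h.
 */
static void sdma_v5_2_vm_write_pte_sketch(struct amdgpu_ib *ib, uint64_t pe,
					  uint64_t value, unsigned count,
					  uint32_t incr)
{
	unsigned ndw = count * 2;	/* two dwords per 64-bit PTE */

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = ndw - 1;
	for (; ndw > 0; ndw -= 2) {
		ib->ptr[ib->length_dw++] = lower_32_bits(value);
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		value += incr;
	}
}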

/**
 * sdma_v5_2_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA.
 */
static void sdma_v5_2_vm_set_pte_pde(struct amdgpu_ib *ib,
				     uint64_t pe,
				     uint64_t addr, unsigned count,
				     uint32_t incr, uint64_t flags)
{}

/**
 * sdma_v5_2_ring_pad_ib - pad the IB
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: indirect buffer to fill with padding
 *
 * Pad the IB with NOPs to a boundary multiple of 8.
 */
static void sdma_v5_2_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{}
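
/*
 * Illustrative sketch, not the actual body: padding simply appends NOP
 * packets until the IB length is a multiple of 8 dwords, which the engine
 * requires. The real routine can emit a single burst NOP covering all the
 * padding dwords when the microcode supports it; that refinement is omitted.
 */
static void sdma_v5_2_ring_pad_ib_sketch(struct amdgpu_ring *ring,
					 struct amdgpu_ib *ib)
{
	u32 pad_count = (-ib->length_dw) & 0x7;
	u32 i;

	for (i = 0; i < pad_count; i++)
		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}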


/**
 * sdma_v5_2_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed.
 */
static void sdma_v5_2_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{}


/**
 * sdma_v5_2_ring_emit_vm_flush - vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid number to use
 * @pd_addr: page directory base address
 *
 * Update the page table base and flush the VM TLB
 * using sDMA.
 */
static void sdma_v5_2_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vmid, uint64_t pd_addr)
{}
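
/*
 * Illustrative sketch, not the actual body: SDMA does not implement the TLB
 * flush itself; it defers to the common GMC helper, which emits the page
 * table base update and TLB invalidation through the ring's emit_wreg and
 * emit_reg_wait callbacks defined below.
 */
static void sdma_v5_2_ring_emit_vm_flush_sketch(struct amdgpu_ring *ring,
						unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
}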

static void sdma_v5_2_ring_emit_wreg(struct amdgpu_ring *ring,
				     uint32_t reg, uint32_t val)
{}

static void sdma_v5_2_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					 uint32_t val, uint32_t mask)
{}

static void sdma_v5_2_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
						   uint32_t reg0, uint32_t reg1,
						   uint32_t ref, uint32_t mask)
{}

static int sdma_v5_2_early_init(void *handle)
{}

static unsigned sdma_v5_2_seq_to_irq_id(int seq_num)
{}

static unsigned sdma_v5_2_seq_to_trap_id(int seq_num)
{}

static int sdma_v5_2_sw_init(void *handle)
{}

static int sdma_v5_2_sw_fini(void *handle)
{}

static int sdma_v5_2_hw_init(void *handle)
{}

static int sdma_v5_2_hw_fini(void *handle)
{}

static int sdma_v5_2_suspend(void *handle)
{}

static int sdma_v5_2_resume(void *handle)
{}

static bool sdma_v5_2_is_idle(void *handle)
{}

static int sdma_v5_2_wait_for_idle(void *handle)
{}

static int sdma_v5_2_ring_preempt_ib(struct amdgpu_ring *ring)
{}

static int sdma_v5_2_set_trap_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{}

static int sdma_v5_2_process_trap_irq(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{}

static int sdma_v5_2_process_illegal_inst_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{}

static bool sdma_v5_2_firmware_mgcg_support(struct amdgpu_device *adev,
						     int i)
{}

static void sdma_v5_2_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						       bool enable)
{}

static void sdma_v5_2_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						      bool enable)
{}

static int sdma_v5_2_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{}

static int sdma_v5_2_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{}

static void sdma_v5_2_get_clockgating_state(void *handle, u64 *flags)
{}

static void sdma_v5_2_ring_begin_use(struct amdgpu_ring *ring)
{}

static void sdma_v5_2_ring_end_use(struct amdgpu_ring *ring)
{}

const struct amd_ip_funcs sdma_v5_2_ip_funcs =;

static const struct amdgpu_ring_funcs sdma_v5_2_ring_funcs =;

static void sdma_v5_2_set_ring_funcs(struct amdgpu_device *adev)
{}

static const struct amdgpu_irq_src_funcs sdma_v5_2_trap_irq_funcs =;

static const struct amdgpu_irq_src_funcs sdma_v5_2_illegal_inst_irq_funcs =;

static void sdma_v5_2_set_irq_funcs(struct amdgpu_device *adev)
{}

/**
 * sdma_v5_2_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to copy to
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 * @copy_flags: copy flags for the buffers
 *
 * Copy GPU buffers using the DMA engine.
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void sdma_v5_2_emit_copy_buffer(struct amdgpu_ib *ib,
				       uint64_t src_offset,
				       uint64_t dst_offset,
				       uint32_t byte_count,
				       uint32_t copy_flags)
{}
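
/*
 * Illustrative sketch, not the actual body: a buffer copy is one linear COPY
 * packet carrying the byte count (minus one) and the source/destination GPU
 * addresses. Packet macros and the meaning of the third dword are assumed
 * from navi10_sdma_pkt_open.h; TMZ and other copy_flags handling is omitted.
 */
static void sdma_v5_2_emit_copy_buffer_sketch(struct amdgpu_ib *ib,
					      uint64_t src_offset,
					      uint64_t dst_offset,
					      uint32_t byte_count,
					      uint32_t copy_flags)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = byte_count - 1;
	ib->ptr[ib->length_dw++] = 0;	/* parameter dword, left at defaults */
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}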

/**
 * sdma_v5_2_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine.
 */
static void sdma_v5_2_emit_fill_buffer(struct amdgpu_ib *ib,
				       uint32_t src_data,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{}
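
/*
 * Illustrative sketch, not the actual body: a fill is one CONST_FILL packet
 * carrying the destination address, the 32-bit fill pattern and the byte
 * count (minus one). Packet macros are assumed from navi10_sdma_pkt_open.h.
 */
static void sdma_v5_2_emit_fill_buffer_sketch(struct amdgpu_ib *ib,
					      uint32_t src_data,
					      uint64_t dst_offset,
					      uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = byte_count - 1;
}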

static const struct amdgpu_buffer_funcs sdma_v5_2_buffer_funcs =;

static void sdma_v5_2_set_buffer_funcs(struct amdgpu_device *adev)
{}

static const struct amdgpu_vm_pte_funcs sdma_v5_2_vm_pte_funcs =;

static void sdma_v5_2_set_vm_pte_funcs(struct amdgpu_device *adev)
{}

const struct amdgpu_ip_block_version sdma_v5_2_ip_block =;