#include <linux/firmware.h>
#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "mmsch_v1_0.h"
#include "vce/vce_4_0_offset.h"
#include "vce/vce_4_0_default.h"
#include "vce/vce_4_0_sh_mask.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"
#include "ivsrcid/vce/irqsrcs_vce_4_0.h"
#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK …
#define VCE_V4_0_FW_SIZE …
#define VCE_V4_0_STACK_SIZE …
#define VCE_V4_0_DATA_SIZE …
static void vce_v4_0_mc_resume(struct amdgpu_device *adev);
static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v4_0_set_irq_funcs(struct amdgpu_device *adev);
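/**
 * vce_v4_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */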
static uint64_t vce_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
{ … }
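/**
 * vce_v4_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */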
static uint64_t vce_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
{ … }
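/**
 * vce_v4_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */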
static void vce_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
{ … }
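/*
 * Wait for the VCE firmware to report that it has been loaded
 * (see VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK above).
 */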
static int vce_v4_0_firmware_loaded(struct amdgpu_device *adev)
{ … }
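/*
 * SR-IOV only: hand the init table in @table to the MMSCH firmware and
 * wait for it to acknowledge that the programming has completed.
 */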
static int vce_v4_0_mmsch_start(struct amdgpu_device *adev,
struct amdgpu_mm_table *table)
{ … }
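/*
 * SR-IOV only: build the VCE register init table and let the MMSCH
 * program it on our behalf instead of writing the registers directly.
 */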
static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
{ … }
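/**
 * vce_v4_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block
 */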
static int vce_v4_0_start(struct amdgpu_device *adev)
{ … }
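/**
 * vce_v4_0_stop - stop VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the VCE block
 */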
static int vce_v4_0_stop(struct amdgpu_device *adev)
{ … }
static int vce_v4_0_early_init(void *handle)
{ … }
static int vce_v4_0_sw_init(void *handle)
{ … }
static int vce_v4_0_sw_fini(void *handle)
{ … }
static int vce_v4_0_hw_init(void *handle)
{ … }
static int vce_v4_0_hw_fini(void *handle)
{ … }
static int vce_v4_0_suspend(void *handle)
{ … }
static int vce_v4_0_resume(void *handle)
{ … }
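/**
 * vce_v4_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Tell the VCE memory controller where the firmware, stack and
 * data regions are located
 */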
static void vce_v4_0_mc_resume(struct amdgpu_device *adev)
{ … }
static int vce_v4_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{ … }
#if 0
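/*
 * Idle when none of the non-harvested VCE instances reports busy
 * in SRBM_STATUS2.
 */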
static bool vce_v4_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 mask = 0;
mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK;
mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_STATUS2__VCE1_BUSY_MASK;
return !(RREG32(mmSRBM_STATUS2) & mask);
}
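/* Repeatedly check for idle, giving up after adev->usec_timeout attempts. */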
static int vce_v4_0_wait_for_idle(void *handle)
{
unsigned i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++)
if (vce_v4_0_is_idle(handle))
return 0;
return -ETIMEDOUT;
}
#define VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK …
#define VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK …
#define VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK …
#define AMDGPU_VCE_STATUS_BUSY_MASK …
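/*
 * Select each VCE instance via GRBM_GFX_INDEX, check VCE_STATUS against
 * the busy mask and record which SRBM soft-reset bits will be needed.
 */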
static bool vce_v4_0_check_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset = 0;
mutex_lock(&adev->grbm_idx_mutex);
WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
	if (RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS)) & AMDGPU_VCE_STATUS_BUSY_MASK) {
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
}
WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
	if (RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS)) & AMDGPU_VCE_STATUS_BUSY_MASK) {
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
}
WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
mutex_unlock(&adev->grbm_idx_mutex);
if (srbm_soft_reset) {
adev->vce.srbm_soft_reset = srbm_soft_reset;
return true;
} else {
adev->vce.srbm_soft_reset = 0;
return false;
}
}
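/* Assert and then release the SRBM soft-reset bits recorded by vce_v4_0_check_soft_reset(). */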
static int vce_v4_0_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset;
if (!adev->vce.srbm_soft_reset)
return 0;
srbm_soft_reset = adev->vce.srbm_soft_reset;
if (srbm_soft_reset) {
u32 tmp;
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32(mmSRBM_SOFT_RESET, tmp);
tmp = RREG32(mmSRBM_SOFT_RESET);
udelay(50);
tmp &= ~srbm_soft_reset;
WREG32(mmSRBM_SOFT_RESET, tmp);
tmp = RREG32(mmSRBM_SOFT_RESET);
udelay(50);
}
return 0;
}
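/* Suspend VCE before the soft reset is applied. */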
static int vce_v4_0_pre_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!adev->vce.srbm_soft_reset)
return 0;
mdelay(5);
return vce_v4_0_suspend(adev);
}
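/* Bring VCE back up once the soft reset has completed. */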
static int vce_v4_0_post_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!adev->vce.srbm_soft_reset)
return 0;
mdelay(5);
return vce_v4_0_resume(adev);
}
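/* Force the VCE clock-gating override bit in VCE_RB_ARB_CTRL on or off. */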
static void vce_v4_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
{
u32 tmp, data;
tmp = data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_ARB_CTRL));
if (override)
data |= VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
else
data &= ~VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
if (tmp != data)
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_ARB_CTRL), data);
}
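/*
 * Program the VCE clock-gating registers for the gated or ungated state.
 * The clock-gating override is held while the registers are rewritten.
 */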
static void vce_v4_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
bool gated)
{
u32 data;
vce_v4_0_override_vce_clock_gating(adev, true);
if (gated) {
data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B));
data |= 0x1ff;
data &= ~0xef0000;
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), data);
data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING));
data |= 0x3ff000;
data &= ~0xffc00000;
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data);
data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2));
data |= 0x2;
data &= ~0x00010000;
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2), data);
data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING));
data |= 0x37f;
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), data);
data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL));
data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
0x8;
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL), data);
} else {
data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B));
data &= ~0x80010;
data |= 0xe70008;
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), data);
data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING));
data |= 0xffc00000;
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data);
data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2));
data |= 0x10000;
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2), data);
data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING));
data &= ~0xffc00000;
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), data);
data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL));
data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
0x8);
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL), data);
}
vce_v4_0_override_vce_clock_gating(adev, false);
}
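/* Enable or disable the ECLK DFS bypass via GCK_DFS_BYPASS_CNTL. */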
static void vce_v4_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
if (enable)
tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
else
tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}
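/*
 * Apply sw clock gating per VCE instance: select each non-harvested
 * instance through GRBM_GFX_INDEX and program its gating registers.
 */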
static int vce_v4_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE);
int i;
if ((adev->asic_type == CHIP_POLARIS10) ||
(adev->asic_type == CHIP_TONGA) ||
(adev->asic_type == CHIP_FIJI))
vce_v4_0_set_bypass_mode(adev, enable);
if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
return 0;
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < 2; i++) {
if (adev->vce.harvest_config & (1 << i))
continue;
WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);
if (enable) {
			uint32_t data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A));
			data &= ~(0xf | 0xff0);
			data |= ((0x0 << 0) | (0x04 << 4));
			WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A), data);
			data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING));
			data &= ~(0xf | 0xff0);
			data |= ((0x0 << 0) | (0x04 << 4));
			WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data);
}
vce_v4_0_set_vce_sw_clock_gating(adev, enable);
}
WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;
}
#endif
static int vce_v4_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{ … }
static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
struct amdgpu_ib *ib, uint32_t flags)
{ … }
static void vce_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
u64 seq, unsigned flags)
{ … }
static void vce_v4_0_ring_insert_end(struct amdgpu_ring *ring)
{ … }
static void vce_v4_0_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
uint32_t val, uint32_t mask)
{ … }
static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
unsigned int vmid, uint64_t pd_addr)
{ … }
static void vce_v4_0_emit_wreg(struct amdgpu_ring *ring,
uint32_t reg, uint32_t val)
{ … }
static int vce_v4_0_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{ … }
static int vce_v4_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{ … }
const struct amd_ip_funcs vce_v4_0_ip_funcs = …;
static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = …;
static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev)
{ … }
static const struct amdgpu_irq_src_funcs vce_v4_0_irq_funcs = …;
static void vce_v4_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->vce.irq.num_types = 1;
adev->vce.irq.funcs = &vce_v4_0_irq_funcs;
}
const struct amdgpu_ip_block_version vce_v4_0_ip_block = …;