/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "amdgpu_cs.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "mmsch_v1_0.h"

#include "uvd/uvd_7_0_offset.h"
#include "uvd/uvd_7_0_sh_mask.h"
#include "vce/vce_4_0_offset.h"
#include "vce/vce_4_0_default.h"
#include "vce/vce_4_0_sh_mask.h"
#include "nbif/nbif_6_1_offset.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"
#include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"

#define mmUVD_PG0_CC_UVD_HARVESTING				0x00c7
#define mmUVD_PG0_CC_UVD_HARVESTING_BASE_IDX			1
//UVD_PG0_CC_UVD_HARVESTING
#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE__SHIFT		0x1
#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK		0x00000002L

#define UVD7_MAX_HW_INSTANCES_VEGA20				2

static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v7_0_start(struct amdgpu_device *adev);
static void uvd_v7_0_stop(struct amdgpu_device *adev);
static int uvd_v7_0_sriov_start(struct amdgpu_device *adev);

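/* IH client ids of the possible UVD instances, indexed by instance number */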
static int amdgpu_ih_clientid_uvds[] = {
	SOC15_IH_CLIENTID_UVD,
	SOC15_IH_CLIENTID_UVD1
};

/**
 * uvd_v7_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v7_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
}

/**
 * uvd_v7_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v7_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
}

/**
 * uvd_v7_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR,
		lower_32_bits(ring->wptr));
}

/**
 * uvd_v7_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		return;
	}

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}

/**
 * uvd_v7_0_enc_ring_test_ring - test if UVD ENC ring is working
 *
 * @ring: the engine to test on
 *
 */
static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v7_0_enc_get_create_msg - generate a UVD ENC create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: amdgpu object for which we query the offset
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, u32 handle,
				       struct amdgpu_bo *bo,
				       struct dma_fence **fence)
{}

/**
 * uvd_v7_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: amdgpu object for which we query the offset
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, u32 handle,
					struct amdgpu_bo *bo,
					struct dma_fence **fence)
{}

/**
 * uvd_v7_0_enc_ring_test_ib - test if UVD ENC IBs are working
 *
 * @ring: the engine to test on
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 */
static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{}

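/**
 * uvd_v7_0_early_init - set early init parameters
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Detect harvested UVD instances, pick the ring count and install the
 * ring, enc ring and irq callbacks.
 */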
static int uvd_v7_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_VEGA20) {
		u32 harvest;
		int i;

		adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			harvest = RREG32_SOC15(UVD, i, mmUVD_PG0_CC_UVD_HARVESTING);
			if (harvest & UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK)
				adev->uvd.harvest_config |= 1 << i;
		}
		if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 |
						 AMDGPU_UVD_HARVEST_UVD1))
			/* both instances are harvested, disable the block */
			return -ENOENT;
	} else {
		adev->uvd.num_uvd_inst = 1;
	}

	if (amdgpu_sriov_vf(adev))
		adev->uvd.num_enc_rings = 1;
	else
		adev->uvd.num_enc_rings = 2;
	uvd_v7_0_set_ring_funcs(adev);
	uvd_v7_0_set_enc_ring_funcs(adev);
	uvd_v7_0_set_irq_funcs(adev);

	return 0;
}

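/**
 * uvd_v7_0_sw_init - sw init for UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Initialize firmware, rings and interrupt sources for the UVD block.
 */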
static int uvd_v7_0_sw_init(void *handle)
{}

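/**
 * uvd_v7_0_sw_fini - sw fini for UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Tear down the rings and firmware state set up in sw_init.
 */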
static int uvd_v7_0_sw_fini(void *handle)
{}

/**
 * uvd_v7_0_hw_init - start and test UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v7_0_hw_init(void *handle)
{}

/**
 * uvd_v7_0_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v7_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (!amdgpu_sriov_vf(adev))
		uvd_v7_0_stop(adev);
	else {
		/* full access mode, so don't touch any UVD register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
	}

	return 0;
}

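/**
 * uvd_v7_0_prepare_suspend - prepare UVD block for suspend
 *
 * @handle: handle used to pass amdgpu_device pointer
 */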
static int uvd_v7_0_prepare_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return amdgpu_uvd_prepare_suspend(adev);
}

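/**
 * uvd_v7_0_suspend - suspend the UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the hardware and save the firmware state.
 */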
static int uvd_v7_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = uvd_v7_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

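/**
 * uvd_v7_0_resume - resume the UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Restore the firmware state and re-initialize the hardware.
 */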
static int uvd_v7_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v7_0_hw_init(adev);
}

/**
 * uvd_v7_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
{}

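/**
 * uvd_v7_0_mmsch_start - hand an init table to the MMSCH
 *
 * @adev: amdgpu_device pointer
 * @table: init table describing the register programming
 *
 * Under SR-IOV the MMSCH firmware programs the UVD registers on our
 * behalf; point it at the table and wait for it to acknowledge.
 */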
static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
				struct amdgpu_mm_table *table)
{}

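/**
 * uvd_v7_0_sriov_start - start UVD block under SR-IOV
 *
 * @adev: amdgpu_device pointer
 *
 * Build the MMSCH init table and let the MMSCH firmware bring up UVD.
 */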
static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
{}

/**
 * uvd_v7_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v7_0_start(struct amdgpu_device *adev)
{}

/**
 * uvd_v7_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v7_0_stop(struct amdgpu_device *adev)
{
	uint8_t i = 0;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		/* force RBC into idle state */
		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101);

		/* Stall UMC and register bus before resetting VCPU */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 1 << 8, ~(1 << 8));
		mdelay(1);

		/* put VCPU into reset */
		WREG32_SOC15(UVD, i, mmUVD_SOFT_RESET,
			     UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(5);

		/* disable VCPU clock */
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CNTL, 0x0);

		/* Unstall UMC and register bus */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0, ~(1 << 8));
	}
}

/**
 * uvd_v7_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v7_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write an enc fence and a trap command to the ring.
 */
static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
			u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
}

/**
 * uvd_v7_0_ring_emit_hdp_flush - skip HDP flushing
 *
 * @ring: amdgpu_ring pointer
 */
static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	/* The firmware doesn't seem to like touching registers at this point. */
}

/**
 * uvd_v7_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v7_0_ring_patch_cs_in_place - Patch the IB for command submission.
 *
 * @p: the CS parser with the IBs
 * @job: which job this ib is in
 * @ib: which IB to patch
 *
 */
static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
					   struct amdgpu_job *job,
					   struct amdgpu_ib *ib)
{
	struct amdgpu_ring *ring = amdgpu_job_ring(job);
	unsigned i;

	/* No patching necessary for the first instance */
	if (!ring->me)
		return 0;

	for (i = 0; i < ib->length_dw; i += 2) {
		uint32_t reg = amdgpu_ib_get_value(ib, i);

		reg -= p->adev->reg_offset[UVD_HWIP][0][1];
		reg += p->adev->reg_offset[UVD_HWIP][1][1];

		amdgpu_ib_set_value(ib, i, reg);
	}
	return 0;
}

/**
 * uvd_v7_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_RBC_IB_SIZE), 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v7_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
					struct amdgpu_job *job,
					struct amdgpu_ib *ib,
					uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void uvd_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
				    uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 8);
}

static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					uint32_t val, uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GP_SCRATCH8), 0));
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 12);
}

static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	uvd_v7_0_ring_emit_reg_wait(ring, data0, data1, mask);
}

static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_NO_OP), 0));
		amdgpu_ring_write(ring, 0);
	}
}

static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
}

static void uvd_v7_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}

static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	uvd_v7_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
					vmid * hub->ctx_addr_distance,
					lower_32_bits(pd_addr), 0xffffffff);
}

static void uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}

#if 0
static bool uvd_v7_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v7_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (uvd_v7_0_is_idle(handle))
			return 0;
	}
	return -ETIMEDOUT;
}

#define AMDGPU_UVD_STATUS_BUSY_MASK		0xfd
static bool uvd_v7_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* this dead code assumes the first UVD instance */
	struct amdgpu_ring *ring = &adev->uvd.inst[0].ring;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
	    (RREG32_SOC15(UVD, ring->me, mmUVD_STATUS) &
		    AMDGPU_UVD_STATUS_BUSY_MASK))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
				SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

	if (srbm_soft_reset) {
		adev->uvd.inst[ring->me].srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->uvd.inst[ring->me].srbm_soft_reset = 0;
		return false;
	}
}

static int uvd_v7_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst[0].ring;

	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
		return 0;

	uvd_v7_0_stop(adev);
	return 0;
}

static int uvd_v7_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst[0].ring;
	u32 srbm_soft_reset;

	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->uvd.inst[ring->me].srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

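		/* assert the requested reset bits, then release them after a delay */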
		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int uvd_v7_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst[0].ring;

	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
		return 0;

	mdelay(5);

	return uvd_v7_0_start(adev);
}
#endif

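/* UVD interrupts are always enabled, so changing the state is a no-op */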
static int uvd_v7_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

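/**
 * uvd_v7_0_process_interrupt - route a UVD interrupt to the right ring
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source this entry belongs to
 * @entry: decoded interrupt vector entry
 *
 * Map the IH client id to a UVD instance and signal the fences of the
 * ring that matches the source id.
 */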
static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_UVD:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_UVD1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: UVD TRAP\n");

	switch (entry->src_id) {
	case UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring);
		break;
	case UVD_7_0__SRCID__UVD_ENC_GEN_PURP:
		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[0]);
		break;
	case UVD_7_0__SRCID__UVD_ENC_LOW_LATENCY:
		if (!amdgpu_sriov_vf(adev))
			amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

#if 0
static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	/* this dead code assumes the first UVD instance */
	struct amdgpu_ring *ring = &adev->uvd.inst[0].ring;
	uint32_t data, data1, data2, suvd_flags;

	data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL);
	data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
	data2 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

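	/* dynamic clock mode: one cycle gate delay, four cycle clock off delay */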
	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
			UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
			UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
			UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
			UVD_CGC_CTRL__SYS_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MODE_MASK |
			UVD_CGC_CTRL__MPEG2_MODE_MASK |
			UVD_CGC_CTRL__REGS_MODE_MASK |
			UVD_CGC_CTRL__RBC_MODE_MASK |
			UVD_CGC_CTRL__LMI_MC_MODE_MASK |
			UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
			UVD_CGC_CTRL__IDCT_MODE_MASK |
			UVD_CGC_CTRL__MPRD_MODE_MASK |
			UVD_CGC_CTRL__MPC_MODE_MASK |
			UVD_CGC_CTRL__LBSI_MODE_MASK |
			UVD_CGC_CTRL__LRBBM_MODE_MASK |
			UVD_CGC_CTRL__WCB_MODE_MASK |
			UVD_CGC_CTRL__VCPU_MODE_MASK |
			UVD_CGC_CTRL__JPEG_MODE_MASK |
			UVD_CGC_CTRL__JPEG2_MODE_MASK |
			UVD_CGC_CTRL__SCPU_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
	data1 |= suvd_flags;

	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL, data);
	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, 0);
	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL, data2);
}

static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.inst[0].ring;
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE);
	data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		UVD_CGC_GATE__UDEC_MASK |
		UVD_CGC_GATE__MPEG2_MASK |
		UVD_CGC_GATE__RBC_MASK |
		UVD_CGC_GATE__LMI_MC_MASK |
		UVD_CGC_GATE__IDCT_MASK |
		UVD_CGC_GATE__MPRD_MASK |
		UVD_CGC_GATE__MPC_MASK |
		UVD_CGC_GATE__LBSI_MASK |
		UVD_CGC_GATE__LRBBM_MASK |
		UVD_CGC_GATE__UDEC_RE_MASK |
		UVD_CGC_GATE__UDEC_CM_MASK |
		UVD_CGC_GATE__UDEC_IT_MASK |
		UVD_CGC_GATE__UDEC_DB_MASK |
		UVD_CGC_GATE__UDEC_MP_MASK |
		UVD_CGC_GATE__WCB_MASK |
		UVD_CGC_GATE__VCPU_MASK |
		UVD_CGC_GATE__SCPU_MASK |
		UVD_CGC_GATE__JPEG_MASK |
		UVD_CGC_GATE__JPEG2_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, data);
	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
}

static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

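	/* route DCLK and VCLK around the DFS dividers while gated */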
	if (enable)
		tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
			GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
	else
		tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
			 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);

	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}


static int uvd_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	uvd_v7_0_set_bypass_mode(adev, enable);

	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
		return 0;

	if (enable) {
		/* disable HW gating and enable Sw gating */
		uvd_v7_0_set_sw_clock_gating(adev);
	} else {
		/* wait for STATUS to clear */
		if (uvd_v7_0_wait_for_idle(handle))
			return -EBUSY;

		/* enable HW gates because UVD is idle */
		/* uvd_v7_0_set_hw_clock_gating(adev); */
	}

	return 0;
}

static int uvd_v7_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst[0].ring;

	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
		return 0;

	WREG32_SOC15(UVD, ring->me, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

	if (state == AMD_PG_STATE_GATE) {
		uvd_v7_0_stop(adev);
		return 0;
	} else {
		return uvd_v7_0_start(adev);
	}
}
#endif

static int uvd_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	/* needed for driver unload */
	return 0;
}

const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
	.name = "uvd_v7_0",
	.early_init = uvd_v7_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v7_0_sw_init,
	.sw_fini = uvd_v7_0_sw_fini,
	.hw_init = uvd_v7_0_hw_init,
	.hw_fini = uvd_v7_0_hw_fini,
	.prepare_suspend = uvd_v7_0_prepare_suspend,
	.suspend = uvd_v7_0_suspend,
	.resume = uvd_v7_0_resume,
	.is_idle = NULL /* uvd_v7_0_is_idle */,
	.wait_for_idle = NULL /* uvd_v7_0_wait_for_idle */,
	.check_soft_reset = NULL /* uvd_v7_0_check_soft_reset */,
	.pre_soft_reset = NULL /* uvd_v7_0_pre_soft_reset */,
	.soft_reset = NULL /* uvd_v7_0_soft_reset */,
	.post_soft_reset = NULL /* uvd_v7_0_post_soft_reset */,
	.set_clockgating_state = uvd_v7_0_set_clockgating_state,
	.set_powergating_state = NULL /* uvd_v7_0_set_powergating_state */,
};

static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v7_0_ring_get_rptr,
	.get_wptr = uvd_v7_0_ring_get_wptr,
	.set_wptr = uvd_v7_0_ring_set_wptr,
	.patch_cs_in_place = uvd_v7_0_ring_patch_cs_in_place,
	.emit_frame_size =
		6 + /* hdp invalidate */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* uvd_v7_0_ring_emit_vm_flush */
		14 + 14, /* uvd_v7_0_ring_emit_fence x2 vm fence */
	.emit_ib_size = 8, /* uvd_v7_0_ring_emit_ib */
	.emit_ib = uvd_v7_0_ring_emit_ib,
	.emit_fence = uvd_v7_0_ring_emit_fence,
	.emit_vm_flush = uvd_v7_0_ring_emit_vm_flush,
	.emit_hdp_flush = uvd_v7_0_ring_emit_hdp_flush,
	.test_ring = uvd_v7_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v7_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v7_0_ring_emit_wreg,
	.emit_reg_wait = uvd_v7_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD_ENC,
	.align_mask = 0x3f,
	.nop = HEVC_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v7_0_enc_ring_get_rptr,
	.get_wptr = uvd_v7_0_enc_ring_get_wptr,
	.set_wptr = uvd_v7_0_enc_ring_set_wptr,
	.emit_frame_size =
		3 + 3 + /* hdp flush / invalidate */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* uvd_v7_0_enc_ring_emit_vm_flush */
		5 + 5 + /* uvd_v7_0_enc_ring_emit_fence x2 vm fence */
		1, /* uvd_v7_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* uvd_v7_0_enc_ring_emit_ib */
	.emit_ib = uvd_v7_0_enc_ring_emit_ib,
	.emit_fence = uvd_v7_0_enc_ring_emit_fence,
	.emit_vm_flush = uvd_v7_0_enc_ring_emit_vm_flush,
	.test_ring = uvd_v7_0_enc_ring_test_ring,
	.test_ib = uvd_v7_0_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = uvd_v7_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v7_0_enc_ring_emit_wreg,
	.emit_reg_wait = uvd_v7_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
		adev->uvd.inst[i].ring.me = i;
	}
}

static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
			adev->uvd.inst[j].ring_enc[i].me = j;
		}
	}
}

static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = {
	.set = uvd_v7_0_set_interrupt_state,
	.process = uvd_v7_0_process_interrupt,
};

static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
		adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
	}
}

const struct amdgpu_ip_block_version uvd_v7_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 7,
	.minor = 0,
	.rev = 0,
	.funcs = &uvd_v7_0_ip_funcs,
};