
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <[email protected]>
 */

#include <linux/delay.h>
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "bif/bif_5_0_d.h"
#include "vi.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
#include "ivsrcid/ivsrcid_vislands30.h"

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v5_0_start(struct amdgpu_device *adev);
static void uvd_v5_0_stop(struct amdgpu_device *adev);
static int uvd_v5_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);

/**
 * uvd_v5_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
{}
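
/*
 * The body above was elided in this copy of the file. The #if 0 block below
 * is a hedged sketch of what a VI-era UVD rptr callback typically does (read
 * the RBC ring-buffer read-pointer register); it is not verified against the
 * upstream implementation.
 */
#if 0
static uint64_t uvd_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* the hardware read pointer lives in the RBC RB_RPTR register */
	return RREG32(mmUVD_RBC_RB_RPTR);
}
#endif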

/**
 * uvd_v5_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
{}
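
/* Hedged sketch of the elided body (unverified): the write pointer is read
 * back from the RBC RB_WPTR register. */
#if 0
static uint64_t uvd_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}
#endif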

/**
 * uvd_v5_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
{}
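
/* Hedged sketch of the elided body (unverified): commit the ring's software
 * write pointer to the RBC RB_WPTR register. */
#if 0
static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}
#endif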

static int uvd_v5_0_early_init(void *handle)
{}

static int uvd_v5_0_sw_init(void *handle)
{}

static int uvd_v5_0_sw_fini(void *handle)
{}

/**
 * uvd_v5_0_hw_init - start and test UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v5_0_hw_init(void *handle)
{}

/**
 * uvd_v5_0_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the UVD block and mark the ring as no longer ready
 */
static int uvd_v5_0_hw_fini(void *handle)
{}
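
/* Hedged sketch of the elided body: cancelling pending idle work and stopping
 * the block if it is still running is the usual teardown pattern here; the
 * idle_work name and the mmUVD_STATUS check are assumptions. */
#if 0
static int uvd_v5_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v5_0_stop(adev);

	return 0;
}
#endif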

static int uvd_v5_0_prepare_suspend(void *handle)
{}

static int uvd_v5_0_suspend(void *handle)
{}

static int uvd_v5_0_resume(void *handle)
{}

/**
 * uvd_v5_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
{}
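
/* Hedged sketch of the elided body: program the VCPU cache base and the
 * firmware cache window. The 64-bit BAR register names, the uvd.inst
 * layout and the size rounding are assumptions; the real function also
 * programs the stack/heap caches and the UDEC tiling config. */
#if 0
static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
{
	const struct common_firmware_header *hdr;
	uint32_t size;

	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes));

	/* point the VCPU cache at the UVD BO (firmware image first) */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
	       lower_32_bits(adev->uvd.inst->gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
	       upper_32_bits(adev->uvd.inst->gpu_addr));

	WREG32(mmUVD_VCPU_CACHE_OFFSET0, AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
}
#endif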

/**
 * uvd_v5_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v5_0_start(struct amdgpu_device *adev)
{}

/**
 * uvd_v5_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v5_0_stop(struct amdgpu_device *adev)
{}
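
/* Hedged sketch of the elided body: stall the memory/register busses, put the
 * VCPU into reset and gate its clock. The real function also waits for the
 * block to drain (polling mmUVD_STATUS), which is omitted here. */
#if 0
static void uvd_v5_0_stop(struct amdgpu_device *adev)
{
	/* Stall UMC and register bus before resetting the VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32(mmUVD_STATUS, 0);
}
#endif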

/**
 * uvd_v5_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{}
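
/* Hedged sketch of the elided body: the fence sequence number goes out
 * through the UVD context-ID register and a VCPU command writes the fence
 * value and raises the trap. Exact register/command usage is an assumption. */
#if 0
static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	/* write the sequence number, then the fence address */
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	/* dummy data writes followed by the trap command */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}
#endif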

/**
 * uvd_v5_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
{}
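
/* Hedged sketch of the elided body: scribble a known value into the UVD
 * context-ID register through the ring and poll it back. The ring-space
 * count and timeout handling are assumptions. */
#if 0
static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}
#endif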

/**
 * uvd_v5_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{}
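
/* Hedged sketch of the elided body: point the RBC at the indirect buffer and
 * hand it the IB size in dwords; the 64-bit BAR register names are
 * assumptions. */
#if 0
static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}
#endif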

static void uvd_v5_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{}

static bool uvd_v5_0_is_idle(void *handle)
{}

static int uvd_v5_0_wait_for_idle(void *handle)
{}

static int uvd_v5_0_soft_reset(void *handle)
{}

static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{}

static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{}

static void uvd_v5_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{}

static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
{}

#if 0
static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
				UVD_CGC_GATE__UDEC_MASK |
				UVD_CGC_GATE__MPEG2_MASK |
				UVD_CGC_GATE__RBC_MASK |
				UVD_CGC_GATE__LMI_MC_MASK |
				UVD_CGC_GATE__IDCT_MASK |
				UVD_CGC_GATE__MPRD_MASK |
				UVD_CGC_GATE__MPC_MASK |
				UVD_CGC_GATE__LBSI_MASK |
				UVD_CGC_GATE__LRBBM_MASK |
				UVD_CGC_GATE__UDEC_RE_MASK |
				UVD_CGC_GATE__UDEC_CM_MASK |
				UVD_CGC_GATE__UDEC_IT_MASK |
				UVD_CGC_GATE__UDEC_DB_MASK |
				UVD_CGC_GATE__UDEC_MP_MASK |
				UVD_CGC_GATE__WCB_MASK |
				UVD_CGC_GATE__VCPU_MASK |
				UVD_CGC_GATE__SCPU_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{}

static int uvd_v5_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{}

static int uvd_v5_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{}

static void uvd_v5_0_get_clockgating_state(void *handle, u64 *flags)
{}

static const struct amd_ip_funcs uvd_v5_0_ip_funcs =;

static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs =;

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
{}

static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs =;

static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
{}

const struct amdgpu_ip_block_version uvd_v5_0_ip_block =;
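
/* The *_funcs tables and the IP-block descriptor above were elided in this
 * copy. Below is a hedged sketch of the usual wiring, built only from the
 * hooks declared in this file; uvd_v5_0_ring_funcs is omitted because its
 * alignment and emit-size constants cannot be recovered from this copy. */
#if 0
static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
	.name = "uvd_v5_0",
	.early_init = uvd_v5_0_early_init,
	.sw_init = uvd_v5_0_sw_init,
	.sw_fini = uvd_v5_0_sw_fini,
	.hw_init = uvd_v5_0_hw_init,
	.hw_fini = uvd_v5_0_hw_fini,
	.prepare_suspend = uvd_v5_0_prepare_suspend,
	.suspend = uvd_v5_0_suspend,
	.resume = uvd_v5_0_resume,
	.is_idle = uvd_v5_0_is_idle,
	.wait_for_idle = uvd_v5_0_wait_for_idle,
	.soft_reset = uvd_v5_0_soft_reset,
	.set_clockgating_state = uvd_v5_0_set_clockgating_state,
	.set_powergating_state = uvd_v5_0_set_powergating_state,
	.get_clockgating_state = uvd_v5_0_get_clockgating_state,
};

static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = {
	.set = uvd_v5_0_set_interrupt_state,
	.process = uvd_v5_0_process_interrupt,
};

const struct amdgpu_ip_block_version uvd_v5_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 5,
	.minor = 0,
	.rev = 0,
	.funcs = &uvd_v5_0_ip_funcs,
};
#endif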