#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "nbio_v7_9.h"
#include "amdgpu_ras.h"
#include "nbio/nbio_7_9_0_offset.h"
#include "nbio/nbio_7_9_0_sh_mask.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include <uapi/linux/kfd_ioctl.h>
#define NPS_MODE_MASK …
#define smnPCIEP_NAK_COUNTER …
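/*
 * NPS_MODE_MASK covers the NPS (memory partition) mode field reported by
 * nbio_v7_9_get_memory_partition_mode() below, and smnPCIEP_NAK_COUNTER is
 * the SMN address of the PCIe NAK counter sampled by
 * nbio_v7_9_get_pcie_replay_count().
 */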
static void nbio_v7_9_remap_hdp_registers(struct amdgpu_device *adev)
{ … }
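/*
 * As on other NBIO revisions, the remap helper above points the HDP MEM/REG
 * flush control registers at the remapped register hole (see
 * nbio_v7_9_set_reg_remap() and MMIO_REG_HOLE_OFFSET further down) so user
 * mode clients such as KFD, using the offsets from <uapi/linux/kfd_ioctl.h>
 * included above, can flush HDP without a kernel round trip.
 */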
static u32 nbio_v7_9_get_rev_id(struct amdgpu_device *adev)
{ … }
static void nbio_v7_9_mc_access_enable(struct amdgpu_device *adev, bool enable)
{ … }
static u32 nbio_v7_9_get_memsize(struct amdgpu_device *adev)
{ … }
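/*
 * Doorbell plumbing. Each *_doorbell_range() helper below programs a doorbell
 * aperture entry (BAR offset plus size) for its client and clears it when
 * use_doorbell is false. A minimal sketch of the usual pattern, with
 * placeholder register and field names rather than the ones these bodies use:
 *
 *	u32 range = 0;
 *
 *	if (use_doorbell) {
 *		range = REG_SET_FIELD(range, DOORBELL_RANGE, OFFSET, doorbell_index);
 *		range = REG_SET_FIELD(range, DOORBELL_RANGE, SIZE, doorbell_size);
 *	}
 *	WREG32(doorbell_range_reg, range);
 */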
static void nbio_v7_9_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
                                          bool use_doorbell, int doorbell_index, int doorbell_size)
{ … }
static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell,
                                         int doorbell_index, int instance)
{ … }
static void nbio_v7_9_enable_doorbell_aperture(struct amdgpu_device *adev,
                                               bool enable)
{ … }
static void nbio_v7_9_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
                                                        bool enable)
{ … }
static void nbio_v7_9_ih_doorbell_range(struct amdgpu_device *adev,
                                        bool use_doorbell, int doorbell_index)
{ … }
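/*
 * BIF clock gating: the two update_* helpers below toggle medium-grain clock
 * gating and light sleep for the BIF, and nbio_v7_9_get_clockgating_state()
 * reports what is currently enabled in the usual AMD_CG_SUPPORT_BIF_* form.
 */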
static void nbio_v7_9_update_medium_grain_clock_gating(struct amdgpu_device *adev,
                                                       bool enable)
{ … }
static void nbio_v7_9_update_medium_grain_light_sleep(struct amdgpu_device *adev,
                                                      bool enable)
{ … }
static void nbio_v7_9_get_clockgating_state(struct amdgpu_device *adev,
                                            u64 *flags)
{ … }
static void nbio_v7_9_ih_control(struct amdgpu_device *adev)
{ … }
static u32 nbio_v7_9_get_hdp_flush_req_offset(struct amdgpu_device *adev)
{ … }
static u32 nbio_v7_9_get_hdp_flush_done_offset(struct amdgpu_device *adev)
{ … }
static u32 nbio_v7_9_get_pcie_index_offset(struct amdgpu_device *adev)
{ … }
static u32 nbio_v7_9_get_pcie_data_offset(struct amdgpu_device *adev)
{ … }
static u32 nbio_v7_9_get_pcie_index_hi_offset(struct amdgpu_device *adev)
{ … }
const struct nbio_hdp_flush_reg nbio_v7_9_hdp_flush_reg = …;
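/*
 * The table above provides the per-client HDP flush "done" bits that ring
 * backends pair with the request/done register offsets returned by the
 * helpers above. A rough consumer-side sketch (the real emit code lives in
 * the GFX/SDMA ring code, so take the shape here as illustrative):
 *
 *	const struct nbio_hdp_flush_reg *hf = adev->nbio.hdp_flush_reg;
 *	u32 req = adev->nbio.funcs->get_hdp_flush_req_offset(adev);
 *	u32 done = adev->nbio.funcs->get_hdp_flush_done_offset(adev);
 *
 *	... write hf->ref_and_mask_<client> to req, poll done for the same bit ...
 */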
static void nbio_v7_9_enable_doorbell_interrupt(struct amdgpu_device *adev,
                                                bool enable)
{ … }
static int nbio_v7_9_get_compute_partition_mode(struct amdgpu_device *adev)
{ … }
static u32 nbio_v7_9_get_memory_partition_mode(struct amdgpu_device *adev,
                                               u32 *supp_modes)
{ … }
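/*
 * Partition queries: the two helpers above report the accelerator's current
 * compute partition mode and memory (NPS) partition mode, which the driver's
 * XCP partition management consumes when splitting the device.
 */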
static void nbio_v7_9_init_registers(struct amdgpu_device *adev)
{ … }
static u64 nbio_v7_9_get_pcie_replay_count(struct amdgpu_device *adev)
{ … }
#define MMIO_REG_HOLE_OFFSET …
static void nbio_v7_9_set_reg_remap(struct amdgpu_device *adev)
{ … }
const struct amdgpu_nbio_funcs nbio_v7_9_funcs = …;
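/*
 * The ops in nbio_v7_9_funcs are reached through the per-device ops pointer
 * that IP discovery fills in for NBIO 7.9 parts rather than being called
 * directly. A minimal sketch of that consumer side, assuming an already
 * probed adev:
 *
 *	adev->nbio.funcs = &nbio_v7_9_funcs;
 *	adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg;
 *	...
 *	u32 memsize = adev->nbio.funcs->get_memsize(adev);
 *
 *	adev->nbio.funcs->ih_control(adev);
 */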
static void nbio_v7_9_query_ras_error_count(struct amdgpu_device *adev,
                                            void *ras_error_status)
{ … }
static void nbio_v7_9_handle_ras_controller_intr_no_bifring(struct amdgpu_device *adev)
{ … }
static void nbio_v7_9_handle_ras_err_event_athub_intr_no_bifring(struct amdgpu_device *adev)
{ … }
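/*
 * The two handle_*_intr_no_bifring() helpers above service fatal RAS
 * controller and ERREVENT_ATHUB conditions without going through the BIF
 * interrupt ring; the rest of the driver reaches them through the
 * amdgpu_nbio_ras callbacks (see nbio_v7_9_ras at the end of this file).
 */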
static int nbio_v7_9_set_ras_controller_irq_state(struct amdgpu_device *adev,
                                                  struct amdgpu_irq_src *src,
                                                  unsigned int type,
                                                  enum amdgpu_interrupt_state state)
{ … }
static int nbio_v7_9_process_ras_controller_irq(struct amdgpu_device *adev,
                                                struct amdgpu_irq_src *source,
                                                struct amdgpu_iv_entry *entry)
{ … }
static int nbio_v7_9_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev,
                                                       struct amdgpu_irq_src *src,
                                                       unsigned int type,
                                                       enum amdgpu_interrupt_state state)
{ … }
static int nbio_v7_9_process_err_event_athub_irq(struct amdgpu_device *adev,
                                                 struct amdgpu_irq_src *source,
                                                 struct amdgpu_iv_entry *entry)
{ … }
static const struct amdgpu_irq_src_funcs nbio_v7_9_ras_controller_irq_funcs = …;
static const struct amdgpu_irq_src_funcs nbio_v7_9_ras_err_event_athub_irq_funcs = …;
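/*
 * The two irq_src_funcs tables above bind the set-state and process callbacks
 * for the RAS controller and ERREVENT_ATHUB interrupt sources; the init_*()
 * helpers below register them via amdgpu_irq_add_id() using the NBIF 7.4
 * source IDs from <ivsrcid/nbio/irqsrcs_nbif_7_4.h>. A rough sketch of that
 * registration (treat the source ID name as illustrative):
 *
 *	adev->nbio.ras_controller_irq.funcs = &nbio_v7_9_ras_controller_irq_funcs;
 *	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
 *			      NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT,
 *			      &adev->nbio.ras_controller_irq);
 */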
static int nbio_v7_9_init_ras_controller_interrupt(struct amdgpu_device *adev)
{ … }
static int nbio_v7_9_init_ras_err_event_athub_interrupt(struct amdgpu_device *adev)
{ … }
const struct amdgpu_ras_block_hw_ops nbio_v7_9_ras_hw_ops = …;
struct amdgpu_nbio_ras nbio_v7_9_ras = …;
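/*
 * nbio_v7_9_ras is the piece the RAS core consumes: the hw_ops above drive
 * error counting, and the interrupt init hooks are invoked once RAS support
 * is enabled for the device. A minimal sketch of the hookup done elsewhere in
 * the driver:
 *
 *	adev->nbio.ras = &nbio_v7_9_ras;
 */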