linux/drivers/gpu/drm/amd/amdgpu/amdgpu.h

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_H__
#define __AMDGPU_H__

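/*
 * Undefine any pr_fmt()/dev_fmt() inherited from earlier includes before
 * redefining them below, so every pr_*()/dev_*() call in the driver picks
 * up the driver's own message format.
 */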
#ifdef pr_fmt
#undef pr_fmt
#endif

#define pr_fmt(fmt)

#ifdef dev_fmt
#undef dev_fmt
#endif

#define dev_fmt(fmt)

#include "amdgpu_ctx.h"

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/rbtree.h>
#include <linux/hashtable.h>
#include <linux/dma-fence.h>
#include <linux/pci.h>

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>

#include <kgd_kfd_interface.h>
#include "dm_pp_interface.h"
#include "kgd_pp_interface.h"

#include "amd_shared.h"
#include "amdgpu_mode.h"
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
#include "amdgpu_ucode.h"
#include "amdgpu_ttm.h"
#include "amdgpu_psp.h"
#include "amdgpu_gds.h"
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_vm.h"
#include "amdgpu_dpm.h"
#include "amdgpu_acp.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_vcn.h"
#include "amdgpu_jpeg.h"
#include "amdgpu_vpe.h"
#include "amdgpu_umsch_mm.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gfx.h"
#include "amdgpu_sdma.h"
#include "amdgpu_lsdma.h"
#include "amdgpu_nbio.h"
#include "amdgpu_hdp.h"
#include "amdgpu_dm.h"
#include "amdgpu_virt.h"
#include "amdgpu_csa.h"
#include "amdgpu_mes_ctx.h"
#include "amdgpu_gart.h"
#include "amdgpu_debugfs.h"
#include "amdgpu_job.h"
#include "amdgpu_bo_list.h"
#include "amdgpu_gem.h"
#include "amdgpu_doorbell.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_discovery.h"
#include "amdgpu_mes.h"
#include "amdgpu_umc.h"
#include "amdgpu_mmhub.h"
#include "amdgpu_gfxhub.h"
#include "amdgpu_df.h"
#include "amdgpu_smuio.h"
#include "amdgpu_fdinfo.h"
#include "amdgpu_mca.h"
#include "amdgpu_aca.h"
#include "amdgpu_ras.h"
#include "amdgpu_xcp.h"
#include "amdgpu_seq64.h"
#include "amdgpu_reg_state.h"
#if defined(CONFIG_DRM_AMD_ISP)
#include "amdgpu_isp.h"
#endif

#define MAX_GPU_INSTANCE

struct amdgpu_gpu_instance {};

struct amdgpu_mgpu_info {};

enum amdgpu_ss {};

struct amdgpu_hwip_reg_entry {};

struct amdgpu_watchdog_timer {};

#define AMDGPU_MAX_TIMEOUT_PARAM_LENGTH

/*
 * Module parameters.
 */
extern int amdgpu_modeset;
extern unsigned int amdgpu_vram_limit;
extern int amdgpu_vis_vram_limit;
extern int amdgpu_gart_size;
extern int amdgpu_gtt_size;
extern int amdgpu_moverate;
extern int amdgpu_audio;
extern int amdgpu_disp_priority;
extern int amdgpu_hw_i2c;
extern int amdgpu_pcie_gen2;
extern int amdgpu_msi;
extern char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH];
extern int amdgpu_dpm;
extern int amdgpu_fw_load_type;
extern int amdgpu_aspm;
extern int amdgpu_runtime_pm;
extern uint amdgpu_ip_block_mask;
extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
extern int amdgpu_vm_block_size;
extern int amdgpu_vm_fragment_size;
extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_vm_update_mode;
extern int amdgpu_exp_hw_support;
extern int amdgpu_dc;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern uint amdgpu_pcie_gen_cap;
extern uint amdgpu_pcie_lane_cap;
extern u64 amdgpu_cg_mask;
extern uint amdgpu_pg_mask;
extern uint amdgpu_sdma_phase_quantum;
extern char *amdgpu_disable_cu;
extern char *amdgpu_virtual_display;
extern uint amdgpu_pp_feature_mask;
extern uint amdgpu_force_long_training;
extern int amdgpu_lbpw;
extern int amdgpu_compute_multipipe;
extern int amdgpu_gpu_recovery;
extern int amdgpu_emu_mode;
extern uint amdgpu_smu_memory_pool_size;
extern int amdgpu_smu_pptable_id;
extern uint amdgpu_dc_feature_mask;
extern uint amdgpu_freesync_vid_mode;
extern uint amdgpu_dc_debug_mask;
extern uint amdgpu_dc_visual_confirm;
extern int amdgpu_dm_abm_level;
extern int amdgpu_backlight;
extern int amdgpu_damage_clips;
extern struct amdgpu_mgpu_info mgpu_info;
extern int amdgpu_ras_enable;
extern uint amdgpu_ras_mask;
extern int amdgpu_bad_page_threshold;
extern bool amdgpu_ignore_bad_page_threshold;
extern struct amdgpu_watchdog_timer amdgpu_watchdog_timer;
extern int amdgpu_async_gfx_ring;
extern int amdgpu_mcbp;
extern int amdgpu_discovery;
extern int amdgpu_mes;
extern int amdgpu_mes_log_enable;
extern int amdgpu_mes_kiq;
extern int amdgpu_uni_mes;
extern int amdgpu_noretry;
extern int amdgpu_force_asic_type;
extern int amdgpu_smartshift_bias;
extern int amdgpu_use_xgmi_p2p;
extern int amdgpu_mtype_local;
extern bool enforce_isolation;
#ifdef CONFIG_HSA_AMD
extern int sched_policy;
extern bool debug_evictions;
extern bool no_system_mem_limit;
extern int halt_if_hws_hang;
#else
static const int __maybe_unused sched_policy = KFD_SCHED_POLICY_HWS;
static const bool __maybe_unused debug_evictions; /* = false */
static const bool __maybe_unused no_system_mem_limit;
static const int __maybe_unused halt_if_hws_hang;
#endif
#ifdef CONFIG_HSA_AMD_P2P
extern bool pcie_p2p;
#endif

extern int amdgpu_tmz;
extern int amdgpu_reset_method;

#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
extern int amdgpu_cik_support;
#endif
extern int amdgpu_num_kcq;

#define AMDGPU_VCNFW_LOG_SIZE
#define AMDGPU_UMSCHFW_LOG_SIZE
extern int amdgpu_vcnfw_log;
extern int amdgpu_sg_display;
extern int amdgpu_umsch_mm;
extern int amdgpu_seamless;
extern int amdgpu_umsch_mm_fwlog;

extern int amdgpu_user_partt_mode;
extern int amdgpu_agp;

extern int amdgpu_wbrf;
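
/*
 * These parameters are defined and registered in amdgpu_drv.c; a typical
 * pairing looks like (illustrative, see amdgpu_drv.c for the
 * authoritative list):
 *
 *   MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
 *   module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
 */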

#define AMDGPU_VM_MAX_NUM_CTX
#define AMDGPU_SG_THRESHOLD
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS
#define AMDGPU_MAX_USEC_TIMEOUT
#define AMDGPU_FENCE_JIFFIES_TIMEOUT
#define AMDGPU_DEBUGFS_MAX_COMPONENTS
#define AMDGPUFB_CONN_LIMIT
#define AMDGPU_BIOS_NUM_SCRATCH

#define AMDGPU_VBIOS_VGA_ALLOCATION

/* hard reset data */
#define AMDGPU_ASIC_RESET_DATA

/* reset flags */
#define AMDGPU_RESET_GFX
#define AMDGPU_RESET_COMPUTE
#define AMDGPU_RESET_DMA
#define AMDGPU_RESET_CP
#define AMDGPU_RESET_GRBM
#define AMDGPU_RESET_DMA1
#define AMDGPU_RESET_RLC
#define AMDGPU_RESET_SEM
#define AMDGPU_RESET_IH
#define AMDGPU_RESET_VMC
#define AMDGPU_RESET_MC
#define AMDGPU_RESET_DISPLAY
#define AMDGPU_RESET_UVD
#define AMDGPU_RESET_VCE
#define AMDGPU_RESET_VCE1

/* max cursor sizes (in pixels) */
#define CIK_CURSOR_WIDTH
#define CIK_CURSOR_HEIGHT

/* smart shift bias level limits */
#define AMDGPU_SMARTSHIFT_MAX_BIAS
#define AMDGPU_SMARTSHIFT_MIN_BIAS

/* Extra time delay (in ms) to eliminate the influence of momentary temperature fluctuations */
#define AMDGPU_SWCTF_EXTRA_DELAY

struct amdgpu_xcp_mgr;
struct amdgpu_device;
struct amdgpu_irq_src;
struct amdgpu_fpriv;
struct amdgpu_bo_va_mapping;
struct kfd_vm_fault_info;
struct amdgpu_hive_info;
struct amdgpu_reset_context;
struct amdgpu_reset_control;

enum amdgpu_cp_irq {};

enum amdgpu_thermal_irq {};

enum amdgpu_kiq_irq {};
#define SRIOV_USEC_TIMEOUT
#define MAX_KIQ_REG_WAIT
#define MAX_KIQ_REG_BAILOUT_INTERVAL
#define MAX_KIQ_REG_TRY

int amdgpu_device_ip_set_clockgating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state);
int amdgpu_device_ip_set_powergating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_powergating_state state);
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
					    u64 *flags);
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
				   enum amd_ip_block_type block_type);
bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
			      enum amd_ip_block_type block_type);
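
/*
 * Typical use of the IP state helpers, e.g. from device-level clockgating
 * policy code (sketch; error handling elided):
 *
 *   r = amdgpu_device_ip_set_clockgating_state(adev,
 *                                              AMD_IP_BLOCK_TYPE_GFX,
 *                                              AMD_CG_STATE_GATE);
 */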

#define AMDGPU_MAX_IP_NUM

struct amdgpu_ip_block_status {};

struct amdgpu_ip_block_version {};

struct amdgpu_ip_block {};

int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
				       enum amd_ip_block_type type,
				       u32 major, u32 minor);

struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
			      enum amd_ip_block_type type);

int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
			       const struct amdgpu_ip_block_version *ip_block_version);
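
/*
 * SoC-specific code registers its IP blocks during early init, for
 * example (illustrative, from the soc15 family):
 *
 *   r = amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
 *   if (r)
 *           return r;
 */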

/*
 * BIOS.
 */
bool amdgpu_get_bios(struct amdgpu_device *adev);
bool amdgpu_read_bios(struct amdgpu_device *adev);
bool amdgpu_soc15_read_bios_from_rom(struct amdgpu_device *adev,
				     u8 *bios, u32 length_bytes);
/*
 * Clocks
 */

#define AMDGPU_MAX_PPLL

struct amdgpu_clock {};

/* Sub-allocation manager; it has to be protected by another lock.
 * By design this is a helper for other parts of the driver
 * like the indirect buffer or semaphore, which both have their own
 * locking.
 *
 * The principle is simple: we keep a list of sub-allocations in offset
 * order (the first entry has offset == 0, the last entry has the
 * highest offset).
 *
 * When allocating a new object we first check if there is room at
 * the end, i.e. total_size - (last_object_offset + last_object_size) >=
 * alloc_size. If so we allocate the new object there.
 *
 * When there is not enough room at the end, we start waiting for
 * each sub object until we reach object_offset + object_size >=
 * alloc_size; this object then becomes the sub object we return.
 *
 * Alignment can't be bigger than the page size.
 *
 * Holes are not considered for allocation to keep things simple.
 * The assumption is that there won't be holes (all objects use the
 * same alignment).
 */

struct amdgpu_sa_manager {};
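
/*
 * The "room at the end" test described above, as a sketch; the field
 * names (total_size, last_offset, last_size) are illustrative and do
 * not reflect the real struct layout:
 *
 *   if (sa->total_size - (sa->last_offset + sa->last_size) >= alloc_size)
 *           offset = sa->last_offset + sa->last_size; // append at the end
 *   else
 *           wait_for_frees(sa, alloc_size); // hypothetical: reclaim until it fits
 */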

int amdgpu_fence_slab_init(void);
void amdgpu_fence_slab_fini(void);

/*
 * IRQS.
 */

struct amdgpu_flip_work {};


/*
 * file private structure
 */

struct amdgpu_fpriv {};

int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);

/*
 * Writeback
 */
#define AMDGPU_MAX_WB

struct amdgpu_wb {};

int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb);
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb);
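
/*
 * Usage sketch (error paths elided): a caller reserves a writeback slot,
 * lets an engine write a status or fence value into it, then returns it:
 *
 *   u32 wb;
 *
 *   r = amdgpu_device_wb_get(adev, &wb);
 *   if (r)
 *           return r;
 *   ...                              // point the engine at the slot
 *   amdgpu_device_wb_free(adev, wb);
 */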

/*
 * Benchmarking
 */
int amdgpu_benchmark(struct amdgpu_device *adev, int test_number);

/*
 * ASIC specific register table accessible by UMD
 */
struct amdgpu_allowed_register_entry {};

/**
 * enum amd_reset_method - Methods for resetting AMD GPU devices
 *
 * @AMD_RESET_METHOD_NONE: The device will not be reset.
 * @AMD_RESET_LEGACY: Method reserved for SI, CIK and VI ASICs.
 * @AMD_RESET_MODE0: Reset the entire ASIC. Not currently available for any
 *                   device.
 * @AMD_RESET_MODE1: Resets all IP blocks on the ASIC (SDMA, GFX, VCN, etc.)
 *                   individually. Suitable only for some discrete GPUs; not
 *                   available for all ASICs.
 * @AMD_RESET_MODE2: Resets a lesser level of IPs compared to MODE1. Which IPs
 *                   are reset depends on the ASIC. Notably doesn't reset IPs
 *                   shared with the CPU on APUs or the memory controllers (so
 *                   VRAM is not lost). Not available on all ASICs.
 * @AMD_RESET_BACO: BACO (Bus Alive, Chip Off) method powers off and on the card
 *                  but without powering off the PCI bus. Suitable only for
 *                  discrete GPUs.
 * @AMD_RESET_PCI: Does a full bus reset using core Linux subsystem PCI reset
 *                 and does a secondary bus reset or FLR, depending on what the
 *                 underlying hardware supports.
 *
 * Methods available to the AMD GPU driver for resetting the device. Not all
 * methods are suitable for every device. Users can override the method with
 * the module parameter `reset_method`.
 */
enum amd_reset_method {};
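
/*
 * The reset_method module parameter selects one of the methods above; at
 * the time of writing the mapping is -1 = auto (default), 0 = legacy,
 * 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco/bamaco, 5 = pci, e.g.:
 *
 *   # modprobe amdgpu reset_method=4
 */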

struct amdgpu_video_codec_info {};

#define codec_info_build(type, width, height, level)

struct amdgpu_video_codecs {};

/*
 * ASIC specific functions.
 */
struct amdgpu_asic_funcs {};

/*
 * IOCTL.
 */
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);

int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *filp);
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);

/* VRAM scratch page for the HDP bug; default VRAM page */
struct amdgpu_mem_scratch {};

/*
 * CGS
 */
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);

/*
 * Core structure, functions and helpers.
 */
/* MMIO register accessor callbacks */
typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device *, uint32_t);
typedef void (*amdgpu_wreg_t)(struct amdgpu_device *, uint32_t, uint32_t);

typedef uint32_t (*amdgpu_rreg_ext_t)(struct amdgpu_device *, uint64_t);
typedef void (*amdgpu_wreg_ext_t)(struct amdgpu_device *, uint64_t, uint32_t);

typedef uint64_t (*amdgpu_rreg64_t)(struct amdgpu_device *, uint32_t);
typedef void (*amdgpu_wreg64_t)(struct amdgpu_device *, uint32_t, uint64_t);

typedef uint64_t (*amdgpu_rreg64_ext_t)(struct amdgpu_device *, uint64_t);
typedef void (*amdgpu_wreg64_ext_t)(struct amdgpu_device *, uint64_t, uint64_t);

typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device *, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device *, uint32_t, uint32_t, uint32_t);

struct amdgpu_mmio_remap {};

/* Define the HW IP blocks used by the driver; add more if necessary */
enum amd_hw_ip_block_type {};

#define HWIP_MAX_INSTANCE

#define HW_ID_MAX
#define IP_VERSION_FULL(mj, mn, rv, var, srev)
#define IP_VERSION(mj, mn, rv)
#define IP_VERSION_MAJ(ver)
#define IP_VERSION_MIN(ver)
#define IP_VERSION_REV(ver)
#define IP_VERSION_VARIANT(ver)
#define IP_VERSION_SUBREV(ver)
#define IP_VERSION_MAJ_MIN_REV(ver)

struct amdgpu_ip_map_info {};

struct amd_powerplay {};

struct ip_discovery_top;

/* polaris10 kickers */
#define ASICID_IS_P20(did, rid)

#define ASICID_IS_P30(did, rid)

/* polaris11 kickers */
#define ASICID_IS_P21(did, rid)

#define ASICID_IS_P31(did, rid)

/* polaris12 kickers */
#define ASICID_IS_P23(did, rid)

struct amdgpu_mqd_prop {};

struct amdgpu_mqd {};

#define AMDGPU_RESET_MAGIC_NUM
#define AMDGPU_MAX_DF_PERFMONS
struct amdgpu_reset_domain;
struct amdgpu_fru_info;

struct amdgpu_reset_info {};

/*
 * Non-zero (true) if the GPU has VRAM. Zero (false) otherwise.
 */
#define AMDGPU_HAS_VRAM(_adev)

struct amdgpu_device {};

static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
					 uint8_t ip, uint8_t inst)
{}

static inline uint32_t amdgpu_ip_version_full(const struct amdgpu_device *adev,
					      uint8_t ip, uint8_t inst)
{}
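
/*
 * Callers typically gate hardware-generation specific paths on the
 * discovered IP version, e.g.:
 *
 *   if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 3))
 *           ...                     // GC 9.4.3 and newer handling
 */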

static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
{}

static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev)
{}

static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_device *bdev)
{}
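
/*
 * Illustrative conversions in a DRM entry point (dev as supplied by the
 * DRM core):
 *
 *   struct amdgpu_device *adev = drm_to_adev(dev);
 *   struct drm_device *ddev = adev_to_drm(adev);
 */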

int amdgpu_device_init(struct amdgpu_device *adev,
		       uint32_t flags);
void amdgpu_device_fini_hw(struct amdgpu_device *adev);
void amdgpu_device_fini_sw(struct amdgpu_device *adev);

int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);

void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
			     void *buf, size_t size, bool write);
size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
				 void *buf, size_t size, bool write);

void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
			       void *buf, size_t size, bool write);
uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
			    uint32_t inst, uint32_t reg_addr, char reg_name[],
			    uint32_t expected_value, uint32_t mask);
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
			    uint32_t reg, uint32_t acc_flags);
u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
				    u64 reg_addr);
uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev,
				uint32_t reg, uint32_t acc_flags,
				uint32_t xcc_id);
void amdgpu_device_wreg(struct amdgpu_device *adev,
			uint32_t reg, uint32_t v,
			uint32_t acc_flags);
void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
				     u64 reg_addr, u32 reg_data);
void amdgpu_device_xcc_wreg(struct amdgpu_device *adev,
			    uint32_t reg, uint32_t v,
			    uint32_t acc_flags,
			    uint32_t xcc_id);
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
			     uint32_t reg, uint32_t v, uint32_t xcc_id);
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);

u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
				u32 reg_addr);
u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
				  u32 reg_addr);
u64 amdgpu_device_indirect_rreg64_ext(struct amdgpu_device *adev,
				  u64 reg_addr);
void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
				 u32 reg_addr, u32 reg_data);
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
				   u32 reg_addr, u64 reg_data);
void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev,
				   u64 reg_addr, u64 reg_data);
u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev);
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);

void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev);

int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
				 struct amdgpu_reset_context *reset_context);

int amdgpu_do_asic_reset(struct list_head *device_list_handle,
			 struct amdgpu_reset_context *reset_context);

int emu_soc_asic_init(struct amdgpu_device *adev);

/*
 * Registers read & write functions.
 */
#define AMDGPU_REGS_NO_KIQ
#define AMDGPU_REGS_RLC

#define RREG32_NO_KIQ(reg)
#define WREG32_NO_KIQ(reg, v)

#define RREG32_KIQ(reg)
#define WREG32_KIQ(reg, v)

#define RREG8(reg)
#define WREG8(reg, v)

#define RREG32(reg)
#define DREG32(reg)
#define WREG32(reg, v)
#define REG_SET(FIELD, v)
#define REG_GET(FIELD, v)
#define RREG32_XCC(reg, inst)
#define WREG32_XCC(reg, v, inst)
#define RREG32_PCIE(reg)
#define WREG32_PCIE(reg, v)
#define RREG32_PCIE_PORT(reg)
#define WREG32_PCIE_PORT(reg, v)
#define RREG32_PCIE_EXT(reg)
#define WREG32_PCIE_EXT(reg, v)
#define RREG64_PCIE(reg)
#define WREG64_PCIE(reg, v)
#define RREG64_PCIE_EXT(reg)
#define WREG64_PCIE_EXT(reg, v)
#define RREG32_SMC(reg)
#define WREG32_SMC(reg, v)
#define RREG32_UVD_CTX(reg)
#define WREG32_UVD_CTX(reg, v)
#define RREG32_DIDT(reg)
#define WREG32_DIDT(reg, v)
#define RREG32_GC_CAC(reg)
#define WREG32_GC_CAC(reg, v)
#define RREG32_SE_CAC(reg)
#define WREG32_SE_CAC(reg, v)
#define RREG32_AUDIO_ENDPT(block, reg)
#define WREG32_AUDIO_ENDPT(block, reg, v)
#define WREG32_P(reg, val, mask)
#define WREG32_AND(reg, and)
#define WREG32_OR(reg, or)
#define WREG32_PLL_P(reg, val, mask)

#define WREG32_SMC_P(_Reg, _Val, _Mask)

#define DREG32_SYS(sqf, adev, reg)

#define REG_FIELD_SHIFT(reg, field)
#define REG_FIELD_MASK(reg, field)

#define REG_SET_FIELD(orig_val, reg, field, field_val)

#define REG_GET_FIELD(value, reg, field)

#define WREG32_FIELD(reg, field, val)

#define WREG32_FIELD_OFFSET(reg, offset, field, val)

#define AMDGPU_GET_REG_FIELD(x, h, l)
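
/*
 * Typical read-modify-write with the field helpers; mmGB_ADDR_CONFIG and
 * NUM_PIPES stand in for any register/field pair generated from the
 * register headers:
 *
 *   u32 tmp = RREG32(mmGB_ADDR_CONFIG);
 *   tmp = REG_SET_FIELD(tmp, GB_ADDR_CONFIG, NUM_PIPES, 4);
 *   WREG32(mmGB_ADDR_CONFIG, tmp);
 */
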
/*
 * BIOS helpers.
 */
#define RBIOS8(i)
#define RBIOS16(i)
#define RBIOS32(i)

/*
 * ASICs macro.
 */
#define amdgpu_asic_set_vga_state(adev, state)
#define amdgpu_asic_reset(adev)
#define amdgpu_asic_reset_method(adev)
#define amdgpu_asic_get_xclk(adev)
#define amdgpu_asic_set_uvd_clocks(adev, v, d)
#define amdgpu_asic_set_vce_clocks(adev, ev, ec)
#define amdgpu_get_pcie_lanes(adev)
#define amdgpu_set_pcie_lanes(adev, l)
#define amdgpu_asic_get_gpu_clock_counter(adev)
#define amdgpu_asic_read_disabled_bios(adev)
#define amdgpu_asic_read_bios_from_rom(adev, b, l)
#define amdgpu_asic_read_register(adev, se, sh, offset, v)
#define amdgpu_asic_get_config_memsize(adev)
#define amdgpu_asic_flush_hdp(adev, r)
#define amdgpu_asic_invalidate_hdp(adev, r)
#define amdgpu_asic_need_full_reset(adev)
#define amdgpu_asic_init_doorbell_index(adev)
#define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1)
#define amdgpu_asic_need_reset_on_init(adev)
#define amdgpu_asic_get_pcie_replay_count(adev)
#define amdgpu_asic_supports_baco(adev)
#define amdgpu_asic_pre_asic_init(adev)
#define amdgpu_asic_update_umd_stable_pstate(adev, enter)
#define amdgpu_asic_query_video_codecs(adev, e, c)

#define amdgpu_inc_vram_lost(adev)

#define BIT_MASK_UPPER(i)
#define for_each_inst(i, inst_mask)

/* Common functions */
bool amdgpu_device_has_job_running(struct amdgpu_device *adev);
bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
			      struct amdgpu_job *job,
			      struct amdgpu_reset_context *reset_context);
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
int amdgpu_device_pci_reset(struct amdgpu_device *adev);
bool amdgpu_device_need_post(struct amdgpu_device *adev);
bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev);
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);

void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
				  u64 num_vis_bytes);
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev);
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size);

int amdgpu_device_mode1_reset(struct amdgpu_device *adev);
bool amdgpu_device_supports_atpx(struct drm_device *dev);
bool amdgpu_device_supports_px(struct drm_device *dev);
bool amdgpu_device_supports_boco(struct drm_device *dev);
bool amdgpu_device_supports_smart_shift(struct drm_device *dev);
int amdgpu_device_supports_baco(struct drm_device *dev);
void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev);
bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
				      struct amdgpu_device *peer_adev);
int amdgpu_device_baco_enter(struct drm_device *dev);
int amdgpu_device_baco_exit(struct drm_device *dev);

void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
		struct amdgpu_ring *ring);
void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
		struct amdgpu_ring *ring);

void amdgpu_device_halt(struct amdgpu_device *adev);
u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
				u32 reg);
void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
				u32 reg, u32 v);
struct dma_fence *amdgpu_device_get_gang(struct amdgpu_device *adev);
struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
					    struct dma_fence *gang);
bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev);

/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
void amdgpu_register_atpx_handler(void);
void amdgpu_unregister_atpx_handler(void);
bool amdgpu_has_atpx_dgpu_power_cntl(void);
bool amdgpu_is_atpx_hybrid(void);
bool amdgpu_atpx_dgpu_req_power_for_displays(void);
bool amdgpu_has_atpx(void);
#else
static inline void amdgpu_register_atpx_handler(void) {}
static inline void amdgpu_unregister_atpx_handler(void) {}
static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; }
static inline bool amdgpu_has_atpx(void) { return false; }
#endif

#if defined(CONFIG_VGA_SWITCHEROO) && defined(CONFIG_ACPI)
void *amdgpu_atpx_get_dhandle(void);
#else
static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; }
#endif

/*
 * KMS
 */
extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
extern const int amdgpu_max_kms_ioctl;

int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags);
void amdgpu_driver_unload_kms(struct drm_device *dev);
void amdgpu_driver_lastclose_kms(struct drm_device *dev);
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv);
void amdgpu_driver_release_kms(struct drm_device *dev);

int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
int amdgpu_device_prepare(struct drm_device *dev);
int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc);
int amdgpu_enable_vblank_kms(struct drm_crtc *crtc);
void amdgpu_disable_vblank_kms(struct drm_crtc *crtc);
int amdgpu_info_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *filp);

/*
 * functions used by amdgpu_encoder.c
 */
struct amdgpu_afmt_acr {};

struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock);

/* amdgpu_acpi.c */

struct amdgpu_numa_info {};

/* ATCS Device/Driver State */
#define AMDGPU_ATCS_PSC_DEV_STATE_D0
#define AMDGPU_ATCS_PSC_DEV_STATE_D3_HOT
#define AMDGPU_ATCS_PSC_DRV_STATE_OPR
#define AMDGPU_ATCS_PSC_DRV_STATE_NOT_OPR

#if defined(CONFIG_ACPI)
int amdgpu_acpi_init(struct amdgpu_device *adev);
void amdgpu_acpi_fini(struct amdgpu_device *adev);
bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev);
bool amdgpu_acpi_is_power_shift_control_supported(void);
int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
						u8 perf_req, bool advertise);
int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
				    u8 dev_state, bool drv_state);
int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state);
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, u64 *tmr_offset,
			     u64 *tmr_size);
int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev, int xcc_id,
			     struct amdgpu_numa_info *numa_info);

void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev);
void amdgpu_acpi_detect(void);
void amdgpu_acpi_release(void);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev,
					   u64 *tmr_offset, u64 *tmr_size)
{
	return -EINVAL;
}
static inline int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev,
					   int xcc_id,
					   struct amdgpu_numa_info *numa_info)
{
	return -EINVAL;
}
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; }
static inline void amdgpu_acpi_detect(void) { }
static inline void amdgpu_acpi_release(void) { }
static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
						  u8 dev_state, bool drv_state) { return 0; }
static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
						 enum amdgpu_ss ss_state) { return 0; }
static inline void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps) { }
#endif

#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
void amdgpu_choose_low_power_state(struct amdgpu_device *adev);
#else
static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
static inline void amdgpu_choose_low_power_state(struct amdgpu_device *adev) { }
#endif

#if defined(CONFIG_DRM_AMD_DC)
int amdgpu_dm_display_resume(struct amdgpu_device *adev);
#else
static inline int amdgpu_dm_display_resume(struct amdgpu_device *adev) { return 0; }
#endif

void amdgpu_register_gpu_instance(struct amdgpu_device *adev);
void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev);

pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev,
					   pci_channel_state_t state);
pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev);
pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev);
void amdgpu_pci_resume(struct pci_dev *pdev);

bool amdgpu_device_cache_pci_state(struct pci_dev *pdev);
bool amdgpu_device_load_pci_state(struct pci_dev *pdev);

bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev);

int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
			       enum amd_clockgating_state state);
int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
			       enum amd_powergating_state state);

static inline bool amdgpu_device_has_timeouts_enabled(struct amdgpu_device *adev)
{}

#include "amdgpu_object.h"

static inline bool amdgpu_is_tmz(struct amdgpu_device *adev)
{}

int amdgpu_in_reset(struct amdgpu_device *adev);

extern const struct attribute_group amdgpu_vram_mgr_attr_group;
extern const struct attribute_group amdgpu_gtt_mgr_attr_group;
extern const struct attribute_group amdgpu_flash_attr_group;

#endif