/* * Copyright 2014 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* */ #include <linux/firmware.h> #include <linux/module.h> #include <linux/pci.h> #include <drm/drm_cache.h> #include "amdgpu.h" #include "gmc_v6_0.h" #include "amdgpu_ucode.h" #include "amdgpu_gem.h" #include "bif/bif_3_0_d.h" #include "bif/bif_3_0_sh_mask.h" #include "oss/oss_1_0_d.h" #include "oss/oss_1_0_sh_mask.h" #include "gmc/gmc_6_0_d.h" #include "gmc/gmc_6_0_sh_mask.h" #include "dce/dce_6_0_d.h" #include "dce/dce_6_0_sh_mask.h" #include "si_enums.h" static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev); static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev); static int gmc_v6_0_wait_for_idle(void *handle); MODULE_FIRMWARE(…) …; MODULE_FIRMWARE(…) …; MODULE_FIRMWARE(…) …; MODULE_FIRMWARE(…) …; MODULE_FIRMWARE(…) …; MODULE_FIRMWARE(…) …; #define MC_SEQ_MISC0__MT__MASK … #define MC_SEQ_MISC0__MT__GDDR1 … #define MC_SEQ_MISC0__MT__DDR2 … #define MC_SEQ_MISC0__MT__GDDR3 … #define MC_SEQ_MISC0__MT__GDDR4 … #define MC_SEQ_MISC0__MT__GDDR5 … #define MC_SEQ_MISC0__MT__HBM … #define MC_SEQ_MISC0__MT__DDR3 … static void gmc_v6_0_mc_stop(struct amdgpu_device *adev) { … } static void gmc_v6_0_mc_resume(struct amdgpu_device *adev) { … } static int gmc_v6_0_init_microcode(struct amdgpu_device *adev) { … } static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev) { … } static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc) { … } static void gmc_v6_0_mc_program(struct amdgpu_device *adev) { … } static int gmc_v6_0_mc_init(struct amdgpu_device *adev) { … } static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, uint32_t vmhub, uint32_t flush_type) { … } static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, unsigned int vmid, uint64_t pd_addr) { … } static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level, uint64_t *addr, uint64_t *flags) { … } static void gmc_v6_0_get_vm_pte(struct amdgpu_device *adev, struct amdgpu_bo_va_mapping *mapping, uint64_t *flags) { … } 
static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev, bool value) { … } /** * gmc_v6_0_set_prt() - set PRT VM fault * * @adev: amdgpu_device pointer * @enable: enable/disable VM fault handling for PRT */ static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable) { … } static int gmc_v6_0_gart_enable(struct amdgpu_device *adev) { … } static int gmc_v6_0_gart_init(struct amdgpu_device *adev) { … } static void gmc_v6_0_gart_disable(struct amdgpu_device *adev) { … } static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev, u32 status, u32 addr, u32 mc_client) { … } /* static const u32 mc_cg_registers[] = { MC_HUB_MISC_HUB_CG, MC_HUB_MISC_SIP_CG, MC_HUB_MISC_VM_CG, MC_XPB_CLK_GAT, ATC_MISC_CG, MC_CITF_MISC_WR_CG, MC_CITF_MISC_RD_CG, MC_CITF_MISC_VM_CG, VM_L2_CG, }; static const u32 mc_cg_ls_en[] = { MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK, MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK, MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK, MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK, ATC_MISC_CG__MEM_LS_ENABLE_MASK, MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK, MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK, MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK, VM_L2_CG__MEM_LS_ENABLE_MASK, }; static const u32 mc_cg_en[] = { MC_HUB_MISC_HUB_CG__ENABLE_MASK, MC_HUB_MISC_SIP_CG__ENABLE_MASK, MC_HUB_MISC_VM_CG__ENABLE_MASK, MC_XPB_CLK_GAT__ENABLE_MASK, ATC_MISC_CG__ENABLE_MASK, MC_CITF_MISC_WR_CG__ENABLE_MASK, MC_CITF_MISC_RD_CG__ENABLE_MASK, MC_CITF_MISC_VM_CG__ENABLE_MASK, VM_L2_CG__ENABLE_MASK, }; static void gmc_v6_0_enable_mc_ls(struct amdgpu_device *adev, bool enable) { int i; u32 orig, data; for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { orig = data = RREG32(mc_cg_registers[i]); if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS)) data |= mc_cg_ls_en[i]; else data &= ~mc_cg_ls_en[i]; if (data != orig) WREG32(mc_cg_registers[i], data); } } static void gmc_v6_0_enable_mc_mgcg(struct amdgpu_device *adev, bool enable) { int i; u32 orig, data; for (i = 0; i < 
ARRAY_SIZE(mc_cg_registers); i++) { orig = data = RREG32(mc_cg_registers[i]); if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG)) data |= mc_cg_en[i]; else data &= ~mc_cg_en[i]; if (data != orig) WREG32(mc_cg_registers[i], data); } } static void gmc_v6_0_enable_bif_mgls(struct amdgpu_device *adev, bool enable) { u32 orig, data; orig = data = RREG32_PCIE(ixPCIE_CNTL2); if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) { data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1); data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1); data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1); data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1); } else { data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0); data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0); data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0); data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0); } if (orig != data) WREG32_PCIE(ixPCIE_CNTL2, data); } static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev, bool enable) { u32 orig, data; orig = data = RREG32(mmHDP_HOST_PATH_CNTL); if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG)) data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0); else data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1); if (orig != data) WREG32(mmHDP_HOST_PATH_CNTL, data); } static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev, bool enable) { u32 orig, data; orig = data = RREG32(mmHDP_MEM_POWER_LS); if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS)) data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1); else data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0); if (orig != data) WREG32(mmHDP_MEM_POWER_LS, data); } */ static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type) { … } static int gmc_v6_0_early_init(void *handle) { … } static int gmc_v6_0_late_init(void *handle) { … } static unsigned int gmc_v6_0_get_vbios_fb_size(struct 
amdgpu_device *adev) { … } static int gmc_v6_0_sw_init(void *handle) { … } static int gmc_v6_0_sw_fini(void *handle) { … } static int gmc_v6_0_hw_init(void *handle) { … } static int gmc_v6_0_hw_fini(void *handle) { … } static int gmc_v6_0_suspend(void *handle) { … } static int gmc_v6_0_resume(void *handle) { … } static bool gmc_v6_0_is_idle(void *handle) { … } static int gmc_v6_0_wait_for_idle(void *handle) { … } static int gmc_v6_0_soft_reset(void *handle) { … } static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned int type, enum amdgpu_interrupt_state state) { … } static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { … } static int gmc_v6_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { … } static int gmc_v6_0_set_powergating_state(void *handle, enum amd_powergating_state state) { … } static const struct amd_ip_funcs gmc_v6_0_ip_funcs = …; static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = …; static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = …; static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev) { … } static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev) { … } const struct amdgpu_ip_block_version gmc_v6_0_ip_block = …;