/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/module.h>

#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
#include "vi.h"
#include "vid.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "tonga_sdma_pkt_open.h"

#include "ivsrcid/ivsrcid_vislands30.h"

static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev);

MODULE_FIRMWARE(…) …;
MODULE_FIRMWARE(…) …;
MODULE_FIRMWARE(…) …;
MODULE_FIRMWARE(…) …;
MODULE_FIRMWARE(…) …;
MODULE_FIRMWARE(…) …;
MODULE_FIRMWARE(…) …;
MODULE_FIRMWARE(…) …;
MODULE_FIRMWARE(…) …;
MODULE_FIRMWARE(…) …;
MODULE_FIRMWARE(…) …;
MODULE_FIRMWARE(…) …;
MODULE_FIRMWARE(…) …;
MODULE_FIRMWARE(…) …;
MODULE_FIRMWARE(…) …;

static const u32 sdma_offsets[SDMA_MAX_INSTANCE] = …;

static const u32 golden_settings_tonga_a11[] = …;

static const u32 tonga_mgcg_cgcg_init[] = …;

static const u32 golden_settings_fiji_a10[] = …;

static const u32 fiji_mgcg_cgcg_init[] = …;

static const u32 golden_settings_polaris11_a11[] = …;

static const u32 golden_settings_polaris10_a11[] = …;

static const u32 cz_golden_settings_a11[] = …;

static const u32 cz_mgcg_cgcg_init[] = …;

static const u32 stoney_golden_settings_a11[] = …;

static const u32 stoney_mgcg_cgcg_init[] = …;

/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines. These engines are used for compute
 * and gfx. There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP. sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things. It also has support for tiling/detiling of
 * buffers.
 */
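/*
 * Illustrative sketch only, not part of the driver: the packet model
 * described above in its simplest form. Every sDMA packet starts with a
 * header DW whose low bits select the opcode, followed by op-specific
 * operand DWs. The function name below is hypothetical; SDMA_OP_NOP and
 * SDMA_PKT_HEADER_OP() are assumed to come from vid.h and
 * tonga_sdma_pkt_open.h, and the caller is assumed to have reserved
 * ring space (e.g. via amdgpu_ring_alloc()).
 */
static void __maybe_unused
sdma_v3_0_example_emit_nop(struct amdgpu_ring *ring)
{
	/* a NOP is the minimal packet: a bare header DW, no operands */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_NOP));
}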
static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
{
	…
}

static void sdma_v3_0_free_microcode(struct amdgpu_device *adev)
{
	…
}

/**
 * sdma_v3_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
{
	…
}

/**
 * sdma_v3_0_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (VI+).
 */
static uint64_t sdma_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	…
}

/**
 * sdma_v3_0_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (VI+).
 */
static uint64_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	…
}

/**
 * sdma_v3_0_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (VI+).
 */
static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	…
}

static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	…
}

/**
 * sdma_v3_0_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @job: job to retrieve vmid from
 * @ib: IB object to schedule
 * @flags: unused
 *
 * Schedule an IB in the DMA ring (VI).
 */
static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
				   struct amdgpu_job *job,
				   struct amdgpu_ib *ib,
				   uint32_t flags)
{
	…
}

/**
 * sdma_v3_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	…
}

/**
 * sdma_v3_0_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (VI).
 */
static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
				      u64 seq, unsigned flags)
{
	…
}

/**
 * sdma_v3_0_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers (VI).
 */
static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
{
	…
}

/**
 * sdma_v3_0_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues (VI).
 */
static void sdma_v3_0_rlc_stop(struct amdgpu_device *adev)
{
	…
}

/**
 * sdma_v3_0_ctx_switch_enable - enable/disable the async dma engines context switch
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs context switch.
 *
 * Halt or unhalt the async dma engines context switch (VI).
 */
static void sdma_v3_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{
	…
}

/**
 * sdma_v3_0_enable - enable/disable the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (VI).
 */
static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable)
{
	…
}
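/*
 * Illustrative sketch only, not the driver's actual emit path: the
 * general shape of the fence write described in the
 * sdma_v3_0_ring_emit_fence comment above -- a FENCE packet carrying
 * the address/seq pair, followed by a TRAP packet to raise the
 * interrupt. The function name is hypothetical; the packet macros are
 * assumed to come from tonga_sdma_pkt_open.h and vid.h.
 */
static void __maybe_unused
sdma_v3_0_example_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq)
{
	/* FENCE packet: header, 64-bit address, 32-bit fence data */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	/* TRAP packet: fires the SDMA trap interrupt once the fence lands */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
	amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}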
/**
 * sdma_v3_0_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
{
	…
}

/**
 * sdma_v3_0_rlc_resume - setup and start the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_rlc_resume(struct amdgpu_device *adev)
{
	…
}

/**
 * sdma_v3_0_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_start(struct amdgpu_device *adev)
{
	…
}

/**
 * sdma_v3_0_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
{
	…
}

/**
 * sdma_v3_0_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Test a simple IB in the DMA ring (VI).
 * Returns 0 on success, error on failure.
 */
static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	…
}

/**
 * sdma_v3_0_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (VI).
 */
static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib,
				  uint64_t pe, uint64_t src,
				  unsigned count)
{
	…
}

/**
 * sdma_v3_0_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using sDMA (VI).
 */
static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				   uint64_t value, unsigned count,
				   uint32_t incr)
{
	…
}

/**
 * sdma_v3_0_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (VI).
 */
static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
				     uint64_t addr, unsigned count,
				     uint32_t incr, uint64_t flags)
{
	…
}

/**
 * sdma_v3_0_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: indirect buffer to fill with padding
 */
static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	…
}

/**
 * sdma_v3_0_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (VI).
 */
static void sdma_v3_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	…
}
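/*
 * Illustrative sketch only, not the driver's actual code: the "write
 * embedded data" packet that the ring-test and manual PTE-update paths
 * above are built on. A WRITE/LINEAR packet carries a 64-bit
 * destination address, a DW count, and the payload inline in the
 * stream. The function name is hypothetical; the macros are assumed to
 * come from vid.h and tonga_sdma_pkt_open.h, and the exact encoding of
 * the count field should be checked against the packet headers.
 */
static void __maybe_unused
sdma_v3_0_example_write_dword(struct amdgpu_ring *ring, u64 gpu_addr, u32 value)
{
	/* WRITE/LINEAR packet: header, 64-bit dst address, DW count, payload */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
			  SDMA_PKT_WRITE_UNTILED_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
	amdgpu_ring_write(ring, value); /* the single payload DW */
}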
/**
 * sdma_v3_0_ring_emit_vm_flush - vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid number to use
 * @pd_addr: address
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (VI).
 */
static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vmid, uint64_t pd_addr)
{
	…
}

static void sdma_v3_0_ring_emit_wreg(struct amdgpu_ring *ring,
				     uint32_t reg, uint32_t val)
{
	…
}

static int sdma_v3_0_early_init(void *handle)
{
	…
}

static int sdma_v3_0_sw_init(void *handle)
{
	…
}

static int sdma_v3_0_sw_fini(void *handle)
{
	…
}

static int sdma_v3_0_hw_init(void *handle)
{
	…
}

static int sdma_v3_0_hw_fini(void *handle)
{
	…
}

static int sdma_v3_0_suspend(void *handle)
{
	…
}

static int sdma_v3_0_resume(void *handle)
{
	…
}

static bool sdma_v3_0_is_idle(void *handle)
{
	…
}

static int sdma_v3_0_wait_for_idle(void *handle)
{
	…
}

static bool sdma_v3_0_check_soft_reset(void *handle)
{
	…
}

static int sdma_v3_0_pre_soft_reset(void *handle)
{
	…
}

static int sdma_v3_0_post_soft_reset(void *handle)
{
	…
}

static int sdma_v3_0_soft_reset(void *handle)
{
	…
}

static int sdma_v3_0_set_trap_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	…
}

static int sdma_v3_0_process_trap_irq(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	…
}

static int sdma_v3_0_process_illegal_inst_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{
	…
}

static void sdma_v3_0_update_sdma_medium_grain_clock_gating(
		struct amdgpu_device *adev,
		bool enable)
{
	…
}

static void sdma_v3_0_update_sdma_medium_grain_light_sleep(
		struct amdgpu_device *adev,
		bool enable)
{
	…
}

static int sdma_v3_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	…
}

static int sdma_v3_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	…
}

static void sdma_v3_0_get_clockgating_state(void *handle, u64 *flags)
{
	…
}

static const struct amd_ip_funcs sdma_v3_0_ip_funcs = …;

static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = …;

static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
{
	…
}

static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = …;

static const struct amdgpu_irq_src_funcs sdma_v3_0_illegal_inst_irq_funcs = …;

static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
	…
}

/**
 * sdma_v3_0_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 * @copy_flags: unused
 *
 * Copy GPU buffers using the DMA engine (VI).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ib *ib,
				       uint64_t src_offset,
				       uint64_t dst_offset,
				       uint32_t byte_count,
				       uint32_t copy_flags)
{
	…
}

/**
 * sdma_v3_0_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (VI).
 */
static void sdma_v3_0_emit_fill_buffer(struct amdgpu_ib *ib,
				       uint32_t src_data,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	…
}

static const struct amdgpu_buffer_funcs sdma_v3_0_buffer_funcs = …;

static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev)
{
	…
}

static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = …;

static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	…
}

const struct amdgpu_ip_block_version sdma_v3_0_ip_block = …;

const struct amdgpu_ip_block_version sdma_v3_1_ip_block = …;
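/*
 * Illustrative sketch only, not the driver's actual code: the
 * COPY/LINEAR packet underlying the buffer-copy callback registered
 * above. The packet names the byte count, source and destination; the
 * engine streams the data without CPU involvement. The function name
 * is hypothetical; the macros are assumed to come from vid.h and
 * tonga_sdma_pkt_open.h.
 */
static void __maybe_unused
sdma_v3_0_example_copy_linear(struct amdgpu_ib *ib, u64 src, u64 dst, u32 bytes)
{
	/* COPY/LINEAR packet: header, byte count, parameters, src, dst */
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = bytes;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src);
	ib->ptr[ib->length_dw++] = upper_32_bits(src);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst);
}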