/* * Copyright 2010 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Alex Deucher */ #include "radeon.h" #include "radeon_asic.h" #include "radeon_trace.h" #include "ni.h" #include "nid.h" /* * DMA * Starting with R600, the GPU has an asynchronous * DMA engine. The programming model is very similar * to the 3D engine (ring buffer, IBs, etc.), but the * DMA controller has its own packet format that is * different from the PM4 format used by the 3D engine. * It supports copying data, writing embedded data, * solid fills, and a number of other things. It also * has support for tiling/detiling of buffers. * Cayman and newer support two asynchronous DMA engines. */ /** * cayman_dma_get_rptr - get the current read pointer * * @rdev: radeon_device pointer * @ring: radeon ring pointer * * Get the current rptr from the hardware (cayman+). 
*/ uint32_t cayman_dma_get_rptr(struct radeon_device *rdev, struct radeon_ring *ring) { … } /** * cayman_dma_get_wptr - get the current write pointer * * @rdev: radeon_device pointer * @ring: radeon ring pointer * * Get the current wptr from the hardware (cayman+). */ uint32_t cayman_dma_get_wptr(struct radeon_device *rdev, struct radeon_ring *ring) { … } /** * cayman_dma_set_wptr - commit the write pointer * * @rdev: radeon_device pointer * @ring: radeon ring pointer * * Write the wptr back to the hardware (cayman+). */ void cayman_dma_set_wptr(struct radeon_device *rdev, struct radeon_ring *ring) { … } /** * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine * * @rdev: radeon_device pointer * @ib: IB object to schedule * * Schedule an IB in the DMA ring (cayman-SI). */ void cayman_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) { … } /** * cayman_dma_stop - stop the async dma engines * * @rdev: radeon_device pointer * * Stop the async dma engines (cayman-SI). */ void cayman_dma_stop(struct radeon_device *rdev) { … } /** * cayman_dma_resume - setup and start the async dma engines * * @rdev: radeon_device pointer * * Set up the DMA ring buffers and enable them. (cayman-SI). * Returns 0 for success, error for failure. */ int cayman_dma_resume(struct radeon_device *rdev) { … } /** * cayman_dma_fini - tear down the async dma engines * * @rdev: radeon_device pointer * * Stop the async dma engines and free the rings (cayman-SI). */ void cayman_dma_fini(struct radeon_device *rdev) { … } /** * cayman_dma_is_lockup - Check if the DMA engine is locked up * * @rdev: radeon_device pointer * @ring: radeon_ring structure holding ring information * * Check if the async DMA engine is locked up. * Returns true if the engine appears to be locked up, false if not. 
*/ bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) { … } /** * cayman_dma_vm_copy_pages - update PTEs by copying them from the GART * * @rdev: radeon_device pointer * @ib: indirect buffer to fill with commands * @pe: addr of the page entry * @src: src addr where to copy from * @count: number of page entries to update * * Update PTEs by copying them from the GART using the DMA (cayman/TN). */ void cayman_dma_vm_copy_pages(struct radeon_device *rdev, struct radeon_ib *ib, uint64_t pe, uint64_t src, unsigned count) { … } /** * cayman_dma_vm_write_pages - update PTEs by writing them manually * * @rdev: radeon_device pointer * @ib: indirect buffer to fill with commands * @pe: addr of the page entry * @addr: dst addr to write into pe * @count: number of page entries to update * @incr: increase next addr by incr bytes * @flags: hw access flags * * Update PTEs by writing them manually using the DMA (cayman/TN). */ void cayman_dma_vm_write_pages(struct radeon_device *rdev, struct radeon_ib *ib, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags) { … } /** * cayman_dma_vm_set_pages - update the page tables using the DMA * * @rdev: radeon_device pointer * @ib: indirect buffer to fill with commands * @pe: addr of the page entry * @addr: dst addr to write into pe * @count: number of page entries to update * @incr: increase next addr by incr bytes * @flags: hw access flags * * Update the page tables using the DMA (cayman/TN). */ void cayman_dma_vm_set_pages(struct radeon_device *rdev, struct radeon_ib *ib, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags) { … } /** * cayman_dma_vm_pad_ib - pad the IB to the required number of dw * * @ib: indirect buffer to fill with padding * */ void cayman_dma_vm_pad_ib(struct radeon_ib *ib) { … } void cayman_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, unsigned vm_id, uint64_t pd_addr) { … }