/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <[email protected]>
 *    Dave Airlie
 */

#include <linux/atomic.h>
#include <linux/debugfs.h>
#include <linux/firmware.h>
#include <linux/kref.h>
#include <linux/sched/signal.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>

#include "radeon.h"
#include "radeon_reg.h"
#include "radeon_trace.h"

/*
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization. When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed. Whether
 * we use a scratch register or memory location depends on the asic
 * and whether writeback is enabled.
 */

/**
 * radeon_fence_write - write a fence value
 *
 * @rdev: radeon_device pointer
 * @seq: sequence number to write
 * @ring: ring index the fence is associated with
 *
 * Writes a fence value to memory or a scratch register (all asics).
 */
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{ … }

/**
 * radeon_fence_read - read a fence value
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Reads a fence value from memory or a scratch register (all asics).
 * Returns the value of the fence read from memory or register.
 */
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{ … }

/**
 * radeon_fence_schedule_check - schedule lockup check
 *
 * @rdev: radeon_device pointer
 * @ring: ring index we should work with
 *
 * Queues a delayed work item to check for lockups.
 */
static void radeon_fence_schedule_check(struct radeon_device *rdev, int ring)
{ … }

/**
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_fence_emit(struct radeon_device *rdev,
		      struct radeon_fence **fence,
		      int ring)
{ … }
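
/*
 * Example (illustrative sketch, not part of the driver): how a submission
 * path such as radeon_ib_schedule() typically pairs ring commands with
 * radeon_fence_emit(). The helper name and the fixed choice of the GFX
 * ring are assumptions made purely for this example.
 */
static int __maybe_unused radeon_fence_example_submit(struct radeon_device *rdev,
						      struct radeon_fence **fence)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	/* reserve space for the command packets plus the fence packet */
	r = radeon_ring_lock(rdev, ring, 64);
	if (r)
		return r;

	/* ... emit command packets here ... */

	/* emit the fence last so it signals once the commands are done */
	r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		return r;
	}
	radeon_ring_unlock_commit(rdev, ring, false);
	return 0;
}
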
/*
 * radeon_fence_check_signaled - callback from fence_queue
 *
 * This function is called with the fence_queue lock held, which is also used
 * for the fence locking itself, so unlocked variants are used for
 * fence_signal, and remove_wait_queue.
 */
static int radeon_fence_check_signaled(wait_queue_entry_t *wait,
				       unsigned int mode, int flags, void *key)
{ … }

/**
 * radeon_fence_activity - check for fence activity
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and calculates the last
 * signaled fence value. Returns true if activity occurred
 * on the ring, and the fence_queue should be woken up.
 */
static bool radeon_fence_activity(struct radeon_device *rdev, int ring)
{ … }

/**
 * radeon_fence_check_lockup - check for hardware lockup
 *
 * @work: delayed work item
 *
 * Checks for fence activity and, if there is none, probes
 * the hardware to see if a lockup occurred.
 */
static void radeon_fence_check_lockup(struct work_struct *work)
{ … }

/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{ … }

/**
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value). Helper function for
 * radeon_fence_signaled().
 */
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
				      u64 seq, unsigned int ring)
{ … }

static bool radeon_fence_is_signaled(struct dma_fence *f)
{ … }

/**
 * radeon_fence_enable_signaling - enable signaling on fence
 * @f: fence
 *
 * This function is called with the fence_queue lock held, and adds a callback
 * to fence_queue that checks if this fence is signaled, and if so it
 * signals the fence and removes itself.
 */
static bool radeon_fence_enable_signaling(struct dma_fence *f)
{ … }

/**
 * radeon_fence_signaled - check if a fence has signaled
 *
 * @fence: radeon fence object
 *
 * Check if the requested fence has signaled (all asics).
 * Returns true if the fence has signaled or false if it has not.
 */
bool radeon_fence_signaled(struct radeon_fence *fence)
{ … }

/**
 * radeon_fence_any_seq_signaled - check if any sequence number is signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence numbers
 *
 * Check if the last signaled fence sequence numbers are >= the requested
 * sequence numbers (all asics).
 * Returns true if any has signaled (current value is >= requested value)
 * or false if it has not. Helper function for radeon_fence_wait_seq_timeout().
 */
static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{ … }
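
/*
 * Example (illustrative sketch, not part of the driver): building the
 * per-ring sequence number array that radeon_fence_any_seq_signaled() and
 * the wait helpers below consume; a zero entry means "nothing to wait for
 * on that ring". The helper name is an assumption made purely for this
 * example.
 */
static bool __maybe_unused
radeon_fence_example_any_signaled(struct radeon_device *rdev,
				  struct radeon_fence **fences)
{
	u64 seq[RADEON_NUM_RINGS];
	unsigned int i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		seq[i] = 0;
		if (!fences[i])
			continue;
		/* record the sequence number each fence waits on */
		seq[i] = fences[i]->seq;
	}
	return radeon_fence_any_seq_signaled(rdev, seq);
}
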
/**
 * radeon_fence_wait_seq_timeout - wait for specific sequence numbers
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics). The sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence numbers. Helper function
 * for radeon_fence_wait_*().
 * Returns the remaining time if the sequence numbers have passed, 0 if
 * the wait timed out, or an error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected.
 */
static long radeon_fence_wait_seq_timeout(struct radeon_device *rdev,
					  u64 *target_seq, bool intr,
					  long timeout)
{ … }

/**
 * radeon_fence_wait_timeout - wait for a fence to signal with timeout
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns the remaining time if the fence has passed, 0 if the wait timed
 * out, or an error for all other cases.
 */
long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeout)
{ … }

/**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, error for all other cases.
 */
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{ … }

/**
 * radeon_fence_wait_any - wait for a fence to signal on any ring
 *
 * @rdev: radeon device pointer
 * @fences: radeon fence object(s)
 * @intr: use interruptible sleep
 *
 * Wait for any requested fence to signal (all asics). The fence
 * array is indexed by ring id. @intr selects whether to use
 * interruptible (true) or non-interruptible (false) sleep when
 * waiting for the fences. Used by the suballocator.
 * Returns 0 if any fence has passed, error for all other cases.
 */
int radeon_fence_wait_any(struct radeon_device *rdev,
			  struct radeon_fence **fences,
			  bool intr)
{ … }

/**
 * radeon_fence_wait_next - wait for the next fence to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
{ … }

/**
 * radeon_fence_wait_empty - wait for all fences to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
{ … }

/**
 * radeon_fence_ref - take a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{ … }

/**
 * radeon_fence_unref - remove a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Remove a reference on a fence (all asics).
 */
void radeon_fence_unref(struct radeon_fence **fence)
{ … }
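
/*
 * Example (illustrative sketch, not part of the driver): a bounded wait on
 * a fence followed by dropping the caller's reference, using the return
 * convention documented above for radeon_fence_wait_timeout(). The helper
 * name and the one second timeout are assumptions made purely for this
 * example.
 */
static int __maybe_unused radeon_fence_example_wait_and_release(struct radeon_fence **fence)
{
	long r;

	if (!*fence)
		return 0;

	/* interruptible wait, at most one second */
	r = radeon_fence_wait_timeout(*fence, true, msecs_to_jiffies(1000));
	if (r == 0)
		r = -ETIMEDOUT;	/* fence did not signal in time */
	else if (r > 0)
		r = 0;		/* signaled with time to spare */

	/* drop the reference whether or not the wait succeeded */
	radeon_fence_unref(fence);
	return (int)r;
}
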
/**
 * radeon_fence_count_emitted - get the count of emitted fences
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring. Used by the
 * dynpm code to track ring activity.
 */
unsigned int radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{ … }

/**
 * radeon_fence_need_sync - do we need a semaphore
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics). If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * not.
 */
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{ … }

/**
 * radeon_fence_note_sync - record the sync point
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{ … }

/**
 * radeon_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{ … }

/**
 * radeon_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to init the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for radeon_fence_driver_init().
 */
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{ … }

/**
 * radeon_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * radeon_fence_driver_start_ring().
 */
void radeon_fence_driver_init(struct radeon_device *rdev)
{ … }

/**
 * radeon_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{ … }

/**
 * radeon_fence_driver_force_completion - force all fence waiters to complete
 *
 * @rdev: radeon device pointer
 * @ring: the ring to complete
 *
 * In case of GPU reset failure, make sure no process keeps waiting on a
 * fence that will never complete.
 */
void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring)
{ … }

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info_show(struct seq_file *m, void *data)
{ … }

/*
 * radeon_debugfs_gpu_reset - manually trigger a gpu reset
 *
 * Manually trigger a gpu reset at the next fence wait.
 */
static int radeon_debugfs_gpu_reset(void *data, u64 *val)
{ … }

DEFINE_SHOW_ATTRIBUTE(…);
DEFINE_DEBUGFS_ATTRIBUTE(…);
#endif

void radeon_debugfs_fence_init(struct radeon_device *rdev)
{ … }

static const char *radeon_fence_get_driver_name(struct dma_fence *fence)
{ … }

static const char *radeon_fence_get_timeline_name(struct dma_fence *f)
{ … }

static inline bool radeon_test_signaled(struct radeon_fence *fence)
{ … }

struct radeon_wait_cb {
	…
};

static void radeon_fence_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{ … }

static signed long radeon_fence_default_wait(struct dma_fence *f, bool intr,
					     signed long t)
{ … }

const struct dma_fence_ops radeon_fence_ops = …;
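
/*
 * Example (illustrative sketch, not part of the driver): assuming struct
 * radeon_fence embeds a struct dma_fence as its base member (as declared in
 * radeon.h) and radeon_fence_ops wires up callbacks such as
 * radeon_fence_default_wait() above, a radeon fence can be handed to the
 * generic dma_fence API. The helper name is an assumption made purely for
 * this example.
 */
static long __maybe_unused radeon_fence_example_generic_wait(struct radeon_fence *fence)
{
	struct dma_fence *f = dma_fence_get(&fence->base);
	long r;

	/* interruptible wait through the common dma_fence path */
	r = dma_fence_wait(f, true);
	dma_fence_put(f);
	return r;
}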