linux/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/pm_runtime.h>

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"
#include "amdgpu_ras.h"
#include "amdgpu_reset.h"
#include "amdgpu_xcp.h"
#include "amdgpu_xgmi.h"

/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE         msecs_to_jiffies(100)

#define GFX_OFF_NO_DELAY 0

/*
 * GPU GFX IP block helper functions.
 */

int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
				int pipe, int queue)
{}

void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
				 int *mec, int *pipe, int *queue)
{}

bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
				     int xcc_id, int mec, int pipe, int queue)
{}

int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
			       int me, int pipe, int queue)
{}

void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
				int *me, int *pipe, int *queue)
{}

bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev,
				    int me, int pipe, int queue)
{}

/**
 * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
 *
 * @mask: array in which the per-shader array disable masks will be stored
 * @max_se: number of SEs
 * @max_sh: number of SHs
 *
 * The bitmask of CUs to be disabled in the shader array determined by se and
 * sh is stored in mask[se * max_sh + sh].
 */
void amdgpu_gfx_parse_disable_cu(unsigned int *mask, unsigned int max_se, unsigned int max_sh)
{}
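
/*
 * A minimal sketch of the parsing scheme described above, assuming the
 * comma-separated "se.sh.cu" syntax of the disable_cu module parameter.
 * The helper name and the 16-CU-per-array upper bound are illustrative
 * assumptions, not the in-tree implementation.
 */
static void __maybe_unused
amdgpu_gfx_parse_disable_cu_sketch(unsigned int *mask, unsigned int max_se,
				   unsigned int max_sh, const char *p)
{
	unsigned int se, sh, cu;

	/* start from an all-enabled configuration */
	memset(mask, 0, sizeof(*mask) * max_se * max_sh);

	while (p && *p) {
		/* each list entry names a single CU as "se.sh.cu" */
		if (sscanf(p, "%u.%u.%u", &se, &sh, &cu) != 3)
			return;

		/* flag the CU in the mask of its shader array */
		if (se < max_se && sh < max_sh && cu < 16)
			mask[se * max_sh + sh] |= 1u << cu;

		/* advance past the next separator, if any */
		p = strchr(p, ',');
		if (p)
			p++;
	}
}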

static bool amdgpu_gfx_is_graphics_multipipe_capable(struct amdgpu_device *adev)
{}

static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev)
{}

bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev,
						struct amdgpu_ring *ring)
{}

bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
					       struct amdgpu_ring *ring)
{}

void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
{}

void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
{}

static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring, int xcc_id)
{}

int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, int xcc_id)
{}

void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
{}

void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev, int xcc_id)
{}

int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
			unsigned int hpd_size, int xcc_id)
{}

/* create MQD for each compute/gfx queue */
int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
			   unsigned int mqd_size, int xcc_id)
{}
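
/*
 * A minimal sketch of the per-queue MQD setup named in the comment above
 * (compute rings only, XCC indexing elided); it illustrates the pattern,
 * not the in-tree routine. Each queue gets a GTT-resident buffer object
 * holding its memory queue descriptor plus a CPU-side backup copy.
 */
static int __maybe_unused
amdgpu_gfx_mqd_sw_init_sketch(struct amdgpu_device *adev,
			      unsigned int mqd_size)
{
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];

		/* one MQD buffer per queue, mapped for CPU access */
		r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_GTT,
					    &ring->mqd_obj,
					    &ring->mqd_gpu_addr,
					    &ring->mqd_ptr);
		if (r)
			return r;

		/* shadow copy consulted when the queue is restored later */
		adev->gfx.mec.mqd_backup[i] = kzalloc(mqd_size, GFP_KERNEL);
		if (!adev->gfx.mec.mqd_backup[i])
			return -ENOMEM;
	}

	return 0;
}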

void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev, int xcc_id)
{}

int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
{}

int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id)
{}

int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
					int queue_bit)
{}

static int amdgpu_gfx_mes_enable_kcq(struct amdgpu_device *adev, int xcc_id)
{}

int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
{}

int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
{}

/**
 * amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
 *
 * @adev: amdgpu_device pointer
 * @enable: true to enable the gfx off feature, false to disable it
 *
 * 1. The gfx off feature will be enabled by the gfx ip after gfx cg pg is enabled.
 * 2. Other clients can send a request to disable the gfx off feature; the request should be honored.
 * 3. Other clients can cancel their request to disable the gfx off feature.
 * 4. Other clients must not request to enable the gfx off feature without a preceding disable request.
 */
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
{}
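
/*
 * A minimal sketch of the request-count policy described above, not the
 * full implementation: a disable request takes effect immediately, while
 * cancelling the last outstanding disable request only re-arms the delayed
 * enable, so GFXOFF comes back GFX_OFF_DELAY_ENABLE later. The actual SMU
 * power-gating call is reduced to a placeholder comment since only its
 * position in the flow matters here.
 */
static void __maybe_unused
amdgpu_gfx_off_ctrl_sketch(struct amdgpu_device *adev, bool enable)
{
	mutex_lock(&adev->gfx.gfx_off_mutex);

	if (enable) {
		/* a client may only cancel a disable request it made (rule 4) */
		if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0))
			goto unlock;

		/* last cancellation re-arms the delayed enable */
		if (--adev->gfx.gfx_off_req_count == 0)
			schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
					      GFX_OFF_DELAY_ENABLE);
	} else {
		/* the first disable request must be honored at once (rule 2) */
		if (adev->gfx.gfx_off_req_count == 0) {
			/* plain cancel: _sync would deadlock if the work
			 * handler also takes gfx_off_mutex
			 */
			cancel_delayed_work(&adev->gfx.gfx_off_delay_work);
			/* ask the SMU to ungate GFX here */
		}
		adev->gfx.gfx_off_req_count++;
	}

unlock:
	mutex_unlock(&adev->gfx.gfx_off_mutex);
}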

int amdgpu_set_gfx_off_residency(struct amdgpu_device *adev, bool value)
{}

int amdgpu_get_gfx_off_residency(struct amdgpu_device *adev, u32 *value)
{}

int amdgpu_get_gfx_off_entrycount(struct amdgpu_device *adev, u64 *value)
{}

int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
{}

int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{}

int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev)
{}

int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev,
						struct amdgpu_iv_entry *entry)
{}

int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
		void *err_data,
		struct amdgpu_iv_entry *entry)
{}

int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{}

void amdgpu_gfx_ras_error_func(struct amdgpu_device *adev,
		void *ras_error_status,
		void (*func)(struct amdgpu_device *adev, void *ras_error_status,
				int xcc_id))
{}

uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_id)
{}

void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t xcc_id)
{}

int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
{}

void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev,
				  uint32_t ucode_id)
{}

bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id)
{}

static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev,
						struct device_attribute *addr,
						char *buf)
{}

static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev,
						struct device_attribute *addr,
						const char *buf, size_t count)
{}

static ssize_t amdgpu_gfx_get_available_compute_partition(struct device *dev,
						struct device_attribute *addr,
						char *buf)
{}

static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
{}

static int amdgpu_gfx_run_cleaner_shader(struct amdgpu_device *adev, int xcp_id)
{}

static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
						 struct device_attribute *attr,
						 const char *buf,
						 size_t count)
{}

static ssize_t amdgpu_gfx_get_enforce_isolation(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{}

static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev,
						struct device_attribute *attr,
						const char *buf, size_t count)
{}

static DEVICE_ATTR(run_cleaner_shader, 0200,
		   NULL, amdgpu_gfx_set_run_cleaner_shader);

static DEVICE_ATTR(enforce_isolation, 0644,
		   amdgpu_gfx_get_enforce_isolation,
		   amdgpu_gfx_set_enforce_isolation);

static DEVICE_ATTR(current_compute_partition, 0644,
		   amdgpu_gfx_get_current_compute_partition,
		   amdgpu_gfx_set_compute_partition);

static DEVICE_ATTR(available_compute_partition, 0444,
		   amdgpu_gfx_get_available_compute_partition, NULL);

int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev)
{}

void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev)
{}

int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev)
{}

void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev)
{}

int amdgpu_gfx_cleaner_shader_sw_init(struct amdgpu_device *adev,
				      unsigned int cleaner_shader_size)
{}

void amdgpu_gfx_cleaner_shader_sw_fini(struct amdgpu_device *adev)
{}

void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev,
				    unsigned int cleaner_shader_size,
				    const void *cleaner_shader_ptr)
{}

/**
 * amdgpu_gfx_kfd_sch_ctrl - Control the KFD scheduler from the KGD (Kernel Graphics Driver)
 * @adev: amdgpu_device pointer
 * @idx: Index of the scheduler to control
 * @enable: Whether to enable or disable the KFD scheduler
 *
 * This function is used to control the KFD (Kernel Fusion Driver) scheduler
 * from the KGD. It is part of the cleaner shader feature. This function plays
 * a key role in enforcing process isolation on the GPU.
 *
 * The function uses a reference count mechanism (kfd_sch_req_count) to keep
 * track of the number of outstanding requests to disable the KFD scheduler.
 * When a request to enable the KFD scheduler is made, it cancels one
 * outstanding disable request and the reference count is decremented. When
 * the reference count reaches zero, a delayed work is scheduled to enforce
 * isolation after a delay of GFX_SLICE_PERIOD.
 *
 * When a request to disable the KFD scheduler is made, the function first
 * checks if the reference count is zero. If it is, it cancels the delayed work
 * for enforcing isolation and checks if the KFD scheduler is active. If the
 * KFD scheduler is active, it sends a request to stop the KFD scheduler and
 * sets the KFD scheduler state to inactive. Then, it increments the reference
 * count.
 *
 * The function is synchronized using the kfd_sch_mutex to ensure that the KFD
 * scheduler state and reference count are updated atomically.
 *
 * Note: If the reference count is already zero when a request to enable the
 * KFD scheduler is made, it means there's an imbalance bug somewhere. The
 * function triggers a warning in this case.
 */
static void amdgpu_gfx_kfd_sch_ctrl(struct amdgpu_device *adev, u32 idx,
				    bool enable)
{}
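
/*
 * A minimal sketch of the reference-count scheme documented above; the
 * kfd_sch_req_count and kfd_sch_mutex names follow the comment, while the
 * remaining fields, the GFX_SLICE_PERIOD symbol and the placeholder for
 * the stop-scheduler call are assumptions made for illustration.
 */
static void __maybe_unused
amdgpu_gfx_kfd_sch_ctrl_sketch(struct amdgpu_device *adev, u32 idx,
			       bool enable)
{
	mutex_lock(&adev->gfx.kfd_sch_mutex);

	if (enable) {
		/* enabling at a zero count is the imbalance bug noted above */
		if (WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx] == 0))
			goto unlock;

		/* last outstanding disable gone: schedule enforcement */
		if (--adev->gfx.kfd_sch_req_count[idx] == 0)
			schedule_delayed_work(
				&adev->gfx.enforce_isolation[idx].work,
				GFX_SLICE_PERIOD);
	} else {
		if (adev->gfx.kfd_sch_req_count[idx] == 0) {
			cancel_delayed_work(
				&adev->gfx.enforce_isolation[idx].work);
			if (!adev->gfx.kfd_sch_inactive[idx]) {
				/* request that KFD stop its runqueue here */
				adev->gfx.kfd_sch_inactive[idx] = true;
			}
		}
		adev->gfx.kfd_sch_req_count[idx]++;
	}

unlock:
	mutex_unlock(&adev->gfx.kfd_sch_mutex);
}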

/**
 * amdgpu_gfx_enforce_isolation_handler - work handler for enforcing shader isolation
 *
 * @work: work_struct.
 *
 * This function is the work handler for enforcing shader isolation on AMD GPUs.
 * It counts the number of emitted fences for each GFX and compute ring. If there
 * are any fences, it schedules the `enforce_isolation_work` to be run after a
 * delay of `GFX_SLICE_PERIOD`. If there are no fences, it signals the Kernel Fusion
 * Driver (KFD) to resume the runqueue. The function is synchronized using the
 * `enforce_isolation_mutex`.
 */
void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work)
{}
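
/*
 * A minimal sketch of the handler flow described above. The
 * amdgpu_isolation_work layout and the KFD resume step are assumptions
 * made for illustration; amdgpu_fence_count_emitted() is the existing
 * ring helper that reports how many fences are still outstanding.
 */
static void __maybe_unused
amdgpu_gfx_enforce_isolation_handler_sketch(struct work_struct *work)
{
	struct amdgpu_isolation_work *isolation_work =
		container_of(work, struct amdgpu_isolation_work, work.work);
	struct amdgpu_device *adev = isolation_work->adev;
	u32 i, fences = 0;

	mutex_lock(&adev->enforce_isolation_mutex);

	/* any emitted fence means KGD work is still in flight */
	for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
		fences += amdgpu_fence_count_emitted(&adev->gfx.gfx_ring[i]);
	for (i = 0; i < adev->gfx.num_compute_rings; ++i)
		fences += amdgpu_fence_count_emitted(&adev->gfx.compute_ring[i]);

	if (fences) {
		/* still busy: check again one slice later */
		schedule_delayed_work(&isolation_work->work, GFX_SLICE_PERIOD);
	} else {
		/* idle: hand the GPU back to KFD; the actual resume call
		 * (an amdkfd start-scheduler helper) is elided here
		 */
		adev->gfx.kfd_sch_inactive[isolation_work->xcp_id] = false;
	}

	mutex_unlock(&adev->enforce_isolation_mutex);
}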

void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
{}

void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
{}