linux/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c

// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "kfd_kernel_queue.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_reset.h"
#include "mes_v11_api_def.h"
#include "kfd_debug.h"

/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)
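
/*
 * Illustrative sketch (not upstream code): with CIK_HPD_EOP_BYTES_LOG2 == 11,
 * each pipe's EOP queue is 2 KiB, so the total EOP backing needed for one MEC
 * can be sized as below. The helper name is hypothetical.
 */
static inline unsigned int cik_eop_total_bytes_example(unsigned int pipes_per_mec)
{
	return pipes_per_mec * CIK_HPD_EOP_BYTES;
}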

static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
				  u32 pasid, unsigned int vmid);

static int execute_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param,
				uint32_t grace_period);
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param,
				uint32_t grace_period,
				bool reset);

static int map_queues_cpsch(struct device_queue_manager *dqm);

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				struct queue *q);

static inline void deallocate_hqd(struct device_queue_manager *dqm,
				struct queue *q);
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
static int allocate_sdma_queue(struct device_queue_manager *dqm,
				struct queue *q, const uint32_t *restore_sdma_id);
static void kfd_process_hw_exception(struct work_struct *work);

static inline
enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
{}

static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
{}

unsigned int get_cp_queues_num(struct device_queue_manager *dqm)
{}

unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
{}

unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
{}

static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm)
{}

unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
{}

unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm)
{}

static void init_sdma_bitmaps(struct device_queue_manager *dqm)
{}

void program_sh_mem_settings(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{}

static void kfd_hws_hang(struct device_queue_manager *dqm)
{}

static int convert_to_mes_queue_type(int queue_type)
{}

static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
			 struct qcm_process_device *qpd)
{}

static int remove_queue_mes(struct device_queue_manager *dqm, struct queue *q,
			struct qcm_process_device *qpd)
{}

static int remove_all_queues_mes(struct device_queue_manager *dqm)
{}

static void increment_queue_count(struct device_queue_manager *dqm,
				  struct qcm_process_device *qpd,
				  struct queue *q)
{}

static void decrement_queue_count(struct device_queue_manager *dqm,
				  struct qcm_process_device *qpd,
				  struct queue *q)
{}

/*
 * Allocate a doorbell ID to this queue.
 * If restore_id is passed in, make sure the requested ID is valid and then
 * allocate it.
 */
static int allocate_doorbell(struct qcm_process_device *qpd,
			     struct queue *q,
			     uint32_t const *restore_id)
{}
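
/*
 * Illustrative sketch (not upstream code) of the allocate-or-restore pattern
 * described above: if a restore ID is supplied, validate that it is in range
 * and free before claiming it; otherwise take the first free bit. The bitmap
 * and helper names below are hypothetical stand-ins for the per-process
 * doorbell bookkeeping.
 */
static int doorbell_alloc_example(unsigned long *bitmap, unsigned int nbits,
				  const uint32_t *restore_id, uint32_t *out_id)
{
	unsigned int bit;

	if (restore_id) {
		/* Requested ID must be in range and not already in use */
		if (*restore_id >= nbits || test_bit(*restore_id, bitmap))
			return -EINVAL;
		bit = *restore_id;
	} else {
		bit = find_first_zero_bit(bitmap, nbits);
		if (bit >= nbits)
			return -ENOMEM;	/* no free doorbells */
	}

	set_bit(bit, bitmap);
	*out_id = bit;
	return 0;
}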

static void deallocate_doorbell(struct qcm_process_device *qpd,
				struct queue *q)
{}

static void program_trap_handler_settings(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd)
{}

static int allocate_vmid(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd,
			struct queue *q)
{}

static int flush_texture_cache_nocpsch(struct kfd_node *kdev,
				struct qcm_process_device *qpd)
{}

static void deallocate_vmid(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{}

static int create_queue_nocpsch(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd,
				const struct kfd_criu_queue_priv_data *qd,
				const void *restore_mqd, const void *restore_ctl_stack)
{}

static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
{}

static inline void deallocate_hqd(struct device_queue_manager *dqm,
				struct queue *q)
{}

#define SQ_IND_CMD_CMD_KILL		0x00000003
#define SQ_IND_CMD_MODE_BROADCAST	0x00000001

static int dbgdev_wave_reset_wavefronts(struct kfd_node *dev, struct kfd_process *p)
{}

/* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
 * to avoid unsynchronized access.
 */
static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{}

static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{}
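
/*
 * Illustrative sketch (not upstream code): the unlocked entry point is
 * expected to be a thin wrapper that takes the DQM lock around the _locked
 * variant, using the dqm_lock()/dqm_unlock() helpers from
 * kfd_device_queue_manager.h. The wrapper name below is hypothetical.
 */
static int destroy_queue_wrapper_example(struct device_queue_manager *dqm,
					 struct qcm_process_device *qpd,
					 struct queue *q)
{
	int retval;

	dqm_lock(dqm);
	retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
	dqm_unlock(dqm);

	return retval;
}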

static int update_queue(struct device_queue_manager *dqm, struct queue *q,
			struct mqd_update_info *minfo)
{}

/* suspend_single_queue does not take the dqm lock the way
 * evict_process_queues_cpsch and evict_process_queues_nocpsch do. Lock the
 * dqm before calling, and unlock it after calling.
 *
 * We don't take the lock here because this function may be called on
 * multiple queues in a loop, so rather than locking/unlocking repeatedly,
 * the caller keeps the dqm locked across all of the calls.
 */
static int suspend_single_queue(struct device_queue_manager *dqm,
				      struct kfd_process_device *pdd,
				      struct queue *q)
{}

/* resume_single_queue does not take the dqm lock the way
 * restore_process_queues_cpsch and restore_process_queues_nocpsch do. Lock
 * the dqm before calling, and unlock it after calling (see the batched-call
 * sketch below).
 *
 * We don't take the lock here because this function may be called on
 * multiple queues in a loop, so rather than locking/unlocking repeatedly,
 * the caller keeps the dqm locked across all of the calls.
 */
static int resume_single_queue(struct device_queue_manager *dqm,
				      struct qcm_process_device *qpd,
				      struct queue *q)
{}
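
/*
 * Illustrative sketch (not upstream code) of the batched calling convention
 * described above: the caller takes the DQM lock once and then invokes
 * suspend_single_queue() (or resume_single_queue()) for every queue in the
 * batch, instead of locking and unlocking per queue. The iteration assumes
 * the qpd->queues_list / q->list linkage used elsewhere in amdkfd; the
 * function name is hypothetical.
 */
static int suspend_all_queues_example(struct device_queue_manager *dqm,
				      struct kfd_process_device *pdd,
				      struct qcm_process_device *qpd)
{
	struct queue *q;
	int retval = 0;

	dqm_lock(dqm);
	list_for_each_entry(q, &qpd->queues_list, list) {
		retval = suspend_single_queue(dqm, pdd, q);
		if (retval)
			break;
	}
	dqm_unlock(dqm);

	return retval;
}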

static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{}

static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
				      struct qcm_process_device *qpd)
{}

static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
					  struct qcm_process_device *qpd)
{}

static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{}

static int register_process(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{}

static int unregister_process(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{}

static int
set_pasid_vmid_mapping(struct device_queue_manager *dqm, u32 pasid,
			unsigned int vmid)
{}

static void init_interrupts(struct device_queue_manager *dqm)
{}

static int initialize_nocpsch(struct device_queue_manager *dqm)
{}

static void uninitialize(struct device_queue_manager *dqm)
{}

static int start_nocpsch(struct device_queue_manager *dqm)
{}

static int stop_nocpsch(struct device_queue_manager *dqm)
{}

static int allocate_sdma_queue(struct device_queue_manager *dqm,
				struct queue *q, const uint32_t *restore_sdma_id)
{}

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				struct queue *q)
{}

/*
 * Device Queue Manager implementation for cp scheduler
 */

static int set_sched_resources(struct device_queue_manager *dqm)
{}

static int initialize_cpsch(struct device_queue_manager *dqm)
{}

static int start_cpsch(struct device_queue_manager *dqm)
{}

static int stop_cpsch(struct device_queue_manager *dqm)
{}

static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{}

static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{}

static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
			struct qcm_process_device *qpd,
			const struct kfd_criu_queue_priv_data *qd,
			const void *restore_mqd, const void *restore_ctl_stack)
{}

int amdkfd_fence_wait_timeout(struct device_queue_manager *dqm,
			      uint64_t fence_value,
			      unsigned int timeout_ms)
{}

/* dqm->lock mutex has to be locked before calling this function */
static int map_queues_cpsch(struct device_queue_manager *dqm)
{}
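
/*
 * Illustrative sketch (not upstream code): a caller-must-hold-the-lock
 * precondition like the one above (which also applies to unmap_queues_cpsch
 * and execute_queues_cpsch below) can be made checkable with lockdep. This
 * assumes the lock_hidden mutex behind the dqm_lock()/dqm_unlock() helpers
 * in kfd_device_queue_manager.h.
 */
static inline void assert_dqm_locked_example(struct device_queue_manager *dqm)
{
	lockdep_assert_held(&dqm->lock_hidden);
}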

/* dqm->lock mutex has to be locked before calling this function */
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param,
				uint32_t grace_period,
				bool reset)
{}

/* Only for compute queues */
static int reset_queues_cpsch(struct device_queue_manager *dqm,
			uint16_t pasid)
{}

/* dqm->lock mutex has to be locked before calling this function */
static int execute_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param,
				uint32_t grace_period)
{}

static int wait_on_destroy_queue(struct device_queue_manager *dqm,
				 struct queue *q)
{}

static int destroy_queue_cpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{}

/*
 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
 * stay in user mode.
 */
#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
/* APE1 limit is inclusive and 64K aligned. */
#define APE1_LIMIT_ALIGNMENT 0xFFFF
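
/*
 * Illustrative sketch (not upstream code) of how the two macros above can be
 * used to validate an APE1 aperture: the base must have all of its fixed
 * bits clear, and the inclusive limit must end in 0xFFFF (64K aligned) with
 * the same high bits clear. The helper name is hypothetical.
 */
static bool ape1_range_is_valid_example(uint64_t base, uint64_t size)
{
	uint64_t limit = base + size - 1;	/* limit is inclusive */

	return (base & APE1_FIXED_BITS_MASK) == 0 &&
	       (limit & APE1_FIXED_BITS_MASK) == APE1_LIMIT_ALIGNMENT;
}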

static bool set_cache_memory_policy(struct device_queue_manager *dqm,
				   struct qcm_process_device *qpd,
				   enum cache_policy default_policy,
				   enum cache_policy alternate_policy,
				   void __user *alternate_aperture_base,
				   uint64_t alternate_aperture_size)
{}

static int process_termination_nocpsch(struct device_queue_manager *dqm,
		struct qcm_process_device *qpd)
{}

static int get_wave_state(struct device_queue_manager *dqm,
			  struct queue *q,
			  void __user *ctl_stack,
			  u32 *ctl_stack_used_size,
			  u32 *save_area_used_size)
{}

static void get_queue_checkpoint_info(struct device_queue_manager *dqm,
			const struct queue *q,
			u32 *mqd_size,
			u32 *ctl_stack_size)
{}

static int checkpoint_mqd(struct device_queue_manager *dqm,
			  const struct queue *q,
			  void *mqd,
			  void *ctl_stack)
{}

static int process_termination_cpsch(struct device_queue_manager *dqm,
		struct qcm_process_device *qpd)
{}

static int init_mqd_managers(struct device_queue_manager *dqm)
{}

/* Allocate one HIQ MQD (HWS) and all SDMA MQDs in one contiguous chunk */
static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
{}
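
/*
 * Illustrative sketch (not upstream code): sizing a single contiguous buffer
 * that holds one HIQ MQD followed by one MQD per SDMA queue. The mqd_size
 * parameter is a hypothetical stand-in for the per-type MQD size that the
 * real code obtains from its MQD managers.
 */
static inline uint32_t hiq_sdma_chunk_size_example(uint32_t mqd_size,
						   uint32_t num_sdma_queues)
{
	return mqd_size * (num_sdma_queues + 1);	/* +1 for the HIQ MQD */
}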

struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev)
{}

static void deallocate_hiq_sdma_mqd(struct kfd_node *dev,
				    struct kfd_mem_obj *mqd)
{}

void device_queue_manager_uninit(struct device_queue_manager *dqm)
{}

int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid)
{}

static void kfd_process_hw_exception(struct work_struct *work)
{}

int reserve_debug_trap_vmid(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd)
{}

/*
 * Release the vmid reserved for the trap debugger
 */
int release_debug_trap_vmid(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd)
{}

#define QUEUE_NOT_FOUND -1
/* Invalidate all queue entries in the array */
static void q_array_invalidate(uint32_t num_queues, uint32_t *queue_ids)
{}

/* find queue index in array */
static int q_array_get_index(unsigned int queue_id,
		uint32_t num_queues,
		uint32_t *queue_ids)
{}
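
/*
 * Illustrative sketch (not upstream code) of the lookup helper above: a
 * linear scan over the caller-supplied ID array that returns the matching
 * index, or QUEUE_NOT_FOUND when the ID is absent. The real helper may also
 * mask per-entry invalidation bits; that detail is omitted here.
 */
static int q_array_get_index_example(unsigned int queue_id,
				     uint32_t num_queues,
				     const uint32_t *queue_ids)
{
	uint32_t i;

	for (i = 0; i < num_queues; i++)
		if (queue_ids[i] == queue_id)
			return i;

	return QUEUE_NOT_FOUND;
}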

struct copy_context_work_handler_workarea {};

static void copy_context_work_handler(struct work_struct *work)
{}

static uint32_t *get_queue_ids(uint32_t num_queues, uint32_t *usr_queue_id_array)
{}

int resume_queues(struct kfd_process *p,
		uint32_t num_queues,
		uint32_t *usr_queue_id_array)
{}

int suspend_queues(struct kfd_process *p,
			uint32_t num_queues,
			uint32_t grace_period,
			uint64_t exception_clear_mask,
			uint32_t *usr_queue_id_array)
{}

static uint32_t set_queue_type_for_user(struct queue_properties *q_props)
{}

void set_queue_snapshot_entry(struct queue *q,
			      uint64_t exception_clear_mask,
			      struct kfd_queue_snapshot_entry *qss_entry)
{}

int debug_lock_and_unmap(struct device_queue_manager *dqm)
{}

int debug_map_and_unlock(struct device_queue_manager *dqm)
{}

int debug_refresh_runlist(struct device_queue_manager *dqm)
{}

#if defined(CONFIG_DEBUG_FS)

static void seq_reg_dump(struct seq_file *m,
			 uint32_t (*dump)[2], uint32_t n_regs)
{}

int dqm_debugfs_hqds(struct seq_file *m, void *data)
{}

int dqm_debugfs_hang_hws(struct device_queue_manager *dqm)
{}

#endif