linux/drivers/misc/sgi-gru/grukservices.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SN Platform GRU Driver
 *
 *              KERNEL SERVICES THAT USE THE GRU
 *
 *  Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
#include <linux/sync_core.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <asm/io_apic.h>
#include "gru.h"
#include "grulib.h"
#include "grutables.h"
#include "grukservices.h"
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>

/*
 * Kernel GRU Usage
 *
 * The following is an interim algorithm for management of kernel GRU
 * resources. This will likely be replaced when we better understand the
 * kernel/user requirements.
 *
 * Blade percpu resources reserved for kernel use. These resources are
 * reserved whenever the kernel context for the blade is loaded. Note
 * that the kernel context is not guaranteed to always be available. It is
 * loaded on demand & can be stolen by a user if user demand exceeds
 * kernel demand. The kernel can always reload the kernel context, but
 * a SLEEP may be required!!!
 *
 * Async Overview:
 *
 * 	Each blade has one "kernel context" that owns GRU kernel resources
 * 	located on the blade. Kernel drivers use GRU resources in this context
 * 	for sending messages, zeroing memory, etc.
 *
 * 	The kernel context is dynamically loaded on demand. If it is not in
 * 	use by the kernel, the kernel context can be unloaded & given to a user.
 * 	The kernel context will be reloaded when needed. This may require that
 * 	a context be stolen from a user.
 * 		NOTE: frequent unloading/reloading of the kernel context is
 * 		expensive. We are depending on batch schedulers, cpusets, sane
 * 		drivers or some other mechanism to prevent the need for frequent
 *	 	stealing/reloading.
 *
 * 	The kernel context consists of two parts:
 * 		- 1 CB & a few DSRs that are reserved for each cpu on the blade.
 * 		  Each cpu has its own private resources & does not share them
 * 		  with other cpus. These resources are used serially, i.e.,
 * 		  locked, used & unlocked on each call to a function in
 * 		  grukservices. (See the illustrative sketch after
 * 		  gru_free_cpu_resources() below.)
 * 		  	(Now that we have dynamic loading of kernel contexts, I
 * 		  	 may rethink this & allow sharing between cpus....)
 *
 *		- Additional resources can be reserved long term & used directly
 *		  by UV drivers located in the kernel. Drivers using these GRU
 *		  resources can use asynchronous GRU instructions that send
 *		  interrupts on completion.
 *		  	- these resources must be explicitly locked/unlocked
 *		  	- locked resources prevent (obviously) the kernel
 *		  	  context from being unloaded.
 *			- drivers using these resources directly issue their own
 *			  GRU instructions and must wait for/check completion.
 *
 * 		  When these resources are reserved, the caller can optionally
 * 		  associate a completion with the resources and use asynchronous
 * 		  GRU instructions. When an async GRU instruction completes, the
 * 		  driver will do a wakeup on the event. (See the illustrative
 * 		  sketch after gru_unlock_async_resource() below.)
 *
 */


/* Blade id <-> async resource handle conversions (handle = blade id + 1) */
#define ASYNC_HAN_TO_BID(h)	((h) - 1)
#define ASYNC_BID_TO_HAN(b)	((b) + 1)
#define ASYNC_HAN_TO_BS(h)	gru_base[ASYNC_HAN_TO_BID(h)]

#define GRU_NUM_KERNEL_CBR	1
#define GRU_NUM_KERNEL_DSR_BYTES 256
#define GRU_NUM_KERNEL_DSR_CL	(GRU_NUM_KERNEL_DSR_BYTES /		\
					GRU_CACHE_LINE_BYTES)

/* GRU instruction attributes for all instructions */
#define IMA			IMA_CB_DELAY

/* GRU cacheline size is always 64 bytes - even on arches with 128 byte lines */
#define __gru_cacheline_aligned__				\
	__attribute__((__aligned__(GRU_CACHE_LINE_BYTES)))

#define MAGIC	0x1234567887654321UL

/* Default retry count for GRU errors on kernel instructions */
#define EXCEPTION_RETRY_LIMIT	3

/* Status of message queue sections */
#define MQS_EMPTY		0
#define MQS_FULL		1
#define MQS_NOOP		2

/*----------------- RESOURCE MANAGEMENT -------------------------------------*/
/* optimized for x86_64 */
struct message_queue {
	union gru_mesqhead	head __gru_cacheline_aligned__;	/* CL 0 */
	int			qlines;				/* DW 1 */
	long			hstatus[2];
	void			*next __gru_cacheline_aligned__;/* CL 1 */
	void			*limit;
	void			*start;
	void			*start2;
	char			data ____cacheline_aligned;	/* CL 2 */
};

/* First word in every message - used by mesq interface */
struct message_header {
	char	present;
	char	present2;
	char	lines;
	char	fill;
};

#define HSTATUS(mq, h)	((mq) + offsetof(struct message_queue, hstatus[h]))

/*
 * Reload the blade's kernel context into a GRU chiplet. Called holding
 * the bs_kgts_sema for READ. Will steal user contexts if necessary.
 */
static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id)
{}

/*
 * Free all kernel contexts that are not currently in use.
 *   Returns 0 if all freed, else number of in-use contexts.
 */
static int gru_free_kernel_contexts(void)
{}

/*
 * Lock & load the kernel context for the specified blade.
 */
static struct gru_blade_state *gru_lock_kernel_context(int blade_id)
{}

/*
 * Unlock the kernel context for the specified blade. Context is not
 * unloaded but may be stolen before next use.
 */
static void gru_unlock_kernel_context(int blade_id)
{}

/*
 * Reserve & get pointers to the DSR/CBRs reserved for the current cpu.
 * 	- returns with preemption disabled
 */
static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
{}

/*
 * Free the current cpu's reserved DSR/CBR resources.
 */
static void gru_free_cpu_resources(void *cb, void *dsr)
{}
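
/*
 * Illustrative sketch, not part of the driver: the serial
 * lock/use/unlock pattern described in the overview above. Every
 * synchronous kservice in this file follows this shape. The
 * instruction issued on the CB and the error return are assumptions
 * for illustration only.
 */
static int __maybe_unused example_cpu_resource_usage(void)
{
	void *cb, *dsr;
	int ret;

	/* Returns with preemption disabled on success */
	if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;	/* assumed error code */

	/* ... issue a GRU instruction on cb, staging data in dsr ... */

	ret = gru_wait_proc(cb);	/* spin until idle or exception */
	gru_free_cpu_resources(cb, dsr);	/* reenables preemption */
	return ret;
}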

/*
 * Reserve GRU resources to be used asynchronously.
 *   Note: currently supports only 1 reservation per blade.
 *
 * 	input:
 * 		blade_id  - blade on which resources should be reserved
 * 		cbrs	  - number of CBRs
 * 		dsr_bytes - number of DSR bytes needed
 *	output:
 *		handle to identify resource
 *		(0 = async resources already reserved)
 */
unsigned long gru_reserve_async_resources(int blade_id, int cbrs, int dsr_bytes,
			struct completion *cmp)
{}

/*
 * Release async resources previously reserved.
 *
 *	input:
 *		han - handle to identify resources
 */
void gru_release_async_resources(unsigned long han)
{}

/*
 * Wait for async GRU instructions to complete.
 *
 *	input:
 *		han - handle to identify resources
 */
void gru_wait_async_cbr(unsigned long han)
{}

/*
 * Lock previously reserved async GRU resources
 *
 *	input:
 *		han - handle to identify resources
 *	output:
 *		cb  - pointer to first CBR
 *		dsr - pointer to first DSR
 */
void gru_lock_async_resource(unsigned long han, void **cb, void **dsr)
{}

/*
 * Unlock previously reserved async GRU resources
 *
 *	input:
 *		han - handle to identify resources
 */
void gru_unlock_async_resource(unsigned long han)
{}
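
/*
 * Illustrative sketch, not part of the driver: the async reservation
 * lifecycle described in the overview at the top of this file. The
 * resource counts and the instruction issued on the CB are assumptions
 * for illustration only; "cmp" must be an initialized completion.
 */
static void __maybe_unused example_async_usage(int blade_id,
					       struct completion *cmp)
{
	unsigned long han;
	void *cb, *dsr;

	/* Reserve 1 CBR & 32 DSR bytes; han == 0 means already reserved */
	han = gru_reserve_async_resources(blade_id, 1, 32, cmp);
	if (!han)
		return;

	gru_lock_async_resource(han, &cb, &dsr);
	/* ... issue an async GRU instruction on cb here ... */
	gru_wait_async_cbr(han);	/* sleep until the CBR interrupt */
	gru_unlock_async_resource(han);

	gru_release_async_resources(han);
}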

/*----------------------------------------------------------------------*/
int gru_get_cb_exception_detail(void *cb,
		struct control_block_extended_exc_detail *excdet)
{}

static char *gru_get_cb_exception_detail_str(int ret, void *cb,
					     char *buf, int size)
{}

static int gru_wait_idle_or_exception(struct gru_control_block_status *gen)
{}

static int gru_retry_exception(void *cb)
{}

int gru_check_status_proc(void *cb)
{}

int gru_wait_proc(void *cb)
{}

static void gru_abort(int ret, void *cb, char *str)
{}

void gru_wait_abort_proc(void *cb)
{}


/*------------------------------ MESSAGE QUEUES -----------------------------*/

/* Internal status. These are NOT returned to the user. */
#define MQIE_AGAIN		-1	/* try again, later */


/*
 * Save/restore the "present" flag that is in the second line of 2-line
 * messages
 */
static inline int get_present2(void *p)
{
	struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;

	return mhdr->present;
}

static inline void restore_present2(void *p, int val)
{
	struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;

	mhdr->present = val;
}

/*
 * Create a message queue.
 * 	qlines - message queue size in cache lines. Includes 2-line header.
 */
int gru_create_message_queue(struct gru_message_queue_desc *mqd,
		void *p, unsigned int bytes, int nasid, int vector, int apicid)
{}
EXPORT_SYMBOL_GPL(gru_create_message_queue);
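
/*
 * Illustrative sketch, not part of the driver: creating a small queue
 * in ordinary kernel memory. The queue size is an assumption, and
 * passing nasid/vector/apicid of 0 is assumed to disable delivery
 * interrupts; a power-of-2 kmalloc() size keeps the buffer cache-line
 * aligned.
 */
static int __maybe_unused example_create_mq(struct gru_message_queue_desc *mqd)
{
	unsigned int bytes = 8 * GRU_CACHE_LINE_BYTES;
	void *p = kmalloc(bytes, GFP_KERNEL);

	if (!p)
		return -ENOMEM;
	/* 8 cache lines: 2-line header + 6 lines of message space */
	return gru_create_message_queue(mqd, p, bytes, 0, 0, 0);
}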

/*
 * Send a NOOP message to a message queue
 * 	Returns:
 * 		 0 - if queue is full after the send. This is the normal case
 * 		     but various races can change this.
 *		-1 - if mesq sent successfully but queue not full
 *		>0 - unexpected error. MQE_xxx returned
 */
static int send_noop_message(void *cb, struct gru_message_queue_desc *mqd,
				void *mesg)
{}

/*
 * Handle a gru_mesq full.
 */
static int send_message_queue_full(void *cb, struct gru_message_queue_desc *mqd,
				void *mesg, int lines)
{}

/*
 * Handle a PUT failure. Note: if message was a 2-line message, one of the
 * lines might have been successfully written. Before sending the
 * message, "present" must be cleared in BOTH lines to prevent the receiver
 * from prematurely seeing the full message.
 */
static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
			void *mesg, int lines)
{}

/*
 * Handle a gru_mesq failure. Some of these failures are software recoverable
 * or retryable.
 */
static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd,
				void *mesg, int lines)
{}

/*
 * Send a message to a message queue
 * 	mqd	message queue descriptor
 * 	mesg	message. Must be a vaddr within a GSEG
 * 	bytes	message size (<= 2 CL)
 */
int gru_send_message_gpa(struct gru_message_queue_desc *mqd, void *mesg,
				unsigned int bytes)
{}
EXPORT_SYMBOL_GPL(gru_send_message_gpa);
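
/*
 * Illustrative sketch, not part of the driver: sending with a simple
 * busy retry while the destination queue is full. The retry policy is
 * an assumption; real callers may need to back off or drop instead.
 */
static int __maybe_unused example_send(struct gru_message_queue_desc *mqd,
				       void *mesg, unsigned int bytes)
{
	int ret;

	do {
		ret = gru_send_message_gpa(mqd, mesg, bytes);
		if (ret == MQE_QUEUE_FULL)
			cpu_relax();
	} while (ret == MQE_QUEUE_FULL);
	return ret;
}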

/*
 * Advance the receive pointer for the queue to the next message.
 */
void gru_free_message(struct gru_message_queue_desc *mqd, void *mesg)
{}
EXPORT_SYMBOL_GPL(gru_free_message);

/*
 * Get next message from message queue. Return NULL if no message
 * present. The user must call gru_free_message() to advance to the
 * next message.
 * 	rmq	message queue
 */
void *gru_get_next_message(struct gru_message_queue_desc *mqd)
{}
EXPORT_SYMBOL_GPL(gru_get_next_message);
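
/*
 * Illustrative sketch, not part of the driver: draining a message
 * queue. The payload handling is hypothetical; only the
 * gru_free_message() call is required to advance the queue.
 */
static void __maybe_unused example_drain_mq(struct gru_message_queue_desc *mqd)
{
	void *mesg;

	while ((mesg = gru_get_next_message(mqd)) != NULL) {
		/* ... consume the message payload here ... */
		gru_free_message(mqd, mesg);	/* advance receive pointer */
	}
}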

/* ---------------------- GRU DATA COPY FUNCTIONS ---------------------------*/

/*
 * Load a DW from a global GPA. The GPA can be a memory or MMR address.
 */
int gru_read_gpa(unsigned long *value, unsigned long gpa)
{}
EXPORT_SYMBOL_GPL(gru_read_gpa);


/*
 * Copy a block of data using the GRU resources
 */
int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa,
				unsigned int bytes)
{}
EXPORT_SYMBOL_GPL(gru_copy_gpa);
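
/*
 * Illustrative sketch, not part of the driver: using the data copy
 * services on kernel virtual addresses. uv_gpa() converts a kernel
 * virtual address to a UV global physical address; the buffers here
 * are hypothetical.
 */
static int __maybe_unused example_gpa_services(void *dst, void *src,
					       unsigned int bytes)
{
	unsigned long dw;
	int ret;

	ret = gru_read_gpa(&dw, uv_gpa(src));	/* load a single DW */
	if (ret)
		return ret;
	return gru_copy_gpa(uv_gpa(dst), uv_gpa(src), bytes);
}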

/* ------------------- KERNEL QUICKTESTS RUN AT STARTUP ----------------*/
/* 	Temp - will delete after we gain confidence in the GRU		*/

static int quicktest0(unsigned long arg)
{}

#define ALIGNUP(p, q)	((void *)(((unsigned long)(p) + (q) - 1) & ~((q) - 1)))

static int quicktest1(unsigned long arg)
{}

static int quicktest2(unsigned long arg)
{}

#define BUFSIZE	200
static int quicktest3(unsigned long arg)
{}

/*
 * Debugging only. User hook for various kernel tests
 * of driver & gru.
 */
int gru_ktest(unsigned long arg)
{}

int gru_kservices_init(void)
{}

void gru_kservices_exit(void)
{}