// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SN Platform GRU Driver
 *
 *            DRIVER TABLE MANAGER + GRU CONTEXT LOAD/UNLOAD
 *
 *  Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/err.h>
#include <linux/prefetch.h>
#include <asm/uv/uv_hub.h>
#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"

unsigned long gru_options __read_mostly;

static struct device_driver gru_driver = {
	.name = "gru"
};

static struct device gru_device = {
	.init_name	= "",
	.driver		= &gru_driver,
};

struct device *grudev = &gru_device;

/*
 * Select a gru fault map to be used by the current cpu. Note that
 * multiple cpus may be using the same map.
 *	ZZZ should be inline but did not work on emulator
 */
int gru_cpu_fault_map_id(void)
{}
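
/*
 * Illustrative sketch only, not the in-tree implementation: one simple way
 * to pick a per-cpu fault map is to fold the blade-local processor id onto
 * the available TFM handles, so multiple cpus may share a map. Assumes
 * uv_blade_processor_id() from <asm/uv/uv_hub.h> (included above) and the
 * GRU_NUM_TFM constant from gruhandles.h.
 */
static int example_cpu_fault_map_id(void)
{
	return uv_blade_processor_id() % GRU_NUM_TFM;
}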

/*--------- ASID Management -------------------------------------------
 *
 *  Initially, assign asids sequentially from MIN_ASID .. MAX_ASID.
 *  Once MAX is reached, flush the TLB & start over. However,
 *  some asids may still be in use (only a small percentage of them).
 *  Search the active contexts & determine the value of the first asid in
 *  use ("x"s below). Set "limit" to this value.
 *  This defines a block of assignable asids.
 *
 *  When "limit" is reached, search forward from limit+1 and determine the
 *  next block of assignable asids.
 *
 *  Repeat until MAX_ASID is reached, then start over again.
 *
 *  Each time MAX_ASID is reached, increment the asid generation. Since
 *  the search for in-use asids only checks contexts with GRUs currently
 *  assigned, asids in some contexts will be missed. Prior to loading
 *  a context, the asid generation of the GTS asid is rechecked. If it
 *  doesn't match the current generation, a new asid will be assigned.
 *
 *   	0---------------x------------x---------------------x----|
 *	  ^-next	^-limit	   				^-MAX_ASID
 *
 * All asid manipulation & context loading/unloading is protected by the
 * gs_lock.
 */

/* Hit the asid limit. Start over */
static int gru_wrap_asid(struct gru_state *gru)
{}

/* Find the next chunk of unused asids */
static int gru_reset_asid_limit(struct gru_state *gru, int asid)
{}

/* Assign a new ASID to a thread context.  */
static int gru_assign_asid(struct gru_state *gru)
{}
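
/*
 * Illustrative sketch of the assignment scheme described above, not the
 * in-tree implementation. Assumes the gs_asid, gs_asid_limit and gs_asid_gen
 * fields and the MIN_ASID/ASID_INC constants from grutables.h, and that the
 * caller holds gs_lock.
 */
static int example_assign_asid(struct gru_state *gru)
{
	int asid = gru->gs_asid + ASID_INC;

	if (asid >= gru->gs_asid_limit) {
		/*
		 * Hit the current block limit. The real driver rescans the
		 * active contexts here to find the next block of free asids;
		 * on a full wrap it bumps gs_asid_gen so stale GTS asids are
		 * detected and reassigned before their context is loaded.
		 */
		asid = MIN_ASID;
		gru->gs_asid_gen++;
	}
	gru->gs_asid = asid;
	return asid;
}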

/*
 * Clear n bits in a word. Return a word indicating the bits that were cleared.
 * Optionally, build an array of chars that contain the bit numbers allocated.
 */
static unsigned long reserve_resources(unsigned long *p, int n, int mmax,
				       signed char *idx)
{}
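
/*
 * Illustrative sketch of the bit-reservation pattern described above, not
 * the in-tree implementation: clear n set bits in *p, return a mask of the
 * bits that were cleared, and optionally record each bit number in idx.
 */
static unsigned long example_reserve_resources(unsigned long *p, int n,
					       int mmax, signed char *idx)
{
	unsigned long bits = 0;
	int i;

	while (n--) {
		i = find_first_bit(p, mmax);	/* next free resource */
		if (i == mmax)
			BUG();			/* caller checked availability */
		__clear_bit(i, p);		/* mark it allocated */
		__set_bit(i, &bits);
		if (idx)
			*idx++ = i;
	}
	return bits;
}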

unsigned long gru_reserve_cb_resources(struct gru_state *gru, int cbr_au_count,
				       signed char *cbmap)
{}

unsigned long gru_reserve_ds_resources(struct gru_state *gru, int dsr_au_count,
				       signed char *dsmap)
{}

static void reserve_gru_resources(struct gru_state *gru,
				  struct gru_thread_state *gts)
{}

static void free_gru_resources(struct gru_state *gru,
			       struct gru_thread_state *gts)
{}

/*
 * Check if a GRU has sufficient free resources to satisfy an allocation
 * request. Note: GRU locks may or may not be held when this is called. If
 * not held, recheck after acquiring the appropriate locks.
 *
 * Returns 1 if sufficient resources, 0 if not
 */
static int check_gru_resources(struct gru_state *gru, int cbr_au_count,
			       int dsr_au_count, int max_active_contexts)
{}
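
/*
 * Illustrative sketch, not the in-tree implementation: the check reduces to
 * comparing the population count of the free CBR/DSR allocation maps against
 * the request and bounding the number of active contexts. The field names
 * gs_cbr_map, gs_dsr_map and gs_active_contexts are assumed from grutables.h.
 */
static int example_check_gru_resources(struct gru_state *gru, int cbr_au_count,
				       int dsr_au_count, int max_active_contexts)
{
	return hweight64(gru->gs_cbr_map) >= cbr_au_count
		&& hweight64(gru->gs_dsr_map) >= dsr_au_count
		&& gru->gs_active_contexts < max_active_contexts;
}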

/*
 * TLB management requires tracking all GRU chiplets that have loaded a GSEG
 * context.
 */
static int gru_load_mm_tracker(struct gru_state *gru,
					struct gru_thread_state *gts)
{}

static void gru_unload_mm_tracker(struct gru_state *gru,
					struct gru_thread_state *gts)
{}
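
/*
 * Illustrative sketch, not the in-tree implementation: per GRU, the mm
 * tracker remembers which context numbers of this mm are loaded so a TLB
 * flush can be broadcast to every chiplet holding the GSEG. The names
 * ts_gms, ms_asids, mt_ctxbitmap and gs_gid are assumed from grutables.h.
 */
static void example_track_loaded_context(struct gru_state *gru,
					 struct gru_thread_state *gts)
{
	struct gru_mm_struct *gms = gts->ts_gms;

	/* Context ts_ctxnum on chiplet gs_gid now maps this mm */
	gms->ms_asids[gru->gs_gid].mt_ctxbitmap |= 1 << gts->ts_ctxnum;
}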

/*
 * Decrement the reference count on a GTS structure. Free the structure
 * if the reference count goes to zero.
 */
void gts_drop(struct gru_thread_state *gts)
{}
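
/*
 * Illustrative sketch of the drop-on-zero pattern described above, not the
 * in-tree implementation. Assumes an atomic ts_refcnt field and a gts that
 * was allocated with kmalloc().
 */
static void example_gts_drop(struct gru_thread_state *gts)
{
	if (gts && atomic_dec_return(&gts->ts_refcnt) == 0)
		kfree(gts);	/* last reference: release the structure */
}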

/*
 * Locate the GTS structure for the current thread.
 */
static struct gru_thread_state *gru_find_current_gts_nolock(struct gru_vma_data
			    *vdata, int tsid)
{}

/*
 * Allocate a thread state structure.
 */
struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
		int cbr_au_count, int dsr_au_count,
		unsigned char tlb_preload_count, int options, int tsid)
{}

/*
 * Allocate a vma private data structure.
 */
struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, int tsid)
{}

/*
 * Find the thread state structure for the current thread.
 */
struct gru_thread_state *gru_find_thread_state(struct vm_area_struct *vma,
					int tsid)
{}

/*
 * Allocate a new thread state for a GSEG. Note that a race may allow
 * another thread to create a gts for the same segment first.
 */
struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma,
					int tsid)
{}
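
/*
 * Illustrative sketch of the race handling mentioned above, not the in-tree
 * implementation: given a gts allocated without the vma lock held (ngts),
 * retake the lock, re-search the list, and discard ngts if another thread
 * created the gts first. The vd_lock, vd_head and ts_next names are assumed
 * from grutables.h; gru_find_current_gts_nolock() is declared earlier in
 * this file.
 */
static struct gru_thread_state *example_install_gts(struct gru_vma_data *vdata,
		struct gru_thread_state *ngts, int tsid)
{
	struct gru_thread_state *gts;

	spin_lock(&vdata->vd_lock);
	gts = gru_find_current_gts_nolock(vdata, tsid);
	if (gts) {
		kfree(ngts);		/* lost the race, reuse existing gts */
	} else {
		gts = ngts;
		list_add(&gts->ts_next, &vdata->vd_head);
	}
	spin_unlock(&vdata->vd_lock);
	return gts;
}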

/*
 * Free the GRU context assigned to the thread state.
 */
static void gru_free_gru_context(struct gru_thread_state *gts)
{}

/*
 * Prefetching cachelines helps hardware performance.
 * (Strictly a performance enhancement. Not functionally required).
 */
static void prefetch_data(void *p, int num, int stride)
{}
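
/*
 * Illustrative sketch, not the in-tree implementation: the prefetch is a
 * simple strided walk over the handle, hinting one cacheline per step with
 * prefetch() from <linux/prefetch.h> (included above).
 */
static void example_prefetch_data(void *p, int num, int stride)
{
	while (num-- > 0) {
		prefetch(p);		/* pull the line toward the cache */
		p += stride;
	}
}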

static inline long gru_copy_handle(void *d, void *s)
{}

static void gru_prefetch_context(void *gseg, void *cb, void *cbe,
				unsigned long cbrmap, unsigned long length)
{}

static void gru_load_context_data(void *save, void *grubase, int ctxnum,
				  unsigned long cbrmap, unsigned long dsrmap,
				  int data_valid)
{}

static void gru_unload_context_data(void *save, void *grubase, int ctxnum,
				    unsigned long cbrmap, unsigned long dsrmap)
{}

void gru_unload_context(struct gru_thread_state *gts, int savestate)
{}

/*
 * Load a GRU context by copying it from the thread data structure in memory
 * to the GRU.
 */
void gru_load_context(struct gru_thread_state *gts)
{}

/*
 * Update fields in an active CCH:
 * 	- retarget interrupts on local blade
 * 	- update sizeavail mask
 */
int gru_update_cch(struct gru_thread_state *gts)
{}

/*
 * Update CCH tlb interrupt select. Required when all the following is true:
 * 	- task's GRU context is loaded into a GRU
 * 	- task is using interrupt notification for TLB faults
 * 	- task has migrated to a different cpu on the same blade where
 * 	  it was previously running.
 */
static int gru_retarget_intr(struct gru_thread_state *gts)
{}
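
/*
 * Illustrative sketch of the retarget test described above, not the in-tree
 * implementation: nothing to do unless the context is resident and using
 * interrupt-based TLB fault notification; otherwise record the new cpu's
 * fault map id and push it into the active CCH via gru_update_cch(),
 * declared earlier in this file. The ts_gru and ts_tlb_int_select fields
 * are assumed from grutables.h.
 */
static int example_retarget_intr(struct gru_thread_state *gts)
{
	if (!gts->ts_gru || gts->ts_tlb_int_select < 0)
		return 0;	/* not loaded or not using interrupts */

	gts->ts_tlb_int_select = gru_cpu_fault_map_id();
	return gru_update_cch(gts);
}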

/*
 * Check if a GRU context is allowed to use a specific chiplet. By default
 * a context is assigned to any blade-local chiplet. However, users can
 * override this.
 * 	Returns 1 if assignment allowed, 0 otherwise
 */
static int gru_check_chiplet_assignment(struct gru_state *gru,
					struct gru_thread_state *gts)
{}
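
/*
 * Illustrative sketch of the check described above, not the in-tree
 * implementation: fall back to the local blade when the user has not pinned
 * one, and accept any chiplet on that blade unless a specific chiplet was
 * selected. The ts_user_blade_id, ts_user_chiplet_id, gs_blade_id and
 * gs_chiplet_id names are assumed from grutables.h.
 */
static int example_check_chiplet_assignment(struct gru_state *gru,
					    struct gru_thread_state *gts)
{
	int blade_id = gts->ts_user_blade_id;
	int chiplet_id = gts->ts_user_chiplet_id;

	if (blade_id < 0)
		blade_id = uv_numa_blade_id();	/* default: local blade */

	return gru->gs_blade_id == blade_id &&
		(chiplet_id < 0 || chiplet_id == gru->gs_chiplet_id);
}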

/*
 * Unload the gru context if it is not assigned to the correct blade or
 * chiplet. Misassignment can occur if the process migrates to a different
 * blade or if the user changes the selected blade/chiplet.
 */
int gru_check_context_placement(struct gru_thread_state *gts)
{}


/*
 * Insufficient GRU resources available on the local blade. Steal a context from
 * a process. This is a hack until a _real_ resource scheduler is written....
 */
#define next_ctxnum(n)	((n) <  GRU_NUM_CCH - 2 ? (n) + 1 : 0)
#define next_gru(b, g)	(((g) < &(b)->bs_grus[GRU_CHIPLETS_PER_BLADE - 1]) ?  \
				 ((g)+1) : &(b)->bs_grus[0])

static int is_gts_stealable(struct gru_thread_state *gts,
		struct gru_blade_state *bs)
{}

static void gts_stolen(struct gru_thread_state *gts,
		struct gru_blade_state *bs)
{}

void gru_steal_context(struct gru_thread_state *gts)
{}
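
/*
 * Illustrative sketch of the round-robin steal scan, not the in-tree
 * implementation: walk the blade's chiplets and context numbers from where
 * the last scan stopped and evict the first context that can be trylocked.
 * Uses is_gts_stealable()/gts_stolen() above and gru_unload_context()
 * declared earlier; the gs_gts[] array, bs_grus[] and GRU_CHIPLETS_PER_BLADE
 * are assumed from grutables.h.
 */
static void example_steal_one_context(struct gru_blade_state *bs,
				      struct gru_state *gru, int ctxnum)
{
	int max = GRU_CHIPLETS_PER_BLADE * GRU_NUM_CCH;

	while (max-- > 0) {
		struct gru_thread_state *gts = gru->gs_gts[ctxnum];

		/* advance the scan position for the next iteration */
		ctxnum = next_ctxnum(ctxnum);
		if (ctxnum == 0)
			gru = next_gru(bs, gru);

		if (gts && is_gts_stealable(gts, bs)) {
			gru_unload_context(gts, 1);	/* save state */
			gts_stolen(gts, bs);
			break;
		}
	}
}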

/*
 * Assign a gru context.
 */
static int gru_assign_context_number(struct gru_state *gru)
{}
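
/*
 * Illustrative sketch, not the in-tree implementation: context numbers are
 * just free slots in a per-GRU bitmap, one bit per CCH. The gs_context_map
 * field is assumed from grutables.h; the caller has already verified via
 * check_gru_resources() that a slot is available.
 */
static int example_assign_context_number(struct gru_state *gru)
{
	int ctxnum;

	ctxnum = find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
	__set_bit(ctxnum, &gru->gs_context_map);	/* claim the slot */
	return ctxnum;
}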

/*
 * Scan the GRUs on the local blade & assign a GRU context.
 */
struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts)
{}

/*
 * gru_fault
 *
 * Map the user's GRU segment
 *
 * 	Note: gru segments are always mmapped on GRU_GSEG_PAGESIZE boundaries.
 */
vm_fault_t gru_fault(struct vm_fault *vmf)
{}
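
/*
 * Illustrative sketch of the mapping step described above, not the in-tree
 * implementation: round the faulting address down to a GRU_GSEG_PAGESIZE
 * boundary and map the physical GSEG of the assigned context into the vma.
 * gseg_physical_address() is assumed from grutables.h; the real handler also
 * assigns or steals a GRU context when none is resident.
 */
static vm_fault_t example_map_gseg(struct vm_fault *vmf,
				   struct gru_thread_state *gts)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long vaddr, paddr;

	/* GSEGs are always mmapped on GRU_GSEG_PAGESIZE boundaries */
	vaddr = vmf->address & ~(GRU_GSEG_PAGESIZE - 1);
	paddr = gseg_physical_address(gts->ts_gru, gts->ts_ctxnum);

	if (remap_pfn_range(vma, vaddr, paddr >> PAGE_SHIFT,
			    GRU_GSEG_PAGESIZE, vma->vm_page_prot))
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;
}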