// SPDX-License-Identifier: GPL-2.0-or-later /* * SN Platform GRU Driver * * FAULT HANDLER FOR GRU DETECTED TLB MISSES * * This file contains code that handles TLB misses within the GRU. * These misses are reported either via interrupts or user polling of * the user CB. * * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/spinlock.h> #include <linux/mm.h> #include <linux/hugetlb.h> #include <linux/device.h> #include <linux/io.h> #include <linux/uaccess.h> #include <linux/security.h> #include <linux/sync_core.h> #include <linux/prefetch.h> #include "gru.h" #include "grutables.h" #include "grulib.h" #include "gru_instructions.h" #include <asm/uv/uv_hub.h> /* Return codes for vtop functions */ #define VTOP_SUCCESS … #define VTOP_INVALID … #define VTOP_RETRY … /* * Test if a physical address is a valid GRU GSEG address */ static inline int is_gru_paddr(unsigned long paddr) { … } /* * Find the vma of a GRU segment. Caller must hold mmap_lock. */ struct vm_area_struct *gru_find_vma(unsigned long vaddr) { … } /* * Find and lock the gts that contains the specified user vaddr. * * Returns: * - *gts with the mmap_lock locked for read and the GTS locked. * - NULL if vaddr invalid OR is not a valid GSEG vaddr. */ static struct gru_thread_state *gru_find_lock_gts(unsigned long vaddr) { … } static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr) { … } /* * Unlock a GTS that was previously locked with gru_find_lock_gts(). */ static void gru_unlock_gts(struct gru_thread_state *gts) { … } /* * Set a CB.istatus to active using a user virtual address. This must be done * just prior to a TFH RESTART. The new cb.istatus is an in-cache status ONLY. * If the line is evicted, the status may be lost. The in-cache update * is necessary to prevent the user from seeing a stale cb.istatus that will * change as soon as the TFH restart is complete. 
 * Races may cause an
 * occasional failure to clear the cb.istatus, but that is ok.
 */
static void gru_cb_set_istatus_active(struct gru_instruction_bits *cbk)
{ … }

/*
 * Read & clear a TFM
 *
 * The GRU has an array of fault maps. A map is private to a cpu
 * Only one cpu will be accessing a cpu's fault map.
 *
 * This function scans the cpu-private fault map & clears all bits that
 * are set. The function returns a bitmap that indicates the bits that
 * were cleared. Note that since the maps may be updated asynchronously by
 * the GRU, atomic operations must be used to clear bits.
 */
static void get_clear_fault_map(struct gru_state *gru,
				struct gru_tlb_fault_map *imap,
				struct gru_tlb_fault_map *dmap)
{ … }

/*
 * Atomic (interrupt context) & non-atomic (user context) functions to
 * convert a vaddr into a physical address. The size of the page
 * is returned in pageshift.
 *	returns:
 *		  0 - successful
 *		< 0 - error code
 *		  1 - (atomic only) try again in non-atomic context
 */
static int non_atomic_pte_lookup(struct vm_area_struct *vma,
				 unsigned long vaddr, int write,
				 unsigned long *paddr, int *pageshift)
{ … }

/*
 * atomic_pte_lookup
 *
 * Convert a user virtual address to a physical address
 * Only supports Intel large pages (2MB only) on x86_64.
 *	ZZZ - hugepage support is incomplete
 *
 * NOTE: mmap_lock is already held on entry to this function. This
 * guarantees existence of the page tables.
 */
static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
			     int write, unsigned long *paddr, int *pageshift)
{ … }

/*
 * NOTE(review): undocumented in the original. Presumably translates a user
 * vaddr for the given thread state into a GRU-usable physical address (*gpa)
 * and page shift, dispatching to the atomic/non-atomic lookups above and
 * returning one of the VTOP_* codes — body not visible here, confirm against
 * the implementation.
 */
static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
		    int write, int atomic, unsigned long *gpa, int *pageshift)
{ … }

/*
 * Flush a CBE from cache. The CBE is clean in the cache. Dirty the
 * CBE cacheline so that the line will be written back to home agent.
 * Otherwise the line may be silently dropped. This has no impact
 * except on performance.
 */
static void gru_flush_cache_cbe(struct gru_control_block_extended *cbe)
{ … }

/*
 * Preload the TLB with entries that may be required. Currently, preloading
 * is implemented only for BCOPY. Preload <tlb_preload_count> pages OR to
 * the end of the bcopy transfer, whichever is smaller.
 */
static void gru_preload_tlb(struct gru_state *gru,
			    struct gru_thread_state *gts, int atomic,
			    unsigned long fault_vaddr, int asid, int write,
			    unsigned char tlb_preload_count,
			    struct gru_tlb_fault_handle *tfh,
			    struct gru_control_block_extended *cbe)
{ … }

/*
 * Drop a TLB entry into the GRU. The fault is described by info in a TFH.
 * Input:
 *	cb    Address of user CBR. Null if not running in user context
 * Return:
 *	  0 = dropin, exception, or switch to UPM successful
 *	  1 = range invalidate active
 *	< 0 = error code
 *
 */
static int gru_try_dropin(struct gru_state *gru,
			  struct gru_thread_state *gts,
			  struct gru_tlb_fault_handle *tfh,
			  struct gru_instruction_bits *cbk)
{ … }

/*
 * Process an external interrupt from the GRU. This interrupt is
 * caused by a TLB miss.
 * Note that this is the interrupt handler that is registered with linux
 * interrupt handlers.
 */
static irqreturn_t gru_intr(int chiplet, int blade)
{ … }

/*
 * NOTE(review): the three handlers below were undocumented in the original.
 * From their irqreturn_t/(irq, dev_id) signatures they are the handlers
 * registered with request_irq(); presumably each forwards to gru_intr()
 * with a fixed chiplet and the appropriate blade — bodies not visible here,
 * confirm against the implementation.
 */
irqreturn_t gru0_intr(int irq, void *dev_id)
{ … }

irqreturn_t gru1_intr(int irq, void *dev_id)
{ … }

irqreturn_t gru_intr_mblade(int irq, void *dev_id)
{ … }

/*
 * NOTE(review): undocumented in the original. Presumably the user-context
 * (polled) counterpart of the interrupt path: retries gru_try_dropin() for
 * the faulting TFH on behalf of the user CB — body not visible here, confirm
 * against the implementation.
 */
static int gru_user_dropin(struct gru_thread_state *gts,
			   struct gru_tlb_fault_handle *tfh,
			   void *cb)
{ … }

/*
 * This interface is called as a result of a user detecting a "call OS" bit
 * in a user CB. Normally means that a TLB fault has occurred.
 *	cb - user virtual address of the CB
 */
int gru_handle_user_call_os(unsigned long cb)
{ … }

/*
 * Fetch the exception detail information for a CB that terminated with
 * an exception.
 */
int gru_get_exception_detail(unsigned long arg)
{ … }

/*
 * User request to unload a context. Content is saved for possible reload.
 */
/*
 * NOTE(review): undocumented in the original. From the name, presumably
 * unloads every resident GRU context (used by the user-unload path below) —
 * body not visible here, confirm against the implementation.
 */
static int gru_unload_all_contexts(void)
{ … }

int gru_user_unload_context(unsigned long arg)
{ … }

/*
 * User request to flush a range of virtual addresses from the GRU TLB
 * (Mainly for testing).
 */
int gru_user_flush_tlb(unsigned long arg)
{ … }

/*
 * Fetch GSEG statistics
 */
long gru_get_gseg_statistics(unsigned long arg)
{ … }

/*
 * Register the current task as the user of the GSEG slice.
 * Needed for TLB fault interrupt targeting.
 */
int gru_set_context_option(unsigned long arg)
{ … }