go/src/runtime/export_test.go

var Fadd64

var Fsub64

var Fmul64

var Fdiv64

var F64to32

var F32to64

var Fcmp64

var Fintto64

var F64toint

var Entersyscall

var Exitsyscall

var LockedOSThread

var Xadduintptr

var ReadRandomFailed

var Fastlog2

var Atoi

var Atoi32

var ParseByteCount

var Nanotime

var NetpollBreak

var Usleep

var PhysPageSize

var PhysHugePageSize

var NetpollGenericInit

var Memmove

var MemclrNoHeapPointers

var CgoCheckPointer

const CrashStackImplemented

const TracebackInnerFrames

const TracebackOuterFrames

var MapKeys

var MapValues

var LockPartialOrder

type TimeTimer

type LockRank

func (l LockRank) String() string {}

const PreemptMSupported

type LFNode

func LFStackPush(head *uint64, node *LFNode) {}

func LFStackPop(head *uint64) *LFNode {}

func LFNodeValidate(node *LFNode) {}

func Netpoll(delta int64) {}

func GCMask(x any) (ret []byte) {}

func RunSchedLocalQueueTest() {}

func RunSchedLocalQueueStealTest() {}

func RunSchedLocalQueueEmptyTest(iters int) {}

var StringHash

var BytesHash

var Int32Hash

var Int64Hash

var MemHash

var MemHash32

var MemHash64

var EfaceHash

var IfaceHash

var UseAeshash

func MemclrBytes(b []byte) {}

const HashLoad

// entry point for testing
func GostringW(w []uint16) (s string) {}

var Open

var Close

var Read

var Write

func Envs() []string     {}

func SetEnvs(e []string) {}

const PtrSize

var ForceGCPeriod

// SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
// the "environment" traceback level, so later calls to
// debug.SetTraceback (e.g., from testing timeouts) can't lower it.
func SetTracebackEnv(level string) {}

var ReadUnaligned32

var ReadUnaligned64

func CountPagesInUse() (pagesInUse, counted uintptr) {}

func Fastrand() uint32          {}

func Fastrand64() uint64        {}

func Fastrandn(n uint32) uint32 {}

type ProfBuf

func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {}

func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {}

const ProfBufBlocking

const ProfBufNonBlocking

func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {}

func (p *ProfBuf) Close() {}
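
// A minimal usage sketch (an assumption-laden illustration, not part of the
// exported surface): write one record into a ProfBuf and drain it with a
// non-blocking read. Assumes it runs in a test in package runtime_test that
// dot-imports runtime and imports "unsafe"; the record encoding is internal,
// so the sketch only checks that something came back.
func profBufSketch() bool {
	b := NewProfBuf(2, 512, 1) // 2 header words per record, 512-word buffer, room for 1 tag
	defer b.Close()

	tag := unsafe.Pointer(new(int))
	hdr := []uint64{42, 43}          // must not exceed the header size passed to NewProfBuf
	stk := []uintptr{0x1234, 0x5678} // a fake call stack, for illustration only
	b.Write(&tag, 1, hdr, stk)

	data, tags, _ := b.Read(ProfBufNonBlocking)
	return len(data) > 0 && len(tags) > 0
}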

type CPUStats

func ReadCPUStats() CPUStats {}

func ReadMetricsSlow(memStats *MemStats, samplesp unsafe.Pointer, len, cap int) {}

var DoubleCheckReadMemStats

// ReadMemStatsSlow returns both the runtime-computed MemStats and
// MemStats accumulated by scanning the heap.
func ReadMemStatsSlow() (base, slow MemStats) {}
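
// A small consistency-check sketch (assumed to live in a runtime_test test,
// importing "testing"): the runtime-computed stats and the heap-scan-derived
// stats should normally agree on headline numbers such as HeapAlloc.
func checkMemStatsAgree(t *testing.T) {
	base, slow := ReadMemStatsSlow()
	if base.HeapAlloc != slow.HeapAlloc {
		t.Errorf("HeapAlloc mismatch: runtime says %d, heap scan says %d", base.HeapAlloc, slow.HeapAlloc)
	}
}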

// ShrinkStackAndVerifyFramePointers attempts to shrink the stack of the current goroutine
// and verifies that unwinding the new stack doesn't crash, even if the old
// stack has been freed or reused (simulated via poisoning).
func ShrinkStackAndVerifyFramePointers() {}

// BlockOnSystemStack switches to the system stack, prints "x\n" to
// stderr, and blocks in a stack containing
// "runtime.blockOnSystemStackInternal".
func BlockOnSystemStack() {}

func blockOnSystemStackInternal() {}

type RWMutex

func (rw *RWMutex) Init() {}

func (rw *RWMutex) RLock() {}

func (rw *RWMutex) RUnlock() {}

func (rw *RWMutex) Lock() {}

func (rw *RWMutex) Unlock() {}

func LockOSCounts() (external, internal uint32) {}

//go:noinline
func TracebackSystemstack(stk []uintptr, i int) int {}

func KeepNArenaHints(n int) {}

// MapNextArenaHint reserves a page at the next arena growth hint,
// preventing the arena from growing there, and returns the range of
// addresses that are no longer viable.
//
// This may fail to reserve memory. If it fails, it still returns the
// address range it attempted to reserve.
func MapNextArenaHint() (start, end uintptr, ok bool) {}

func GetNextArenaHint() uintptr {}

type G

type Sudog

func Getg() *G {}

func Goid() uint64 {}

func GIsWaitingOnMutex(gp *G) bool {}

var CasGStatusAlwaysTrack

//go:noinline
func PanicForTesting(b []byte, i int) byte {}

//go:noinline
func unexportedPanicForTesting(b []byte, i int) byte {}

func G0StackOverflow() {}

func stackOverflow(x *byte) {}

func RunGetgThreadSwitchTest() {}

const PageSize

const PallocChunkPages

const PageAlloc64Bit

const PallocSumBytes

type PallocSum

func PackPallocSum(start, max, end uint) PallocSum {}

func (m PallocSum) Start() uint                    {}

func (m PallocSum) Max() uint                      {}

func (m PallocSum) End() uint                      {}

type PallocBits

func (b *PallocBits) Find(npages uintptr, searchIdx uint) (uint, uint) {}

func (b *PallocBits) AllocRange(i, n uint)       {}

func (b *PallocBits) Free(i, n uint)             {}

func (b *PallocBits) Summarize() PallocSum       {}

func (b *PallocBits) PopcntRange(i, n uint) uint {}

// SummarizeSlow is a slow but more obviously correct implementation
// of (*pallocBits).summarize. Used for testing.
func SummarizeSlow(b *PallocBits) PallocSum {}

// Expose non-trivial helpers for testing.
func FindBitRange64(c uint64, n uint) uint {}

// Given two PallocBits, returns a set of bit ranges where
// they differ.
func DiffPallocBits(a, b *PallocBits) []BitRange {}

// StringifyPallocBits gets the bits in the bit range r from b,
// and returns a string containing the bits as ASCII 0 and 1
// characters.
func StringifyPallocBits(b *PallocBits, r BitRange) string {}
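
// A usage sketch for the PallocBits helpers above, assuming (as in the
// runtime) that a set bit marks a page as allocated and Find searches for a
// run of free pages. Intended to sit in a runtime_test test that dot-imports
// runtime.
func pallocBitsSketch() {
	var b PallocBits
	b.AllocRange(0, 8) // mark pages [0, 8) allocated
	b.Free(0, 2)       // free pages [0, 2) again

	// Find a run of 4 free pages starting the search at page 0; with pages
	// [2, 8) still allocated, the run should begin at page 8. The second
	// result is a hint for where the next search can start.
	idx, _ := b.Find(4, 0)
	_ = idx

	sum := b.Summarize() // packed start/max/end counts of free pages
	_, _, _ = sum.Start(), sum.Max(), sum.End()

	_ = b.PopcntRange(0, PallocChunkPages) // allocated pages in the whole chunk
	_ = FindBitRange64(^uint64(0), 8)      // the low-level helpers can be driven directly too
}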

type PallocData

func (d *PallocData) FindScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {}

func (d *PallocData) AllocRange(i, n uint) {}

func (d *PallocData) ScavengedSetRange(i, n uint) {}

func (d *PallocData) PallocBits() *PallocBits {}

func (d *PallocData) Scavenged() *PallocBits {}

// Expose fillAligned for testing.
func FillAligned(x uint64, m uint) uint64 {}

type PageCache

const PageCachePages

func NewPageCache(base uintptr, cache, scav uint64) PageCache {}

func (c *PageCache) Empty() bool   {}

func (c *PageCache) Base() uintptr {}

func (c *PageCache) Cache() uint64 {}

func (c *PageCache) Scav() uint64  {}

func (c *PageCache) Alloc(npages uintptr) (uintptr, uintptr) {}

func (c *PageCache) Flush(s *PageAlloc) {}
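
// A sketch of driving a PageCache by hand (runtime_test, dot-importing
// runtime). It assumes the cache bitmap uses 1 bits for free pages; the
// specific base address and bit patterns are arbitrary.
func pageCacheSketch() {
	base := PageBase(BaseChunkIdx, 0)
	c := NewPageCache(base, ^uint64(0), 0) // 64 free pages at base, none scavenged

	addr, scav := c.Alloc(2) // take 2 pages; scav reports scavenged bytes in that allocation
	_, _ = addr, scav

	if !c.Empty() {
		_, _, _ = c.Base(), c.Cache(), c.Scav() // inspect what remains in the cache
	}
}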

type ChunkIdx

type PageAlloc

func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {}

func (p *PageAlloc) AllocToCache() PageCache {}

func (p *PageAlloc) Free(base, npages uintptr) {}

func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {}

func (p *PageAlloc) Scavenge(nbytes uintptr) (r uintptr) {}

func (p *PageAlloc) InUse() []AddrRange {}

// Returns nil if the PallocData's L2 is missing.
func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {}

type AddrRange

// MakeAddrRange creates a new address range.
func MakeAddrRange(base, limit uintptr) AddrRange {}

// Base returns the virtual base address of the address range.
func (a AddrRange) Base() uintptr {}

// Limit returns the virtual address of the limit of the address range.
func (a AddrRange) Limit() uintptr {}

// Equals returns true if the two address ranges are exactly equal.
func (a AddrRange) Equals(b AddrRange) bool {}

// Size returns the size in bytes of the address range.
func (a AddrRange) Size() uintptr {}

var testSysStat

type AddrRanges

// NewAddrRanges creates a new empty addrRanges.
//
// Note that this initializes addrRanges just like in the
// runtime, so its memory is persistentalloc'd. Call this
// function sparingly since the memory it allocates is
// leaked.
//
// This AddrRanges is mutable, so we can test methods like
// Add.
func NewAddrRanges() AddrRanges {}

// MakeAddrRanges creates a new addrRanges populated with
// the ranges in a.
//
// The returned AddrRanges is immutable, so methods like
// Add will fail.
func MakeAddrRanges(a ...AddrRange) AddrRanges {}

// Ranges returns a copy of the ranges described by the
// addrRanges.
func (a *AddrRanges) Ranges() []AddrRange {}

// FindSucc returns the successor to base. See addrRanges.findSucc
// for more details.
func (a *AddrRanges) FindSucc(base uintptr) int {}

// Add adds a new AddrRange to the AddrRanges.
//
// The AddrRange must be mutable (i.e. created by NewAddrRanges),
// otherwise this method will throw.
func (a *AddrRanges) Add(r AddrRange) {}

// TotalBytes returns the totalBytes field of the addrRanges.
func (a *AddrRanges) TotalBytes() uintptr {}
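
// A sketch of the AddrRange/AddrRanges accessors (runtime_test, dot-importing
// runtime). Note the warning on NewAddrRanges above: its backing memory is
// persistentalloc'd and never freed, so tests should create few of these.
// The addresses are arbitrary illustrative values.
func addrRangesSketch() {
	r := MakeAddrRange(0x1000, 0x2000)
	_, _, _ = r.Base(), r.Limit(), r.Size() // 0x1000, 0x2000, and 0x1000 bytes

	a := NewAddrRanges() // mutable, so Add is allowed
	a.Add(MakeAddrRange(0x1000, 0x2000))
	a.Add(MakeAddrRange(0x3000, 0x4000))

	_ = a.Ranges()         // copy of the ranges currently held
	_ = a.FindSucc(0x2800) // index of the successor range for this address
	_ = a.TotalBytes()     // 0x2000 bytes across the two ranges
}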

type BitRange

// NewPageAlloc creates a new page allocator for testing and
// initializes it with the scav and chunks maps. Each key in these maps
// represents a chunk index and each value is a series of bit ranges to
// set within each bitmap's chunk.
//
// The initialization of the pageAlloc preserves the invariant that if a
// scavenged bit is set the alloc bit is necessarily unset, so some
// of the bits described by scav may be cleared in the final bitmap if
// ranges in chunks overlap with them.
//
// scav is optional, and if nil, the scavenged bitmap will be cleared
// (as opposed to all 1s, which it usually is). Furthermore, every
// chunk index in scav must appear in chunks; ones that do not are
// ignored.
func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {}

// FreePageAlloc releases hard OS resources owned by the pageAlloc. Once this
// is called the pageAlloc may no longer be used. The object itself will be
// collected by the garbage collector once it is no longer live.
func FreePageAlloc(pp *PageAlloc) {}

var BaseChunkIdx

// PageBase returns an address given a chunk index and a page index
// relative to that chunk.
func PageBase(c ChunkIdx, pageIdx uint) uintptr {}
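
// A sketch of a minimal page-allocator test (runtime_test, dot-importing
// runtime): one fully free chunk, a single page allocated and then freed.
// With a completely free chunk, the first page handed out is expected to be
// the chunk's base address.
func pageAllocSketch() bool {
	p := NewPageAlloc(map[ChunkIdx][]BitRange{
		BaseChunkIdx: {}, // no bits set: the whole chunk is free
	}, nil) // nil scav: the scavenged bitmap is left clear
	defer FreePageAlloc(p)

	addr, _ := p.Alloc(1)
	ok := addr == PageBase(BaseChunkIdx, 0)
	p.Free(addr, 1)
	return ok
}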

type BitsMismatch

func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {}

func PageCachePagesLeaked() (leaked uintptr) {}

var ProcYield

var OSYield

type Mutex

var Lock

var Unlock

var MutexContended

func SemRootLock(addr *uint32) *mutex {}

var Semacquire

var Semrelease1

func SemNwait(addr *uint32) uint32 {}

const SemTableSize

type SemTable

// Enqueue simulates enqueuing a waiter for a semaphore (or lock) at addr.
func (t *SemTable) Enqueue(addr *uint32) {}

// Dequeue simulates dequeuing a waiter for a semaphore (or lock) at addr.
//
// Returns true if there actually was a waiter to be dequeued.
func (t *SemTable) Dequeue(addr *uint32) bool {}
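
// A sketch of the SemTable waiter bookkeeping (runtime_test, dot-importing
// runtime). The zero value of SemTable is assumed to be ready to use.
func semTableSketch() (bool, bool) {
	var tab SemTable
	var addr uint32

	tab.Enqueue(&addr)           // one simulated waiter on addr
	first := tab.Dequeue(&addr)  // true: there was a waiter to remove
	second := tab.Dequeue(&addr) // false: nothing left to dequeue
	return first, second
}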

type MSpan

// Allocate an mspan for testing.
func AllocMSpan() *MSpan {}

// Free an allocated mspan.
func FreeMSpan(s *MSpan) {}

func MSpanCountAlloc(ms *MSpan, bits []byte) int {}

const TimeHistSubBucketBits

const TimeHistNumSubBuckets

const TimeHistNumBuckets

const TimeHistMinBucketBits

const TimeHistMaxBucketBits

type TimeHistogram

// Count returns the count for the given bucket and subBucket indices.
// It returns true if the indices were valid; otherwise it returns the
// count for the overflow bucket (if bucket > 0) or the underflow bucket
// (if bucket < 0), along with false.
func (th *TimeHistogram) Count(bucket, subBucket int) (uint64, bool) {}

func (th *TimeHistogram) Record(duration int64) {}
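
// A sketch of recording into a TimeHistogram and reading a bucket back
// (runtime_test, dot-importing runtime). Which bucket a given duration lands
// in is an internal detail, so the sketch only demonstrates the calling
// pattern; negative durations are described above as landing in the
// underflow bucket.
func timeHistogramSketch() {
	var h TimeHistogram
	h.Record(500) // a 500ns sample
	h.Record(-1)  // counted in the underflow bucket

	if n, ok := h.Count(0, 0); ok {
		_ = n // count for bucket 0, sub-bucket 0
	}
	if n, ok := h.Count(-1, 0); !ok {
		_ = n // with a negative bucket index, this is the underflow count
	}
}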

var TimeHistogramMetricsBuckets

func SetIntArgRegs(a int) int {}

func FinalizerGAsleep() bool {}

var GCTestMoveStackOnNextCall

// For GCTestIsReachable, it's important that we do this as a call so
// escape analysis can see through it.
func GCTestIsReachable(ptrs ...unsafe.Pointer) (mask uint64) {}

// For GCTestPointerClass, it's important that we do this as a call so
// escape analysis can see through it.
//
// This is nosplit because gcTestPointerClass is.
//
//go:nosplit
func GCTestPointerClass(p unsafe.Pointer) string {}

const Raceenabled

const GCBackgroundUtilization

const GCGoalUtilization

const DefaultHeapMinimum

const MemoryLimitHeapGoalHeadroomPercent

const MemoryLimitMinHeapGoalHeadroom

type GCController

func NewGCController(gcPercent int, memoryLimit int64) *GCController {}

func (c *GCController) StartCycle(stackSize, globalsSize uint64, scannableFrac float64, gomaxprocs int) {}

func (c *GCController) AssistWorkPerByte() float64 {}

func (c *GCController) HeapGoal() uint64 {}

func (c *GCController) HeapLive() uint64 {}

func (c *GCController) HeapMarked() uint64 {}

func (c *GCController) Triggered() uint64 {}

type GCControllerReviseDelta

func (c *GCController) Revise(d GCControllerReviseDelta) {}

func (c *GCController) EndCycle(bytesMarked uint64, assistTime, elapsed int64, gomaxprocs int) {}

func (c *GCController) AddIdleMarkWorker() bool {}

func (c *GCController) NeedIdleMarkWorker() bool {}

func (c *GCController) RemoveIdleMarkWorker() {}

func (c *GCController) SetMaxIdleMarkWorkers(max int32) {}
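
// A sketch of standing up a test GCController and starting a cycle
// (runtime_test, dot-importing runtime and importing "math"). The particular
// numbers are arbitrary; the parameter meanings (stack size, globals size,
// scannable fraction, GOMAXPROCS) are inferred from the names above.
func gcControllerSketch() {
	c := NewGCController(100, math.MaxInt64) // GOGC=100, effectively no memory limit
	c.StartCycle(8192, 4096, 0.5, 4)

	_ = c.HeapGoal()          // heap goal for this cycle
	_ = c.HeapLive()          // heap-live value the controller started from
	_ = c.HeapMarked()        // marked bytes carried over from the previous cycle
	_ = c.AssistWorkPerByte() // current assist ratio
}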

var alwaysFalse

var escapeSink

func Escape[T any](x T) T {}

// Acquirem blocks preemption.
func Acquirem() {}

func Releasem() {}

var Timediv

type PIController

func NewPIController(kp, ti, tt, min, max float64) *PIController {}

func (c *PIController) Next(input, setpoint, period float64) (float64, bool) {}
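
// A sketch of the PI controller: feed it a measurement against a setpoint and
// take the new control output (runtime_test, dot-importing runtime). The gain
// values here are arbitrary illustrations, not the runtime's tuned constants.
func piControllerSketch() (float64, bool) {
	c := NewPIController(0.5, 10, 100, 0, 1000) // kp, ti, tt, min, max
	out, ok := c.Next(5, 10, 1)                 // input 5, setpoint 10, period 1
	return out, ok                              // ok reports whether the update stayed well-formed
}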

const CapacityPerProc

const GCCPULimiterUpdatePeriod

type GCCPULimiter

func NewGCCPULimiter(now int64, gomaxprocs int32) *GCCPULimiter {}

func (l *GCCPULimiter) Fill() uint64 {}

func (l *GCCPULimiter) Capacity() uint64 {}

func (l *GCCPULimiter) Overflow() uint64 {}

func (l *GCCPULimiter) Limiting() bool {}

func (l *GCCPULimiter) NeedUpdate(now int64) bool {}

func (l *GCCPULimiter) StartGCTransition(enableGC bool, now int64) {}

func (l *GCCPULimiter) FinishGCTransition(now int64) {}

func (l *GCCPULimiter) Update(now int64) {}

func (l *GCCPULimiter) AddAssistTime(t int64) {}

func (l *GCCPULimiter) ResetCapacity(now int64, nprocs int32) {}
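
// A sketch of the GC CPU limiter's bookkeeping outside of a GC transition
// (runtime_test, dot-importing runtime). Times are nanoseconds and the values
// are arbitrary.
func gcCPULimiterSketch() bool {
	l := NewGCCPULimiter(0, 8)    // start the clock at 0 with 8 procs
	l.AddAssistTime(1_000_000)    // 1ms of assist time accumulated
	l.Update(10_000_000)          // fold accumulated time in at t=10ms
	_, _ = l.Fill(), l.Capacity() // current bucket fill vs. capacity
	return l.Limiting()           // with this little assist time, expected to be false
}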

const ScavengePercent

type Scavenger

func (s *Scavenger) Start() {}

// BlockUntilParked blocks until the scavenger parks, or until
// timeout is exceeded. Returns true if the scavenger parked.
//
// Note that in testing, "parked" means something slightly different.
// In normal (non-test) operation the scavenger also parks to sleep,
// but in testing it only parks when it actually has no work to do.
func (s *Scavenger) BlockUntilParked(timeout int64) bool {}

// Released returns how many bytes the scavenger released.
func (s *Scavenger) Released() uintptr {}

// Wake wakes up a parked scavenger to keep running.
func (s *Scavenger) Wake() {}

// Stop cleans up the scavenger's resources. The scavenger
// must be parked for this to work.
func (s *Scavenger) Stop() {}

type ScavengeIndex

func NewScavengeIndex(min, max ChunkIdx) *ScavengeIndex {}

func (s *ScavengeIndex) Find(force bool) (ChunkIdx, uint) {}

func (s *ScavengeIndex) AllocRange(base, limit uintptr) {}

func (s *ScavengeIndex) FreeRange(base, limit uintptr) {}

func (s *ScavengeIndex) ResetSearchAddrs() {}

func (s *ScavengeIndex) NextGen() {}

func (s *ScavengeIndex) SetEmpty(ci ChunkIdx) {}

func CheckPackScavChunkData(gen uint32, inUse, lastInUse uint16, flags uint8) bool {}

const GTrackingPeriod

var ZeroBase

const UserArenaChunkBytes

type UserArena

func NewUserArena() *UserArena {}

func (a *UserArena) New(out *any) {}

func (a *UserArena) Slice(sl any, cap int) {}

func (a *UserArena) Free() {}
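
// A sketch of the user-arena test hooks (runtime_test, dot-importing runtime).
// The calling convention assumed here, inferred from the signatures above, is
// that New takes a pointer to an interface holding a typed nil pointer and
// Slice takes a pointer to a slice; both then refer to arena memory until Free.
func userArenaSketch() {
	a := NewUserArena()

	var x any = (*int)(nil)
	a.New(&x)
	p := x.(*int) // assumed: x now holds a *int backed by the arena
	*p = 7

	var s []byte
	a.Slice(&s, 64) // assumed: s now refers to arena-backed storage with capacity 64

	a.Free() // release the arena; p and s must not be used after this
}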

func GlobalWaitingArenaChunks() int {}

func UserArenaClone[T any](s T) T {}

var AlignUp

func BlockUntilEmptyFinalizerQueue(timeout int64) bool {}

func FrameStartLine(f *Frame) int {}

// PersistentAlloc allocates some memory that lives outside the Go heap.
// This memory will never be freed; use sparingly.
func PersistentAlloc(n uintptr) unsafe.Pointer {}

// FPCallers works like Callers and uses frame pointer unwinding to populate
// pcBuf with the return addresses of the physical frames on the stack.
func FPCallers(pcBuf []uintptr) int {}

const FramePointerEnabled
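
// A sketch of frame-pointer unwinding via FPCallers, gated on
// FramePointerEnabled (runtime_test, dot-importing runtime).
func fpCallersSketch() []uintptr {
	if !FramePointerEnabled {
		return nil
	}
	pcs := make([]uintptr, 32)
	n := FPCallers(pcs) // return addresses of the physical frames on this stack
	return pcs[:n]
}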

var IsPinned

var GetPinCounter

func SetPinnerLeakPanic(f func()) {}

func GetPinnerLeakPanic() func() {}

var testUintptr

func MyGenericFunc[T any]() {}

func UnsafePoint(pc uintptr) bool {}

type TraceMap

func (m *TraceMap) PutString(s string) (uint64, bool) {}

func (m *TraceMap) Reset() {}
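
// A sketch of the trace map's string interning (runtime_test, dot-importing
// runtime). The boolean is taken to report whether the string was newly
// inserted, which is an assumption; the property shown here is that the same
// string maps to the same ID until Reset.
func traceMapSketch() bool {
	var m TraceMap
	id1, _ := m.PutString("hello")
	id2, _ := m.PutString("hello") // same string, same ID
	m.Reset()                      // drop all entries and backing memory
	return id1 == id2
}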