go/src/runtime/stack.go

const stackSystem

const stackMin

const fixedStack0

const fixedStack1

const fixedStack2

const fixedStack3

const fixedStack4

const fixedStack5

const fixedStack6

const fixedStack

const stackNosplit

const stackGuard

const stackDebug

const stackFromSystem

const stackFaultOnFree

const stackNoCache

const debugCheckBP

var stackPoisonCopy

const uintptrMask

const stackPreempt

const stackFork

const stackForceMove

const stackPoisonMin

var stackpool

type stackpoolItem

var stackLarge

func stackinit() {}

// stacklog2 returns ⌊log_2(n)⌋.
func stacklog2(n uintptr) int {}
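
// A minimal sketch of the ⌊log_2(n)⌋ computation the comment above
// describes (an assumed shift loop; the runtime's version may differ in
// detail):
func stacklog2Sketch(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}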

// Allocates a stack from the free pool. Must be called with
// stackpool[order].item.mu held.
func stackpoolalloc(order uint8) gclinkptr {}

// Adds stack x to the free pool. Must be called with stackpool[order].item.mu held.
func stackpoolfree(x gclinkptr, order uint8) {}
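
// An illustrative sketch of the order-indexed free pool that
// stackpoolalloc/stackpoolfree manage: one free list per size class,
// alloc pops a block, free pushes one back, and the caller is assumed to
// hold the per-order lock as the comments above require. The types and
// the 2048-byte base size are stand-ins, not the runtime's
// gclinkptr/mspan machinery.
type stackPoolSketch struct {
	lists [4][][]byte // one free list per order; order k holds 2048<<k byte stacks
}

func (p *stackPoolSketch) get(order int) []byte {
	if n := len(p.lists[order]); n > 0 {
		s := p.lists[order][n-1]
		p.lists[order] = p.lists[order][:n-1]
		return s
	}
	return make([]byte, 2048<<order) // pool empty: carve out fresh memory
}

func (p *stackPoolSketch) put(s []byte, order int) {
	p.lists[order] = append(p.lists[order], s)
}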

// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
//
//go:systemstack
func stackcacherefill(c *mcache, order uint8) {}

//go:systemstack
func stackcacherelease(c *mcache, order uint8) {}
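
// A hedged sketch of the cache/pool interaction described above: the
// per-thread cache is refilled from the global pool when it runs low and
// drained back into the pool when it grows past a watermark, which is
// what keeps per-thread caches bounded. The watermark and batch size are
// illustrative, not the runtime's stack cache constants; stackPoolSketch
// is the stand-in defined earlier.
func stackcacherefillSketch(cache *[][]byte, pool *stackPoolSketch, order int) {
	for len(*cache) < 8 { // refill up to an assumed half-full watermark
		*cache = append(*cache, pool.get(order))
	}
}

func stackcachereleaseSketch(cache *[][]byte, pool *stackPoolSketch, order int) {
	for len(*cache) > 8 { // drain back down to the same watermark
		n := len(*cache)
		pool.put((*cache)[n-1], order)
		*cache = (*cache)[:n-1]
	}
}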

//go:systemstack
func stackcache_clear(c *mcache) {}

// stackalloc allocates an n byte stack.
//
// stackalloc must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackalloc(n uint32) stack {}
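
// A sketch of the size-class decision stackalloc's small-stack path
// implies: sizes are mapped to a cache order by repeated halving, and
// requests beyond the largest order bypass the per-P caches. The 2048
// base size and 4 orders are typical values assumed here, not read from
// this file.
func stackOrderSketch(n uint32) (order int, cached bool) {
	const fixedStackSketch = 2048 // assumed size of the smallest fixed-order stack
	const numOrdersSketch = 4     // assumed number of cached size classes
	n2 := n
	for n2 > fixedStackSketch {
		order++
		n2 >>= 1
	}
	return order, order < numOrdersSketch
}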

// stackfree frees an n byte stack allocation at stk.
//
// stackfree must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackfree(stk stack) {}

var maxstacksize

var maxstackceiling

var ptrnames

type adjustinfo

// adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {}
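
// A minimal sketch of the adjustment adjustpointer performs: a value that
// points into the old stack's [lo, hi) range is shifted by the delta
// between the two stacks. stackSketch is an illustrative stand-in for the
// runtime's stack/adjustinfo types.
type stackSketch struct{ lo, hi uintptr }

func adjustPointerSketch(oldStk, newStk stackSketch, p uintptr) uintptr {
	if oldStk.lo <= p && p < oldStk.hi {
		return p + (newStk.hi - oldStk.hi) // keep the same offset from the top of the stack
	}
	return p
}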

type bitvector

// ptrbit returns the i'th bit in bv.
// ptrbit is less efficient than iterating directly over bitvector bits,
// and should only be used in non-performance-critical code.
// See adjustpointers for an example of a high-efficiency walk of a bitvector.
func (bv *bitvector) ptrbit(i uintptr) uint8 {}
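
// A sketch of the bit extraction ptrbit's comment describes, assuming the
// usual packed bitmap layout with low-order bits first within a byte;
// bitvectorSketch is a stand-in for the runtime's bitvector.
type bitvectorSketch struct {
	n        int32
	bytedata []byte
}

func (bv *bitvectorSketch) ptrbit(i uintptr) uint8 {
	b := bv.bytedata[i/8]     // byte holding bit i
	return (b >> (i % 8)) & 1 // select bit i within that byte
}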

// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f funcInfo) {}
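
// A sketch of the walk adjustpointers' comment implies: for each word of
// the region described by the bitvector, adjust it only if its bit is set
// (i.e. the word holds a pointer). This operates on a []uintptr stand-in
// rather than raw stack memory and reuses the sketches above.
func adjustPointersSketch(words []uintptr, bv *bitvectorSketch, oldStk, newStk stackSketch) {
	for i := range words {
		if bv.ptrbit(uintptr(i)) == 1 {
			words[i] = adjustPointerSketch(oldStk, newStk, words[i])
		}
	}
}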

// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, adjinfo *adjustinfo) {}

func adjustctxt(gp *g, adjinfo *adjustinfo) {}

func adjustdefers(gp *g, adjinfo *adjustinfo) {}

func adjustpanics(gp *g, adjinfo *adjustinfo) {}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {}

func fillstack(stk stack, b byte) {}

func findsghi(gp *g, stk stack) uintptr {}

// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
// stack they refer to while synchronizing with concurrent channel
// operations. It returns the number of bytes of stack copied.
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {}

// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
func copystack(gp *g, newsize uintptr) {}
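
// A conceptual sketch of the copy step: stacks grow downward, so the live
// ("used") portion sits at the top of the region, ending at hi. Only that
// portion needs to move, after which pointers into the old range are
// adjusted as sketched above. This stands in for the runtime's
// memmove/adjust machinery and elides all status and sudog handling.
func copystackSketch(oldStk, newStk []byte, used int) {
	copy(newStk[len(newStk)-used:], oldStk[len(oldStk)-used:])
}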

// round x up to a power of 2.
func round2(x int32) int32 {}
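
// A sketch of rounding up to the next power of two by finding the
// smallest shift that reaches x; the runtime's round2 may use a bit
// trick instead of a loop.
func round2Sketch(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}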

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the scheduler is trying to stop this g, then it will set preemptStop.
//
// This must be nowritebarrierrec because it can be called as part of
// stack growth from other nowritebarrierrec functions, but the
// compiler doesn't check this.
//
//go:nowritebarrierrec
func newstack() {}
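
// A sketch of the multiplicative growth policy the comment refers to:
// doubling on each overflow means a goroutine that ends up using S bytes
// of stack copies a total number of bytes proportional to S across all
// growths, which is the constant amortized cost claimed above. The
// doubling factor and the use of maxstacksize as the cap follow the usual
// runtime behavior but are stated here as assumptions.
func growStackSizeSketch(oldsize, maxSize uintptr) (uintptr, bool) {
	newsize := oldsize * 2
	if newsize > maxSize {
		return 0, false // caller would report a stack overflow
	}
	return newsize, true
}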

//go:nosplit
func nilfunc() {}

// adjust Gobuf as if it executed a call to fn
// and then stopped before the first instruction in fn.
func gostartcallfn(gobuf *gobuf, fv *funcval) {}
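
// A conceptual sketch of the adjustment on an architecture where a call
// pushes its return address on the stack (amd64-style): the buffer's
// current pc becomes the return address the fake call would have pushed,
// and pc is redirected to fn's entry. gobufSketch is illustrative; the
// real code writes through unsafe pointers and differs per architecture.
type gobufSketch struct {
	sp    int // index into stack; the stack grows toward lower indices
	pc    uintptr
	stack []uintptr
}

func gostartcallSketch(buf *gobufSketch, fn uintptr) {
	buf.sp--                   // make room for the pushed return address
	buf.stack[buf.sp] = buf.pc // the old pc becomes fn's return address
	buf.pc = fn                // resume execution at fn's first instruction
}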

// isShrinkStackSafe returns whether it's safe to attempt to shrink
// gp's stack. Shrinking the stack is only safe when we have precise
// pointer maps for all frames on the stack. The caller must hold the
// _Gscan bit for gp or must be running gp itself.
func isShrinkStackSafe(gp *g) bool {}

// Maybe shrink the stack being used by gp.
//
// gp must be stopped and we must own its stack. It may be in
// _Grunning, but only if this is our own user G.
func shrinkstack(gp *g) {}
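
// A sketch of the shrink decision commonly used for goroutine stacks: cut
// the allocation in half, but only if less than a quarter of it is in use
// and the result stays at or above the minimum stack size. The 1/4
// threshold and the minimum-size floor are assumptions about this file,
// not read from it.
func shrinkTargetSketch(oldsize, used, minSize uintptr) (uintptr, bool) {
	newsize := oldsize / 2
	if newsize < minSize {
		return 0, false
	}
	if used >= oldsize/4 {
		return 0, false // too much of the stack is live to be worth copying
	}
	return newsize, true
}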

// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {}

type stackObjectRecord

func (r *stackObjectRecord) useGCProg() bool {}

func (r *stackObjectRecord) ptrdata() uintptr {}

// gcdata returns the pointer map or GC program for the type.
func (r *stackObjectRecord) gcdata() *byte {}

// This is exported as ABI0 via linkname so obj can call it.
//
//go:nosplit
//go:linkname morestackc
func morestackc() {}

var startingStackSize

func gcComputeStartingStackSize() {}