// arena_newArena is a wrapper around newUserArena.
//
//go:linkname arena_newArena arena.runtime_arena_newArena
func arena_newArena() unsafe.Pointer { … }

// arena_arena_New is a wrapper around (*userArena).new, except that typ
// is an any (must be a *_type, still) and typ must be a type descriptor
// for a pointer to the type to actually be allocated, i.e. pass a *T
// to allocate a T. This is necessary because this function returns a *T.
//
//go:linkname arena_arena_New arena.runtime_arena_arena_New
func arena_arena_New(arena unsafe.Pointer, typ any) any { … }

// arena_arena_Slice is a wrapper around (*userArena).slice.
//
//go:linkname arena_arena_Slice arena.runtime_arena_arena_Slice
func arena_arena_Slice(arena unsafe.Pointer, slice any, cap int) { … }

// arena_arena_Free is a wrapper around (*userArena).free.
//
//go:linkname arena_arena_Free arena.runtime_arena_arena_Free
func arena_arena_Free(arena unsafe.Pointer) { … }

// arena_heapify takes a value that lives in an arena and makes a copy
// of it on the heap. Values that don't live in an arena are returned unmodified.
//
//go:linkname arena_heapify arena.runtime_arena_heapify
func arena_heapify(s any) any { … }

const userArenaChunkBytesMax …

const userArenaChunkBytes …

const userArenaChunkPages …

const userArenaChunkMaxAllocBytes …

func init() { … }

// userArenaChunkReserveBytes returns the amount of additional bytes to reserve for
// heap metadata.
func userArenaChunkReserveBytes() uintptr { … }

type userArena …

// newUserArena creates a new userArena ready to be used.
func newUserArena() *userArena { … }

// new allocates a new object of the provided type into the arena, and returns
// its pointer.
//
// This operation is not safe to call concurrently with other operations on the
// same arena.
func (a *userArena) new(typ *_type) unsafe.Pointer { … }

// slice allocates a new slice backing store. slice must be a pointer to a slice
// (i.e. *[]T), because userArenaSlice will update the slice directly.
//
// cap determines the capacity of the slice backing store and must be non-negative.
//
// This operation is not safe to call concurrently with other operations on the
// same arena.
func (a *userArena) slice(sl any, cap int) { … }

// free returns the userArena's chunks back to mheap and marks it as defunct.
//
// Must be called at most once for any given arena.
//
// This operation is not safe to call concurrently with other operations on the
// same arena.
func (a *userArena) free() { … }

// alloc reserves space in the current chunk or calls refill and reserves space
// in a new chunk. If cap is negative, the type will be taken literally, otherwise
// it will be considered as an element type for a slice backing store with capacity
// cap.
func (a *userArena) alloc(typ *_type, cap int) unsafe.Pointer { … }

// refill inserts the current arena chunk onto the full list and obtains a new
// one, either from the partial list or allocating a new one, both from mheap.
func (a *userArena) refill() *mspan { … }

type liveUserArenaChunk …

var userArenaState …

// userArenaNextFree reserves space in the user arena for an item of the specified
// type. If cap is not -1, this is for an array of cap elements of type t.
func (s *mspan) userArenaNextFree(typ *_type, cap int) unsafe.Pointer { … }

// userArenaHeapBitsSetSliceType is the equivalent of heapBitsSetType but for
// Go slice backing store values allocated in a user arena chunk. It sets up the
// heap bitmap for n consecutive values with type typ allocated at address ptr.
func userArenaHeapBitsSetSliceType(typ *_type, n int, ptr unsafe.Pointer, s *mspan) { … }

// userArenaHeapBitsSetType is the equivalent of heapSetType but for
// non-slice-backing-store Go values allocated in a user arena chunk. It
// sets up the type metadata for the value with type typ allocated at address ptr.
//
// NOTE(review): an earlier version of this comment said "base is the base
// address of the arena chunk", but there is no base parameter in the current
// signature — the chunk is identified by its span s; presumably the base
// address is derived from s internally (body elided; verify).
func userArenaHeapBitsSetType(typ *_type, ptr unsafe.Pointer, s *mspan) { … }

type writeUserArenaHeapBits …

func (s *mspan) writeUserArenaHeapBits(addr uintptr) (h writeUserArenaHeapBits) { … }

// write appends the pointerness of the next valid pointer slots
// using the low valid bits of bits. 1=pointer, 0=scalar.
func (h writeUserArenaHeapBits) write(s *mspan, bits, valid uintptr) writeUserArenaHeapBits { … }

// Add padding of size bytes.
func (h writeUserArenaHeapBits) pad(s *mspan, size uintptr) writeUserArenaHeapBits { … }

// Flush the bits that have been written, and add zeros as needed
// to cover the full object [addr, addr+size).
func (h writeUserArenaHeapBits) flush(s *mspan, addr, size uintptr) { … }

// bswapIfBigEndian swaps the byte order of the uintptr on goarch.BigEndian platforms,
// and leaves it alone elsewhere.
func bswapIfBigEndian(x uintptr) uintptr { … }

// newUserArenaChunk allocates a user arena chunk, which maps to a single
// heap arena and single span. Returns a pointer to the base of the chunk
// (this is really important: we need to keep the chunk alive) and the span.
func newUserArenaChunk() (unsafe.Pointer, *mspan) { … }

// isUnusedUserArenaChunk indicates that the arena chunk has been set to fault
// and doesn't contain any scannable memory anymore. However, it might still be
// mSpanInUse as it sits on the quarantine list, since it needs to be swept.
//
// This is not safe to execute unless the caller has ownership of the mspan or
// the world is stopped (preemption is prevented while the relevant state changes).
//
// This is really only meant to be used by accounting tests in the runtime to
// distinguish when a span shouldn't be counted (since mSpanInUse might not be
// enough).
func (s *mspan) isUnusedUserArenaChunk() bool { … }

// setUserArenaChunkToFault sets the address space for the user arena chunk to fault
// and releases any underlying memory resources.
//
// Must be in a non-preemptible state to ensure the consistency of statistics
// exported to MemStats.
func (s *mspan) setUserArenaChunkToFault() { … }

// inUserArenaChunk returns true if p points to a user arena chunk.
func inUserArenaChunk(p uintptr) bool { … }

// freeUserArenaChunk releases the user arena represented by s back to the runtime.
//
// x must be a live pointer within s.
//
// The runtime will set the user arena to fault once it's safe (the GC is no longer running)
// and then once the user arena is no longer referenced by the application, will allow it to
// be reused.
func freeUserArenaChunk(s *mspan, x unsafe.Pointer) { … }

// allocUserArenaChunk attempts to reuse a free user arena chunk represented
// as a span.
//
// Must be in a non-preemptible state to ensure the consistency of statistics
// exported to MemStats.
//
// Acquires the heap lock. Must run on the system stack for that reason.
//
//go:systemstack
func (h *mheap) allocUserArenaChunk() *mspan { … }