const mallocHeaderSize …

const minSizeForMallocHeader …

// heapBitsInSpan returns true if the size of an object implies its ptr/scalar
// data is stored at the end of the span, and is accessible via span.heapBits.
//
// Note: this works for both rounded-up sizes (span.elemsize) and unrounded
// type sizes because minSizeForMallocHeader is guaranteed to be at a size
// class boundary.
//
//go:nosplit
func heapBitsInSpan(userSize uintptr) bool { … }

type typePointers …

// typePointersOf returns an iterator over all heap pointers in the range [addr, addr+size).
//
// addr and addr+size must be in the range [span.base(), span.limit).
//
// Note: addr+size must be passed as the limit argument to the iterator's next method on
// each iteration. This slightly awkward API is to allow typePointers to be destructured
// by the compiler.
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func (span *mspan) typePointersOf(addr, size uintptr) typePointers { … }

// typePointersOfUnchecked is like typePointersOf, but assumes addr is the base
// of an allocation slot in a span (the start of the object if no header, the
// header otherwise). It returns an iterator that generates all pointers
// in the range [addr, addr+span.elemsize).
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func (span *mspan) typePointersOfUnchecked(addr uintptr) typePointers { … }

// typePointersOfType is like typePointersOf, but assumes addr points to one or more
// contiguous instances of the provided type. The provided type must not be nil and
// it must not have its type metadata encoded as a gcprog.
//
// It returns an iterator that tiles typ.GCData starting from addr. It's the caller's
// responsibility to limit iteration.
//
// nosplit because its callers are nosplit and require all their callees to be nosplit.
//
//go:nosplit
func (span *mspan) typePointersOfType(typ *abi.Type, addr uintptr) typePointers { … }

// nextFast is the fast path of next. nextFast is written to be inlineable and,
// as the name implies, fast.
//
// Callers that are performance-critical should iterate using the following
// pattern:
//
//	for {
//		var addr uintptr
//		if tp, addr = tp.nextFast(); addr == 0 {
//			if tp, addr = tp.next(limit); addr == 0 {
//				break
//			}
//		}
//		// Use addr.
//		...
//	}
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func (tp typePointers) nextFast() (typePointers, uintptr) { … }

// next advances the pointers iterator, returning the updated iterator and
// the address of the next pointer.
//
// limit must be the same each time it is passed to next.
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func (tp typePointers) next(limit uintptr) (typePointers, uintptr) { … }

// fastForward moves the iterator forward by n bytes. n must be a multiple
// of goarch.PtrSize. limit must be the same limit passed to next for this
// iterator.
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func (tp typePointers) fastForward(n, limit uintptr) typePointers { … }
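// countHeapPointers is an illustrative sketch, not part of the runtime: a hypothetical
// helper showing how the typePointers iterator above is meant to be driven using the
// documented nextFast/next pattern. It assumes it lives in package runtime and that
// [addr, addr+size) satisfies typePointersOf's preconditions.
func countHeapPointers(span *mspan, addr, size uintptr) int {
	n := 0
	limit := addr + size
	tp := span.typePointersOf(addr, size)
	for {
		var p uintptr
		// Try the inlineable fast path first; fall back to next, which must
		// be passed the same limit on every call.
		if tp, p = tp.nextFast(); p == 0 {
			if tp, p = tp.next(limit); p == 0 {
				break
			}
		}
		// p is the address of a pointer slot within [addr, addr+size).
		n++
	}
	return n
}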
// objBase returns the base pointer for the object containing addr in span.
//
// Assumes that addr points into a valid part of span (span.base() <= addr < span.limit).
//
//go:nosplit
func (span *mspan) objBase(addr uintptr) uintptr { … }

// bulkBarrierPreWrite executes a write barrier
// for every pointer slot in the memory range [src, src+size),
// using pointer/scalar information from [dst, dst+size).
// This executes the write barriers necessary before a memmove.
// src, dst, and size must be pointer-aligned.
// The range [dst, dst+size) must lie within a single object.
// It does not perform the actual writes.
//
// As a special case, src == 0 indicates that this is being used for a
// memclr. bulkBarrierPreWrite will pass 0 for the src of each write
// barrier.
//
// Callers should call bulkBarrierPreWrite immediately before
// calling memmove(dst, src, size). This function is marked nosplit
// to avoid being preempted; the GC must not stop the goroutine
// between the memmove and the execution of the barriers.
// The caller is also responsible for cgo pointer checks if this
// may be writing Go pointers into non-Go memory.
//
// Pointer data is not maintained for allocations containing
// no pointers at all; any caller of bulkBarrierPreWrite must first
// make sure the underlying allocation contains pointers, usually
// by checking typ.PtrBytes.
//
// The typ argument is the type of the space at src and dst (and the
// element type if src and dst refer to arrays) and it is optional.
// If typ is nil, the barrier will still behave as expected and typ
// is used purely as an optimization. However, it must be used with
// care.
//
// If typ is not nil, then src and dst must point to one or more values
// of type typ. The caller must ensure that the ranges [src, src+size)
// and [dst, dst+size) refer to one or more whole values of type typ
// (leaving off the pointerless tail of the space is OK). If this
// precondition is not followed, this function will fail to scan the
// right pointers.
//
// When in doubt, pass nil for typ. That is safe and will always work.
//
// Callers must perform cgo checks if goexperiment.CgoCheck2.
//
//go:nosplit
func bulkBarrierPreWrite(dst, src, size uintptr, typ *abi.Type) { … }

// bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but
// does not execute write barriers for [dst, dst+size).
//
// In addition to the requirements of bulkBarrierPreWrite,
// callers need to ensure [dst, dst+size) is zeroed.
//
// This is used for special cases where e.g. dst was just
// created and zeroed with malloc.
//
// The type of the space can be provided purely as an optimization.
// See bulkBarrierPreWrite's comment for more details -- use this
// optimization with great care.
//
//go:nosplit
func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr, typ *abi.Type) { … }

// initHeapBits initializes the heap bitmap for a span.
func (s *mspan) initHeapBits() { … }
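// typedBulkCopy is an illustrative sketch, not part of the runtime: a hypothetical
// helper showing the calling convention described above for bulkBarrierPreWrite.
// The barrier is skipped when write barriers are off or the type has no pointers
// (per the typ.PtrBytes requirement), and it must immediately precede the memmove,
// which is why the wrapper is nosplit as well.
//
//go:nosplit
func typedBulkCopy(typ *abi.Type, dst, src unsafe.Pointer, size uintptr) {
	if writeBarrier.enabled && typ != nil && typ.PtrBytes != 0 {
		// dst, src, and size must be pointer-aligned and [dst, dst+size)
		// must lie within a single object.
		bulkBarrierPreWrite(uintptr(dst), uintptr(src), size, typ)
	}
	memmove(dst, src, size)
}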
// heapBits returns the heap ptr/scalar bits stored at the end of the span for
// small object spans and heap arena spans.
//
// Note that the uintptr of each element means something different for small object
// spans and for heap arena spans. Small object spans are easy: they're never interpreted
// as anything but uintptr, so they're immune to differences in endianness. However, the
// heapBits for user arena spans is exposed through a dummy type descriptor, so the byte
// ordering needs to match the same byte ordering the compiler would emit. The compiler always
// emits the bitmap data in little endian byte ordering, so on big endian platforms these
// uintptrs will have their byte orders swapped from what they normally would be.
//
// heapBitsInSpan(span.elemsize) or span.isUserArenaChunk must be true.
//
//go:nosplit
func (span *mspan) heapBits() []uintptr { … }

// heapBitsSlice is a helper for constructing a slice for the span's heap bits.
//
//go:nosplit
func heapBitsSlice(spanBase, spanSize uintptr) []uintptr { … }

// heapBitsSmallForAddr loads the heap bits for the object stored at addr from span.heapBits.
//
// addr must be the base pointer of an object in the span. heapBitsInSpan(span.elemsize)
// must be true.
//
//go:nosplit
func (span *mspan) heapBitsSmallForAddr(addr uintptr) uintptr { … }

// writeHeapBitsSmall writes the heap bits for small objects whose ptr/scalar data is
// stored as a bitmap at the end of the span.
//
// Assumes dataSize is <= ptrBits*goarch.PtrSize. x must be a pointer into the span.
// heapBitsInSpan(dataSize) must be true. dataSize must be >= typ.Size_.
//
//go:nosplit
func (span *mspan) writeHeapBitsSmall(x, dataSize uintptr, typ *_type) (scanSize uintptr) { … }

const doubleCheckHeapSetType …

func heapSetTypeNoHeader(x, dataSize uintptr, typ *_type, span *mspan) uintptr { … }

func heapSetTypeSmallHeader(x, dataSize uintptr, typ *_type, header **_type, span *mspan) uintptr { … }

func heapSetTypeLarge(x, dataSize uintptr, typ *_type, span *mspan) uintptr { … }

func doubleCheckHeapType(x, dataSize uintptr, gctyp *_type, header **_type, span *mspan) { … }

func doubleCheckHeapPointers(x, dataSize uintptr, typ *_type, header **_type, span *mspan) { … }

func doubleCheckHeapPointersInterior(x, interior, size, dataSize uintptr, typ *_type, header **_type, span *mspan) { … }

//go:nosplit
func doubleCheckTypePointersOfType(s *mspan, typ *_type, addr, size uintptr) { … }

func dumpTypePointers(tp typePointers) { … }

// addb returns the byte pointer p+n.
//
//go:nowritebarrier
//go:nosplit
func addb(p *byte, n uintptr) *byte { … }

// subtractb returns the byte pointer p-n.
//
//go:nowritebarrier
//go:nosplit
func subtractb(p *byte, n uintptr) *byte { … }

// add1 returns the byte pointer p+1.
//
//go:nowritebarrier
//go:nosplit
func add1(p *byte) *byte { … }

// subtract1 returns the byte pointer p-1.
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nowritebarrier
//go:nosplit
func subtract1(p *byte) *byte { … }

type markBits …

//go:nosplit
func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits { … }

// refillAllocCache takes 8 bytes of s.allocBits starting at whichByte
// and negates them so that ctz (count trailing zeros) instructions
// can be used. It then places these 8 bytes into the cached 64 bit
// s.allocCache.
func (s *mspan) refillAllocCache(whichByte uint16) { … }

// nextFreeIndex returns the index of the next free object in s at
// or after s.freeindex.
// There are hardware instructions that can be used to make this
// faster if profiling warrants it.
func (s *mspan) nextFreeIndex() uint16 { … }

// isFree reports whether the index'th object in s is unallocated.
//
// The caller must ensure s.state is mSpanInUse, and there must have
// been no preemption points since ensuring this (which could allow a
// GC transition, which would allow the state to change).
func (s *mspan) isFree(index uintptr) bool { … }
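// countFreeSlots is an illustrative sketch, not part of the runtime: a hypothetical
// helper that walks a span's allocation bitmap via isFree. Per the preconditions
// above, the caller must ensure s.state is mSpanInUse and avoid preemption points
// that could allow a GC transition. s.nelems (the number of object slots in the
// span) is assumed from the wider mspan definition; it does not appear in this file.
func countFreeSlots(s *mspan) int {
	free := 0
	for i := uintptr(0); i < uintptr(s.nelems); i++ {
		if s.isFree(i) {
			free++
		}
	}
	return free
}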
// divideByElemSize returns n/s.elemsize.
// n must be within [0, s.npages*_PageSize),
// or may be exactly s.npages*_PageSize
// if s.elemsize is from sizeclasses.go.
//
// nosplit, because it is called by objIndex, which is nosplit
//
//go:nosplit
func (s *mspan) divideByElemSize(n uintptr) uintptr { … }

// nosplit, because it is called by other nosplit code like findObject
//
//go:nosplit
func (s *mspan) objIndex(p uintptr) uintptr { … }

func markBitsForAddr(p uintptr) markBits { … }

func (s *mspan) markBitsForIndex(objIndex uintptr) markBits { … }

func (s *mspan) markBitsForBase() markBits { … }

// isMarked reports whether mark bit m is set.
func (m markBits) isMarked() bool { … }

// setMarked sets the marked bit in the markbits, atomically.
func (m markBits) setMarked() { … }

// setMarkedNonAtomic sets the marked bit in the markbits, non-atomically.
func (m markBits) setMarkedNonAtomic() { … }

// clearMarked clears the marked bit in the markbits, atomically.
func (m markBits) clearMarked() { … }

// markBitsForSpan returns the markBits for the span base address base.
func markBitsForSpan(base uintptr) (mbits markBits) { … }

// advance advances the markBits to the next object in the span.
func (m *markBits) advance() { … }

const clobberdeadPtr …

// badPointer throws a "bad pointer in heap" panic.
func badPointer(s *mspan, p, refBase, refOff uintptr) { … }

// findObject returns the base address for the heap object containing
// the address p, the object's span, and the index of the object in s.
// If p does not point into a heap object, it returns base == 0.
//
// If p is an invalid heap pointer and debug.invalidptr != 0,
// findObject panics.
//
// refBase and refOff optionally give the base address of the object
// in which the pointer p was found and the byte offset at which it
// was found. These are used for error reporting.
//
// It is nosplit so it is safe for p to be a pointer to the current goroutine's stack.
// Since p is a uintptr, it would not be adjusted if the stack were to move.
//
// findObject should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname findObject
//go:nosplit
func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr) { … }

// reflect_verifyNotInHeapPtr reports whether converting the not-in-heap pointer into an unsafe.Pointer is ok.
//
//go:linkname reflect_verifyNotInHeapPtr reflect.verifyNotInHeapPtr
func reflect_verifyNotInHeapPtr(p uintptr) bool { … }

const ptrBits …

// bulkBarrierBitmap executes write barriers for copying from [src,
// src+size) to [dst, dst+size) using a 1-bit pointer bitmap. src is
// assumed to start maskOffset bytes into the data covered by the
// bitmap in bits (which may not be a multiple of 8).
//
// This is used by bulkBarrierPreWrite for writes to data and BSS.
//
//go:nosplit
func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) { … }
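// isHeapObjectMarked is an illustrative sketch, not part of the runtime: a hypothetical
// helper combining findObject with the markBits accessors above to report whether the
// object containing p currently has its mark bit set. refBase and refOff are passed as
// zero because there is no referring object to report if p turns out to be invalid.
func isHeapObjectMarked(p uintptr) bool {
	base, s, objIndex := findObject(p, 0, 0)
	if base == 0 {
		// p does not point into a heap object.
		return false
	}
	return s.markBitsForIndex(objIndex).isMarked()
}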
// typeBitsBulkBarrier executes a write barrier for every
// pointer that would be copied from [src, src+size) to [dst,
// dst+size) by a memmove using the type bitmap to locate those
// pointer slots.
//
// The type typ must correspond exactly to [src, src+size) and [dst, dst+size).
// dst, src, and size must be pointer-aligned.
// The type typ must have a plain bitmap, not a GC program.
// The only use of this function is in channel sends, and the
// 64 kB channel element limit takes care of this for us.
//
// Must not be preempted because it typically runs right before memmove,
// and the GC must observe the barriers and the memmove as an atomic action.
//
// Callers must perform cgo checks if goexperiment.CgoCheck2.
//
//go:nosplit
func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) { … }

// countAlloc returns the number of objects allocated in span s by
// scanning the mark bitmap.
func (s *mspan) countAlloc() int { … }

// readUintptr reads the bytes starting at the aligned pointer p into a uintptr.
// The read is little-endian.
func readUintptr(p *byte) uintptr { … }

var debugPtrmask …

// progToPointerMask returns the 1-bit pointer mask output by the GC program prog.
// size is the size of the region described by prog, in bytes.
// The resulting bitvector will have no more than size/goarch.PtrSize bits.
func progToPointerMask(prog *byte, size uintptr) bitvector { … }

// runGCProg returns the number of 1-bit entries written to memory.
func runGCProg(prog, dst *byte) uintptr { … }

// materializeGCProg allocates space for the (1-bit) pointer bitmask
// for an object of size ptrdata. Then it fills that space with the
// pointer bitmask specified by the program prog.
// The bitmask starts at s.startAddr.
// The result must be deallocated with dematerializeGCProg.
func materializeGCProg(ptrdata uintptr, prog *byte) *mspan { … }

func dematerializeGCProg(s *mspan) { … }

func dumpGCProg(p *byte) { … }

// reflect_gcbits returns the GC type info for x, for testing.
// The result is the bitmap entries (0 or 1), one entry per byte.
//
//go:linkname reflect_gcbits reflect.gcbits
func reflect_gcbits(x any) []byte { … }

// getgcmask returns the GC type info for the pointer stored in ep, for testing.
// If ep points to the stack, only static live information will be returned
// (i.e. not for objects which are only dynamically live stack objects).
func getgcmask(ep any) (mask []byte) { … }
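// gcProgPointerWords is an illustrative sketch, not part of the runtime: a hypothetical
// helper showing the required pairing of materializeGCProg and dematerializeGCProg.
// It assumes the expanded bitmask at s.startAddr uses the usual encoding of one bit per
// pointer-sized word, low bit first within each byte, with a set bit marking a pointer.
func gcProgPointerWords(ptrdata uintptr, prog *byte) int {
	s := materializeGCProg(ptrdata, prog)
	n := 0
	mask := (*byte)(unsafe.Pointer(s.startAddr))
	for word := uintptr(0); word < ptrdata/goarch.PtrSize; word++ {
		if (*addb(mask, word/8)>>(word%8))&1 != 0 {
			n++
		}
	}
	// The expanded bitmask must always be released again.
	dematerializeGCProg(s)
	return n
}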