go/src/runtime/pprof/pprof_test.go

func cpuHogger(f func(x int) int, y *int, dur time.Duration) {}

var salt1 int

var salt2 int

// The actual CPU hogging function.
// Must not call other functions nor access heap/globals in the loop,
// otherwise under race detector the samples will be in the race runtime.
func cpuHog1(x int) int {}

func cpuHog0(x, n int) int {}

func cpuHog2(x int) int {}
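
// Illustrative sketch (not part of the original file): the kind of loop the
// cpuHog1 comment describes — only local variables, no calls, and no heap or
// global access inside the loop, so race-detector samples stay out of the race
// runtime. The name, iteration count, and arithmetic are arbitrary assumptions.
func exampleLocalOnlyHog(x int) int {
	foo := x
	for i := 0; i < 100000; i++ {
		if foo > 0 {
			foo *= foo
		} else {
			foo *= foo + 1
		}
	}
	return foo
}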

// Return a list of functions that we don't want to ever appear in CPU
// profiles. For gccgo, that list includes the sigprof handler itself.
func avoidFunctions() []string {}
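
// Illustrative sketch (not part of the original file): one way such a
// compiler-dependent skip list could be built. Whether these are the exact
// entries the real helper returns is an assumption.
func exampleAvoidFunctions() []string {
	if runtime.Compiler == "gccgo" {
		// Under gccgo, the sigprof handler itself can show up in samples.
		return []string{"runtime.sigprof"}
	}
	return nil
}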

func TestCPUProfile(t *testing.T) {}

func TestCPUProfileMultithreaded(t *testing.T) {}

func TestCPUProfileMultithreadMagnitude(t *testing.T) {}

// containsInlinedCall reports whether the function body for the function f is
// known to contain an inlined function call within the first maxBytes bytes.
func containsInlinedCall(f any, maxBytes int) bool {}

// findInlinedCall returns the PC of an inlined function call within
// the function body for the function f if any.
func findInlinedCall(f any, maxBytes int) (pc uint64, found bool) {}
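
// Illustrative sketch (not part of the original file): how a test could use
// containsInlinedCall to skip itself when the compiler did not inline the
// callee it wants to observe. The 4<<10 byte limit is an arbitrary assumption.
func exampleSkipWithoutInlining(t *testing.T) {
	if !containsInlinedCall(inlinedCaller, 4<<10) {
		t.Skip("can't determine whether inlinedCallee was inlined into inlinedCaller")
	}
}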

func TestCPUProfileInlining(t *testing.T) {}

func inlinedCaller(x int) int {}

func inlinedCallee(x, n int) int {}

//go:noinline
func dumpCallers(pcs []uintptr) {}

//go:noinline
func inlinedCallerDump(pcs []uintptr) {}

func inlinedCalleeDump(pcs []uintptr) {}

type inlineWrapperInterface

type inlineWrapper

func (h inlineWrapper) dump(pcs []uintptr) {}

func inlinedWrapperCallerDump(pcs []uintptr) {}

func TestCPUProfileRecursion(t *testing.T) {}

func recursionCaller(x int) int {}

func recursionCallee(n, x int) int {}

func recursionChainTop(x int, pcs []uintptr) {}

func recursionChainMiddle(x int, pcs []uintptr) {}

func recursionChainBottom(x int, pcs []uintptr) {}

func parseProfile(t *testing.T, valBytes []byte, f func(uintptr, []*profile.Location, map[string][]string)) *profile.Profile {}

func cpuProfilingBroken() bool {}

// testCPUProfile runs f under the CPU profiler, checks the resulting profile
// against the conditions encoded in matches, and returns the parsed profile.
func testCPUProfile(t *testing.T, matches profileMatchFunc, f func(dur time.Duration)) *profile.Profile {}

var diffCPUTimeImpl

func diffCPUTime(t *testing.T, f func()) (user, system time.Duration) {}

// stackContains matches if a function named spec appears anywhere in the stack trace.
func stackContains(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {}
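
// Illustrative sketch (not part of the original file): the kind of scan the
// stackContains comment describes — walk every line of every location and
// substring-match the function name. The real matcher's exact criteria are
// not shown here.
func exampleStackContains(spec string, stk []*profile.Location) bool {
	for _, loc := range stk {
		for _, line := range loc.Line {
			if strings.Contains(line.Function.Name, spec) {
				return true
			}
		}
	}
	return false
}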

type sampleMatchFunc

func profileOk(t *testing.T, matches profileMatchFunc, prof bytes.Buffer, duration time.Duration) (_ *profile.Profile, ok bool) {}

type profileMatchFunc

func matchAndAvoidStacks(matches sampleMatchFunc, need []string, avoid []string) profileMatchFunc {}
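
// Illustrative sketch (not part of the original file): how the pieces above
// compose in a typical CPU-profile test — matchAndAvoidStacks builds the
// profileMatchFunc, testCPUProfile runs the workload under the profiler, and
// cpuHogger burns CPU for the requested duration. The particular function
// name checked here is only an example.
func exampleCPUProfileTest(t *testing.T) {
	matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.cpuHog1"}, avoidFunctions())
	testCPUProfile(t, matches, func(dur time.Duration) {
		cpuHogger(cpuHog1, &salt1, dur)
	})
}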

// Fork can hang if preempted with signals frequently enough (see issue 5517).
// Ensure that we do not do this.
func TestCPUProfileWithFork(t *testing.T) {}

// Test that profiler does not observe runtime.gogo as "user" goroutine execution.
// If it did, it would see inconsistent state and would either record an incorrect stack
// or crash because the stack was malformed.
func TestGoroutineSwitch(t *testing.T) {}

func fprintStack(w io.Writer, stk []*profile.Location) {}

// Test that profiling of division operations is okay, especially on ARM. See issue 6681.
func TestMathBigDivide(t *testing.T) {}

// stackContainsAll matches if all functions in spec (comma-separated) appear somewhere in the stack trace.
func stackContainsAll(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {}
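
// Illustrative sketch (not part of the original file): the comma-splitting the
// stackContainsAll comment describes, delegating each name to stackContains.
func exampleStackContainsAll(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
	for _, f := range strings.Split(spec, ",") {
		if !stackContains(f, count, stk, labels) {
			return false
		}
	}
	return true
}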

func TestMorestack(t *testing.T) {}

//go:noinline
func growstack1() {}

//go:noinline
func growstack(n int) {}

//go:noinline
func use(x [8 << 18]byte) {}

func TestBlockProfile(t *testing.T) {}

func profileStacks(p *profile.Profile) (res [][]string) {}

func blockRecordStacks(records []runtime.BlockProfileRecord) (res [][]string) {}

func containsStack(got [][]string, want []string) bool {}

// awaitBlockedGoroutine spins on runtime.Gosched until a runtime stack dump
// shows a goroutine in the given state with a stack frame in
// runtime/pprof.<fName>.
func awaitBlockedGoroutine(t *testing.T, state, fName string, count int) {}
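
// Illustrative sketch (not part of the original file): the polling pattern the
// awaitBlockedGoroutine comment describes — repeatedly yield, take a full
// runtime.Stack dump, and return once a goroutine in the wanted state shows a
// runtime/pprof.<fName> frame. The regexp and buffer size are assumptions,
// not the test's real matching logic.
func exampleAwaitBlocked(state, fName string) {
	re := regexp.MustCompile(`goroutine \d+ \[` + regexp.QuoteMeta(state) + `\]:\n(?:.+\n)*?runtime/pprof\.` + regexp.QuoteMeta(fName))
	buf := make([]byte, 1<<20)
	for {
		runtime.Gosched()
		n := runtime.Stack(buf, true)
		if re.Match(buf[:n]) {
			return
		}
	}
}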

func blockChanRecv(t *testing.T) {}

func blockChanSend(t *testing.T) {}

func blockChanClose(t *testing.T) {}

func blockSelectRecvAsync(t *testing.T) {}

func blockSelectSendSync(t *testing.T) {}

func blockMutex(t *testing.T) {}

func blockMutexN(t *testing.T, n int, d time.Duration) {}

func blockCond(t *testing.T) {}

// See http://golang.org/cl/299991.
func TestBlockProfileBias(t *testing.T) {}

// blockFrequentShort produces 100000 block events with an average duration of
// rate / 10.
func blockFrequentShort(rate int) {}

// blockInfrequentLong produces 10000 block events with an average duration of
// rate.
func blockInfrequentLong(rate int) {}

// Used by TestBlockProfileBias.
//
//go:linkname blockevent runtime.blockevent
func blockevent(cycles int64, skip int)
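
// Illustrative sketch (not part of the original file): how synthetic block
// events in the shape described above could be reported through the linknamed
// blockevent. The event count and cycle value here mirror the helper comments
// but are not guaranteed to match the real implementations.
func exampleFrequentShortEvents(rate int) {
	for i := 0; i < 100000; i++ {
		blockevent(int64(rate/10), 1)
	}
}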

func TestMutexProfile(t *testing.T) {}

func TestMutexProfileRateAdjust(t *testing.T) {}

func func1(c chan int) {}

func func2(c chan int) {}

func func3(c chan int) {}

func func4(c chan int) {}

func TestGoroutineCounts(t *testing.T) {}

func containsInOrder(s string, all ...string) bool {}

func containsCountsLabels(prof *profile.Profile, countLabels map[int64]map[string]string) bool {}

func TestGoroutineProfileConcurrency(t *testing.T) {}

// Regression test for #69998.
func TestGoroutineProfileCoro(t *testing.T) {}

func BenchmarkGoroutine(b *testing.B) {}

var emptyCallStackTestRun

// Issue 18836.
func TestEmptyCallStack(t *testing.T) {}

// stackContainsLabeled takes a spec like funcname;key=value and matches if the stack has that key
// and value and has funcname somewhere in the stack.
func stackContainsLabeled(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {}
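
// Illustrative sketch (not part of the original file): parsing the
// "funcname;key=value" spec described above and checking both the label map
// and the stack. The error handling and exact semantics are assumptions.
func exampleStackContainsLabeled(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
	base, kv, ok := strings.Cut(spec, ";")
	if !ok {
		return false
	}
	k, v, ok := strings.Cut(kv, "=")
	if !ok {
		return false
	}
	found := false
	for _, lv := range labels[k] {
		if lv == v {
			found = true
			break
		}
	}
	if !found {
		return false
	}
	return stackContains(base, count, stk, labels)
}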

func TestCPUProfileLabel(t *testing.T) {}

func TestLabelRace(t *testing.T) {}

func TestGoroutineProfileLabelRace(t *testing.T) {}

// TestLabelSystemstack makes sure CPU profiler samples of goroutines running
// on systemstack include the correct pprof labels. See issue #48577.
func TestLabelSystemstack(t *testing.T) {}

// labelHog is designed to burn CPU time in a way that causes a high number of
// CPU samples to end up running on systemstack.
func labelHog(stop chan struct{}, gogc int) {}

// parallelLabelHog runs GOMAXPROCS goroutines running labelHog.
func parallelLabelHog(ctx context.Context, dur time.Duration, gogc int) {}
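
// Illustrative sketch (not part of the original file): one way to spend time
// on the system stack from a labeled goroutine, assuming runtime/debug is
// available — debug.SetGCPercent does much of its work on systemstack, so
// samples of it should carry the caller's labels. Whether the real labelHog
// uses this exact call is an assumption.
func exampleSystemstackHog(stop chan struct{}, gogc int) {
	for {
		select {
		case <-stop:
			return
		default:
			debug.SetGCPercent(gogc)
		}
	}
}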

// Check that there is no deadlock when the program receives SIGPROF while in
// 64bit atomics' critical section. Used to happen on mips{,le}. See #20146.
func TestAtomicLoadStore64(t *testing.T) {}

func TestTracebackAll(t *testing.T) {}

// TestTryAdd tests the cases that are hard to test with real program execution.
//
// For example, the current Go compilers may not always inline functions
// involved in recursion, but that may not be true in future compilers. This
// tests such cases by using fake call sequences and forcing the profile build
// to use translateCPUProfile, defined in proto_test.go.
func TestTryAdd(t *testing.T) {}

func TestTimeVDSO(t *testing.T) {}

func TestProfilerStackDepth(t *testing.T) {}

func hasPrefix(stk []string, prefix []string) bool {}

var _

var _

// allocDeep calls itself n times before calling fn.
func allocDeep(n int) {}

// blockChanDeep produces a block profile event at stack depth n, including the
// caller.
func blockChanDeep(t *testing.T, n int) {}

// blockMutexDeep produces a block profile event at stack depth n, including the
// caller.
func blockMutexDeep(t *testing.T, n int) {}

// goroutineDeep blocks at stack depth n, including the caller until the test is
// finished.
func goroutineDeep(t *testing.T, n int) {}
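
// Illustrative sketch (not part of the original file): the recursion pattern
// shared by the *Deep helpers above — descend n levels of self-calls first, so
// the final profiled operation sits at a known stack depth. The mutex-based
// body here is an assumption, not the real helper.
func exampleMutexDeep(t *testing.T, n int) {
	if n > 0 {
		exampleMutexDeep(t, n-1)
		return
	}
	blockMutex(t)
}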

// produceProfileEvents produces pprof events at the given stack depth and then
// blocks in goroutineDeep until the test completes. The stack traces are
// guaranteed to have exactly the desired depth, with produceProfileEvents as
// their root frame, which is what TestProfilerStackDepth expects.
func produceProfileEvents(t *testing.T, depth int) {}

func getProfileStacks(collect func([]runtime.BlockProfileRecord) (int, bool), fileLine bool) []string {}

func TestMutexBlockFullAggregation(t *testing.T) {}

func inlineA(mu *sync.Mutex, wg *sync.WaitGroup) {}

func inlineB(mu *sync.Mutex, wg *sync.WaitGroup) {}

func inlineC(mu *sync.Mutex, wg *sync.WaitGroup) {}

func inlineD(mu *sync.Mutex, wg *sync.WaitGroup) {}

func inlineE(mu *sync.Mutex, wg *sync.WaitGroup) {}

func inlineF(mu *sync.Mutex, wg *sync.WaitGroup) {}

func TestBlockMutexProfileInlineExpansion(t *testing.T) {}

func TestProfileRecordNullPadding(t *testing.T) {}

func testProfileRecordNullPadding[T runtime.StackRecord | runtime.MemProfileRecord | runtime.BlockProfileRecord](t *testing.T, name string, fn func([]T) (int, bool)) {}

// disableSampling configures the profilers to capture all events, otherwise
// it's difficult to assert anything.
func disableSampling() func() {}
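
// Illustrative sketch (not part of the original file): the kind of
// save-and-restore disableSampling's comment implies — crank the profilers'
// sampling rates so every event is captured, and return a func that restores
// the previous settings. Which profilers are touched and what the restore
// values should be are assumptions.
func exampleDisableSampling() func() {
	oldMemRate := runtime.MemProfileRate
	runtime.MemProfileRate = 1
	runtime.SetBlockProfileRate(1)
	oldMutexRate := runtime.SetMutexProfileFraction(1)
	return func() {
		runtime.MemProfileRate = oldMemRate
		runtime.SetBlockProfileRate(0)
		runtime.SetMutexProfileFraction(oldMutexRate)
	}
}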