// go/src/runtime/slice_test.go

// N is a package-level constant whose value is elided in this stripped
// view — presumably the default element/iteration count shared by the
// slice benchmarks below. TODO(review): confirm against the full file.
const N

// BenchmarkMakeSliceCopy measures, per its name, the make-then-copy
// pattern for populating a new slice. Body elided in this view; the
// {} below is a placeholder, not the real implementation.
func BenchmarkMakeSliceCopy(b *testing.B) {}

// struct24, struct32, and struct40 are fixture types whose definitions
// are elided in this view — presumably structs of 24, 32, and 40 bytes
// respectively, used to benchmark allocation/growth across element
// sizes. TODO(review): confirm field layouts in the full file.
type struct24

type struct32

type struct40

// BenchmarkMakeSlice measures, per its name, the cost of make([]T, ...)
// allocations. Body elided in this view.
func BenchmarkMakeSlice(b *testing.B) {}

// BenchmarkGrowSlice measures, per its name, the runtime's slice
// growth path (reallocation on append past capacity). Body elided
// in this view.
func BenchmarkGrowSlice(b *testing.B) {}

// SinkIntSlice and SinkIntPointerSlice are exported sink variables;
// types/initializers elided in this view. Presumably benchmark results
// are stored here so the compiler cannot dead-code-eliminate the work.
var SinkIntSlice

var SinkIntPointerSlice

// BenchmarkExtendSlice measures, per its name, extending an existing
// slice (e.g. reslicing/append within capacity). Body elided in this
// view.
func BenchmarkExtendSlice(b *testing.B) {}

// BenchmarkAppend measures, per its name, the basic built-in append
// operation. Body elided in this view.
func BenchmarkAppend(b *testing.B) {}

// BenchmarkAppendGrowByte measures, per its name, appending bytes to a
// slice that repeatedly grows. Body elided in this view.
func BenchmarkAppendGrowByte(b *testing.B) {}

// BenchmarkAppendGrowString measures, per its name, appending strings
// to a slice that repeatedly grows (pointer-containing elements, so
// write barriers may be involved). Body elided in this view.
func BenchmarkAppendGrowString(b *testing.B) {}

// BenchmarkAppendSlice measures, per its name, the append(dst, src...)
// slice-to-slice form. Body elided in this view.
func BenchmarkAppendSlice(b *testing.B) {}

// blackhole is a package-level sink; type/initializer elided in this
// view. Presumably it absorbs benchmark results to defeat dead-code
// elimination. TODO(review): confirm its type in the full file.
var blackhole

// BenchmarkAppendSliceLarge measures, per its name, appending large
// slices (the bulk-copy path). Body elided in this view.
func BenchmarkAppendSliceLarge(b *testing.B) {}

// BenchmarkAppendStr measures, per its name, appending a string to a
// byte slice (append(b, s...)). Body elided in this view.
func BenchmarkAppendStr(b *testing.B) {}

// BenchmarkAppendSpecialCase measures an append special case; which
// one is not visible in this stripped view — TODO(review): consult the
// full file for the specific pattern being benchmarked. Body elided.
func BenchmarkAppendSpecialCase(b *testing.B) {}

// x is a package-level variable; type/initializer elided in this view.
// Presumably shared state used by f and TestSideEffectOrder below —
// TODO(review): confirm against the full file.
var x

// f returns an int; its body is elided in this view. Presumably it has
// a deliberate side effect (likely on x above) so TestSideEffectOrder
// can check evaluation order — TODO(review): confirm in the full file.
func f() int {}

// TestSideEffectOrder checks, per its name, the order in which side
// effects occur during slice-expression evaluation. Body elided in
// this view.
func TestSideEffectOrder(t *testing.T) {}

// TestAppendOverlap checks, per its name, append correctness when the
// source and destination slices overlap (share backing storage). Body
// elided in this view.
func TestAppendOverlap(t *testing.T) {}

// BenchmarkCopy measures, per its name, the built-in copy operation.
// Body elided in this view.
func BenchmarkCopy(b *testing.B) {}

// sByte and s1Ptr..s4Ptr are package-level sink slices; types and
// initializers are elided in this view. Being package-level globals,
// they are addressable from outside the benchmark functions, which
// (per the BenchmarkAppendInPlace comment below) is what enables the
// in-place append optimization being measured. Presumably one sink per
// element size (byte, and 1..4 pointer-sized words) — TODO(review):
// confirm the exact types in the full file.
var sByte

var s1Ptr

var s2Ptr

var s3Ptr

var s4Ptr

// BenchmarkAppendInPlace tests the performance of append
// when the result is being written back to the same slice.
// In order for the in-place optimization to occur,
// the slice must be referred to by address;
// using a global is an easy way to trigger that.
// We test the "grow" and "no grow" paths separately,
// but not the "normal" (occasionally grow) path,
// because it is a blend of the other two.
// We use small numbers and small sizes in an attempt
// to avoid benchmarking memory allocation and copying.
// We use scalars instead of pointers in an attempt
// to avoid benchmarking the write barriers.
// We benchmark four common sizes (byte, pointer, string/interface, slice),
// and one larger size.
//
// NOTE(review): body elided in this stripped view; the {} below is a
// placeholder, not the real implementation.
func BenchmarkAppendInPlace(b *testing.B) {}