// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Pool is a no-op under the race detector, so none of these tests work there.
//
//go:build !race

package sync_test

import (
	"runtime"
	"runtime/debug"
	"sort"
	. "sync"
	"sync/atomic"
	"testing"
	"time"
)
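// A minimal usage sketch (illustrative only; the bytes.Buffer pool below is an
// assumption for the example and is not exercised by these tests):
//
//	var buffers = sync.Pool{
//		New: func() any { return new(bytes.Buffer) },
//	}
//
//	buf := buffers.Get().(*bytes.Buffer)
//	buf.Reset()
//	// ... use buf ...
//	buffers.Put(buf)
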
func TestPool(t *testing.T) {
	// disable GC so we can control when it happens.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	var p Pool
	if p.Get() != nil {
		t.Fatal("expected empty")
	}

	// Make sure that the goroutine doesn't migrate to another P
	// between Put and Get calls.
	Runtime_procPin()
	p.Put("a")
	p.Put("b")
	if g := p.Get(); g != "a" {
		t.Fatalf("got %#v; want a", g)
	}
	if g := p.Get(); g != "b" {
		t.Fatalf("got %#v; want b", g)
	}
	if g := p.Get(); g != nil {
		t.Fatalf("got %#v; want nil", g)
	}
	Runtime_procUnpin()

	// Put in a large number of objects so they spill into
	// stealable space.
	for i := 0; i < 100; i++ {
		p.Put("c")
	}
	// After one GC, the victim cache should keep them alive.
	runtime.GC()
	if g := p.Get(); g != "c" {
		t.Fatalf("got %#v; want c after GC", g)
	}
	// A second GC should drop the victim cache.
	runtime.GC()
	if g := p.Get(); g != nil {
		t.Fatalf("got %#v; want nil after second GC", g)
	}
}

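// TestPoolNew checks that Get falls back to the New function when the pool is
// empty, and that values returned with Put are preferred over calling New.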
func TestPoolNew(t *testing.T) {
	// disable GC so we can control when it happens.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	i := 0
	p := Pool{
		New: func() any {
			i++
			return i
		},
	}
	if v := p.Get(); v != 1 {
		t.Fatalf("got %v; want 1", v)
	}
	if v := p.Get(); v != 2 {
		t.Fatalf("got %v; want 2", v)
	}

	// Make sure that the goroutine doesn't migrate to another P
	// between Put and Get calls.
	Runtime_procPin()
	p.Put(42)
	if v := p.Get(); v != 42 {
		t.Fatalf("got %v; want 42", v)
	}
	Runtime_procUnpin()

	if v := p.Get(); v != 3 {
		t.Fatalf("got %v; want 3", v)
	}
}

// Test that Pool does not hold pointers to previously cached resources.
func TestPoolGC(t *testing.T) {
	testPool(t, true)
}

// Test that Pool releases resources on GC.
func TestPoolRelease(t *testing.T) {
	testPool(t, false)
}

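// testPool puts N finalizer-tracked objects into a Pool, optionally drains
// them back out with Get, and then checks that (nearly) all of them are
// finalized after a few GC cycles, i.e. that the Pool does not keep them
// alive.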
func testPool(t *testing.T, drain bool) {
	var p Pool
	const N = 100
loop:
	for try := 0; try < 3; try++ {
		if try == 1 && testing.Short() {
			break
		}
		var fin, fin1 uint32
		for i := 0; i < N; i++ {
			v := new(string)
			runtime.SetFinalizer(v, func(vv *string) {
				atomic.AddUint32(&fin, 1)
			})
			p.Put(v)
		}
		if drain {
			for i := 0; i < N; i++ {
				p.Get()
			}
		}
		for i := 0; i < 5; i++ {
			runtime.GC()
			time.Sleep(time.Duration(i*100+10) * time.Millisecond)
			// 1 pointer can remain on stack or elsewhere
			if fin1 = atomic.LoadUint32(&fin); fin1 >= N-1 {
				continue loop
			}
		}
		t.Fatalf("only %v out of %v resources are finalized on try %v", fin1, N, try)
	}
}

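// TestPoolStress has P goroutines concurrently Put and Get values on a shared
// Pool and checks that Get never returns a value that was not Put.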
func TestPoolStress(t *testing.T) {
	const P = 10
	N := int(1e6)
	if testing.Short() {
		N /= 100
	}
	var p Pool
	done := make(chan bool)
	for i := 0; i < P; i++ {
		go func() {
			var v any = 0
			for j := 0; j < N; j++ {
				if v == nil {
					v = 0
				}
				p.Put(v)
				v = p.Get()
				if v != nil && v.(int) != 0 {
					t.Errorf("expect 0, got %v", v)
					break
				}
			}
			done <- true
		}()
	}
	for i := 0; i < P; i++ {
		<-done
	}
}

func TestPoolDequeue(t *testing.T) {
	testPoolDequeue(t, NewPoolDequeue(16))
}

func TestPoolChain(t *testing.T) {
	testPoolDequeue(t, NewPoolChain())
}

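// testPoolDequeue exercises a PoolDequeue implementation: one producer pushes
// values at the head (and occasionally pops the head) while P-1 consumers pop
// from the tail, and the test verifies that every value is seen exactly once.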
func testPoolDequeue(t *testing.T, d PoolDequeue) {
	const P = 10
	var N int = 2e6
	if testing.Short() {
		N = 1e3
	}
	have := make([]int32, N)
	var stop int32
	var wg WaitGroup
	record := func(val int) {
		atomic.AddInt32(&have[val], 1)
		if val == N-1 {
			atomic.StoreInt32(&stop, 1)
		}
	}

	// Start P-1 consumers.
	for i := 1; i < P; i++ {
		wg.Add(1)
		go func() {
			fail := 0
			for atomic.LoadInt32(&stop) == 0 {
				val, ok := d.PopTail()
				if ok {
					fail = 0
					record(val.(int))
				} else {
					// Speed up the test by
					// allowing the pusher to run.
					if fail++; fail%100 == 0 {
						runtime.Gosched()
					}
				}
			}
			wg.Done()
		}()
	}

	// Start 1 producer.
	nPopHead := 0
	wg.Add(1)
	go func() {
		for j := 0; j < N; j++ {
			for !d.PushHead(j) {
				// Allow a popper to run.
				runtime.Gosched()
			}
			if j%10 == 0 {
				val, ok := d.PopHead()
				if ok {
					nPopHead++
					record(val.(int))
				}
			}
		}
		wg.Done()
	}()
	wg.Wait()

	// Check results.
	for i, count := range have {
		if count != 1 {
			t.Errorf("expected have[%d] = 1, got %d", i, count)
		}
	}
	// Check that at least some PopHeads succeeded. We skip this
	// check in short mode because it's common enough that the
	// queue will stay nearly empty all the time and a PopTail
	// will happen during the window between every PushHead and
	// PopHead.
	if !testing.Short() && nPopHead == 0 {
		t.Errorf("popHead never succeeded")
	}
}

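// TestNilPool verifies that calling Get or Put on a nil *Pool panics rather
// than misbehaving silently.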
func TestNilPool(t *testing.T) {
	catch := func() {
		if recover() == nil {
			t.Error("expected panic")
		}
	}

	var p *Pool
	t.Run("Get", func(t *testing.T) {
		defer catch()
		if p.Get() != nil {
			t.Error("expected empty")
		}
		t.Error("should have panicked already")
	})
	t.Run("Put", func(t *testing.T) {
		defer catch()
		p.Put("a")
		t.Error("should have panicked already")
	})
}

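// BenchmarkPool measures a tight Put/Get cycle on a shared Pool; under
// RunParallel this normally stays on the per-P fast path.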
func BenchmarkPool(b *testing.B) {
	var p Pool
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			p.Put(1)
			p.Get()
		}
	})
}

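// BenchmarkPoolOverflow puts and gets 100 values per iteration so that items
// overflow the per-P private slot and exercise the shared portion of the Pool.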
func BenchmarkPoolOverflow(b *testing.B) {
	var p Pool
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for b := 0; b < 100; b++ {
				p.Put(1)
			}
			for b := 0; b < 100; b++ {
				p.Get()
			}
		}
	})
}

// Simulate object starvation in order to force Ps to steal objects
// from other Ps.
func BenchmarkPoolStarvation(b *testing.B) {
	var p Pool
	count := 100
	// Put back 33% fewer objects than are taken out. The resulting starvation
	// forces P-local storage to steal objects from other Ps.
	countStarved := count - int(float32(count)*0.33)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for b := 0; b < countStarved; b++ {
				p.Put(1)
			}
			for b := 0; b < count; b++ {
				p.Get()
			}
		}
	})
}

var globalSink any

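// BenchmarkPoolSTW measures the stop-the-world GC pause time while a Pool
// holds a large number of items, reporting the mean, p50, and p95 pause per
// GC.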
func BenchmarkPoolSTW(b *testing.B) {
	// Take control of GC.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	var mstats runtime.MemStats
	var pauses []uint64

	var p Pool
	for i := 0; i < b.N; i++ {
		// Put a large number of items into a pool.
		const N = 100000
		var item any = 42
		for i := 0; i < N; i++ {
			p.Put(item)
		}
		// Do a GC.
		runtime.GC()
		// Record pause time.
		runtime.ReadMemStats(&mstats)
		pauses = append(pauses, mstats.PauseNs[(mstats.NumGC+255)%256])
	}

	// Get pause time stats.
	sort.Slice(pauses, func(i, j int) bool { return pauses[i] < pauses[j] })
	var total uint64
	for _, ns := range pauses {
		total += ns
	}
	// ns/op for this benchmark is average STW time.
	b.ReportMetric(float64(total)/float64(b.N), "ns/op")
	b.ReportMetric(float64(pauses[len(pauses)*95/100]), "p95-ns/STW")
	b.ReportMetric(float64(pauses[len(pauses)*50/100]), "p50-ns/STW")
}

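// BenchmarkPoolExpensiveNew measures how often the (slow) New function must be
// called when many goroutines keep items checked out, reporting GCs/op and
// New/op.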
func BenchmarkPoolExpensiveNew(b *testing.B) {
	// Populate a pool with items that are expensive to construct
	// to stress pool cleanup and subsequent reconstruction.

	// Create a ballast so the GC has a non-zero heap size and
	// runs at reasonable times.
	globalSink = make([]byte, 8<<20)
	defer func() { globalSink = nil }()

	// Create a pool that's "expensive" to fill.
	var p Pool
	var nNew uint64
	p.New = func() any {
		atomic.AddUint64(&nNew, 1)
		time.Sleep(time.Millisecond)
		return 42
	}
	var mstats1, mstats2 runtime.MemStats
	runtime.ReadMemStats(&mstats1)
	b.RunParallel(func(pb *testing.PB) {
		// Simulate 100X the number of goroutines having items
		// checked out from the Pool simultaneously.
		items := make([]any, 100)
		var sink []byte
		for pb.Next() {
			// Stress the pool.
			for i := range items {
				items[i] = p.Get()
				// Simulate doing some work with this
				// item checked out.
				sink = make([]byte, 32<<10)
			}
			for i, v := range items {
				p.Put(v)
				items[i] = nil
			}
		}
		_ = sink
	})
	runtime.ReadMemStats(&mstats2)

	b.ReportMetric(float64(mstats2.NumGC-mstats1.NumGC)/float64(b.N), "GCs/op")
	b.ReportMetric(float64(nNew)/float64(b.N), "New/op")
}