go/src/sync/pool_test.go
commit af3868f187 (Dmitriy Vyukov)
sync: release Pool memory during second and later GCs
Pool memory was only being released during the first GC after the first Put.

Put assumes that p.local != nil means p is on the allPools list.
poolCleanup (called during each GC) removed each pool from allPools
but did not clear p.local, so each pool was cleared by exactly one GC
and then never cleared again.

This bug was introduced late in the Go 1.3 release cycle.

Fixes #8979.

LGTM=rsc
R=golang-codereviews, bradfitz, r, rsc
CC=golang-codereviews, khr
https://golang.org/cl/162980043
2014-10-22 20:23:49 +04:00
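
The essential change described above is in poolCleanup: after unregistering a pool from allPools it must also clear p.local (and p.localSize), restoring Put's invariant that p.local != nil implies the pool is on allPools, so a later Put re-registers the pool and later GCs can clear it again. Below is a minimal, self-contained sketch of that invariant; the types and method bodies are simplified stand-ins for sync's internals, not the real definitions.

// Simplified model of the bug and fix: a pool registers itself on
// first put (local != nil means "on allPools"), and cleanup must reset
// that marker or the pool is never registered, and never cleaned, again.
package main

import "fmt"

// Pool is a simplified stand-in for sync.Pool's internals.
type Pool struct {
	local     []interface{} // stand-in for the real per-P local array
	localSize uintptr
}

var allPools []*Pool

// put models sync.Pool.Put's registration: local == nil signals
// "not yet on allPools".
func (p *Pool) put(x interface{}) {
	if p.local == nil {
		allPools = append(allPools, p)
		p.local = make([]interface{}, 0, 4)
	}
	p.local = append(p.local, x)
}

// poolCleanup models the fixed GC hook: clearing p.local is the step
// that was missing, and is what lets a later put re-register the pool.
func poolCleanup() {
	for i, p := range allPools {
		allPools[i] = nil
		p.local = nil
		p.localSize = 0
	}
	allPools = allPools[:0]
}

func main() {
	var p Pool
	p.put("a")
	poolCleanup() // first GC: releases p's memory
	p.put("b")    // re-registers p only because p.local was reset
	poolCleanup() // second GC: releases memory again (the fixed behavior)
	fmt.Println(len(allPools), p.local) // 0 []
}

Without the p.local reset, put("b") would not re-register p, and no later cleanup would ever touch it again, which is exactly the one-GC-only behavior the commit message describes.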


// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Pool is a no-op under the race detector, so these tests do not work.
// +build !race

package sync_test

import (
	"runtime"
	"runtime/debug"
	. "sync"
	"sync/atomic"
	"testing"
	"time"
)

func TestPool(t *testing.T) {
	// disable GC so we can control when it happens.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	var p Pool
	if p.Get() != nil {
		t.Fatal("expected empty")
	}
	p.Put("a")
	p.Put("b")
	if g := p.Get(); g != "a" {
		t.Fatalf("got %#v; want a", g)
	}
	if g := p.Get(); g != "b" {
		t.Fatalf("got %#v; want b", g)
	}
	if g := p.Get(); g != nil {
		t.Fatalf("got %#v; want nil", g)
	}
	p.Put("c")
	debug.SetGCPercent(100) // to allow following GC to actually run
	runtime.GC()
	if g := p.Get(); g != nil {
		t.Fatalf("got %#v; want nil after GC", g)
	}
}

func TestPoolNew(t *testing.T) {
	// disable GC so we can control when it happens.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	i := 0
	p := Pool{
		New: func() interface{} {
			i++
			return i
		},
	}
	if v := p.Get(); v != 1 {
		t.Fatalf("got %v; want 1", v)
	}
	if v := p.Get(); v != 2 {
		t.Fatalf("got %v; want 2", v)
	}
	p.Put(42)
	if v := p.Get(); v != 42 {
		t.Fatalf("got %v; want 42", v)
	}
	if v := p.Get(); v != 3 {
		t.Fatalf("got %v; want 3", v)
	}
}

// Test that Pool does not hold pointers to previously cached resources.
func TestPoolGC(t *testing.T) {
	testPool(t, true)
}

// Test that Pool releases resources on GC.
func TestPoolRelease(t *testing.T) {
	testPool(t, false)
}

func testPool(t *testing.T, drain bool) {
	var p Pool
	const N = 100
loop:
	for try := 0; try < 3; try++ {
		var fin, fin1 uint32
		for i := 0; i < N; i++ {
			v := new(string)
			runtime.SetFinalizer(v, func(vv *string) {
				atomic.AddUint32(&fin, 1)
			})
			p.Put(v)
		}
		if drain {
			for i := 0; i < N; i++ {
				p.Get()
			}
		}
		for i := 0; i < 5; i++ {
			runtime.GC()
			time.Sleep(time.Duration(i*100+10) * time.Millisecond)
			// 1 pointer can remain on stack or elsewhere
			if fin1 = atomic.LoadUint32(&fin); fin1 >= N-1 {
				continue loop
			}
		}
		t.Fatalf("only %v out of %v resources are finalized on try %v", fin1, N, try)
	}
}
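
// Stress-test concurrent Put/Get from multiple goroutines, checking
// that only values this test put in ever come back out.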
func TestPoolStress(t *testing.T) {
	const P = 10
	N := int(1e6)
	if testing.Short() {
		N /= 100
	}
	var p Pool
	done := make(chan bool)
	for i := 0; i < P; i++ {
		go func() {
			var v interface{} = 0
			for j := 0; j < N; j++ {
				if v == nil {
					v = 0
				}
				p.Put(v)
				v = p.Get()
				if v != nil && v.(int) != 0 {
					t.Fatalf("expect 0, got %v", v)
				}
			}
			done <- true
		}()
	}
	for i := 0; i < P; i++ {
		<-done
	}
}
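
// BenchmarkPool measures back-to-back Put/Get pairs from parallel goroutines.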
func BenchmarkPool(b *testing.B) {
	var p Pool
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			p.Put(1)
			p.Get()
		}
	})
}
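
// BenchmarkPoolOverflow does batches of 100 Puts before the matching
// Gets, so most values spill past the per-P private slot into shared storage.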
func BenchmarkPoolOverflow(b *testing.B) {
	var p Pool
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for b := 0; b < 100; b++ {
				p.Put(1)
			}
			for b := 0; b < 100; b++ {
				p.Get()
			}
		}
	})
}