go/src/runtime/gc_test.go
Austin Clements 61f56e925e runtime: fix pagesInUse accounting
When we grow the heap, we create a temporary "in use" span for the
memory acquired from the OS and then free that span to link it into
the heap. Hence, we (1) increase pagesInUse when we make the temporary
span so that (2) freeing the span will correctly decrease it.

However, currently step (1) increases pagesInUse by the number of
pages requested from the heap, while step (2) decreases it by the
number of pages requested from the OS (the size of the temporary
span). These aren't necessarily the same, since we round up the number
of pages we request from the OS, so steps 1 and 2 don't necessarily
cancel out like they're supposed to. Over time, this can add up and
cause pagesInUse to underflow and wrap around to 2^64. The garbage
collector computes the sweep ratio from this, so if this happens, the
sweep ratio becomes effectively infinite, causing the first allocation
on each P in a sweep cycle to sweep the entire heap. This makes
sweeping effectively STW.

Fix this by increasing pagesInUse in step 1 by the number of pages
requested from the OS, so that the two steps correctly cancel out. We
add a test that checks that the running total matches the actual state
of the heap.
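
To make the rounding mismatch concrete, here is a minimal standalone sketch of the two accounting steps described above. The names and constants (pageSize, roundUp, the 64 KiB rounding) are illustrative assumptions, not the actual runtime internals; the real code lives in the heap-growth and span-free paths of the runtime.

package main

import "fmt"

const pageSize = 8 << 10 // assume 8 KiB heap pages

func roundUp(n, align uintptr) uintptr { return (n + align - 1) / align * align }

func main() {
	var pagesInUse uint64

	npage := uintptr(3)                    // pages the caller asked the heap for
	ask := roundUp(npage*pageSize, 64<<10) // bytes actually requested from the OS
	spanPages := ask / pageSize            // pages in the temporary "in use" span

	// Step (1), before the fix: credit only the pages requested from the heap.
	pagesInUse += uint64(npage)
	// Step (2): freeing the temporary span debits its actual, rounded-up size.
	pagesInUse -= uint64(spanPages)

	// With npage=3 and 64 KiB rounding, spanPages=8, so the counter loses
	// 5 pages and eventually wraps around to a huge value.
	fmt.Printf("pagesInUse after buggy grow: %d\n", pagesInUse)
}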

Fixes #15022. For 1.6.x.

Change-Id: Iefd9d6abe37d0d447cbdbdf9941662e4f18eeffc
Reviewed-on: https://go-review.googlesource.com/21280
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Russ Cox <rsc@golang.org>
2016-04-04 15:33:26 +00:00


// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"io"
	"os"
	"reflect"
	"runtime"
	"runtime/debug"
	"testing"
	"time"
	"unsafe"
)

func TestGcSys(t *testing.T) {
	if os.Getenv("GOGC") == "off" {
		t.Skip("skipping test; GOGC=off in environment")
	}
	got := runTestProg(t, "testprog", "GCSys")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

func TestGcDeepNesting(t *testing.T) {
	type T [2][2][2][2][2][2][2][2][2][2]*int
	a := new(T)

	// Prevent the compiler from applying escape analysis.
	// This makes sure new(T) is allocated on heap, not on the stack.
	t.Logf("%p", a)

	a[0][0][0][0][0][0][0][0][0][0] = new(int)
	*a[0][0][0][0][0][0][0][0][0][0] = 13
	runtime.GC()
	if *a[0][0][0][0][0][0][0][0][0][0] != 13 {
		t.Fail()
	}
}

func TestGcHashmapIndirection(t *testing.T) {
	defer debug.SetGCPercent(debug.SetGCPercent(1))
	runtime.GC()
	type T struct {
		a [256]int
	}
	m := make(map[T]T)
	for i := 0; i < 2000; i++ {
		var a T
		a.a[0] = i
		m[a] = T{}
	}
}

func TestGcArraySlice(t *testing.T) {
	type X struct {
		buf     [1]byte
		nextbuf []byte
		next    *X
	}
	var head *X
	for i := 0; i < 10; i++ {
		p := &X{}
		p.buf[0] = 42
		p.next = head
		if head != nil {
			p.nextbuf = head.buf[:]
		}
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.next {
		if p.buf[0] != 42 {
			t.Fatal("corrupted heap")
		}
	}
}

func TestGcRescan(t *testing.T) {
	type X struct {
		c     chan error
		nextx *X
	}
	type Y struct {
		X
		nexty *Y
		p     *int
	}
	var head *Y
	for i := 0; i < 10; i++ {
		p := &Y{}
		p.c = make(chan error)
		if head != nil {
			p.nextx = &head.X
		}
		p.nexty = head
		p.p = new(int)
		*p.p = 42
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.nexty {
		if *p.p != 42 {
			t.Fatal("corrupted heap")
		}
	}
}

func TestGcLastTime(t *testing.T) {
	ms := new(runtime.MemStats)
	t0 := time.Now().UnixNano()
	runtime.GC()
	t1 := time.Now().UnixNano()
	runtime.ReadMemStats(ms)
	last := int64(ms.LastGC)
	if t0 > last || last > t1 {
		t.Fatalf("bad last GC time: got %v, want [%v, %v]", last, t0, t1)
	}
	pause := ms.PauseNs[(ms.NumGC+255)%256]
	// Due to timer granularity, pause can actually be 0 on windows
	// or on virtualized environments.
	if pause == 0 {
		t.Logf("last GC pause was 0")
	} else if pause > 10e9 {
		t.Logf("bad last GC pause: got %v, want [0, 10e9]", pause)
	}
}

var hugeSink interface{}

func TestHugeGCInfo(t *testing.T) {
	// The test ensures that the compiler can chew these huge types
	// even on the weakest machines. The types are not allocated at runtime.
	if hugeSink != nil {
		// 400MB on 32-bit systems, 4TB on 64-bit.
		const n = (400 << 20) + (unsafe.Sizeof(uintptr(0))-4)<<40
		hugeSink = new([n]*byte)
		hugeSink = new([n]uintptr)
		hugeSink = new(struct {
			x float64
			y [n]*byte
			z []string
		})
		hugeSink = new(struct {
			x float64
			y [n]uintptr
			z []string
		})
	}
}

func TestPeriodicGC(t *testing.T) {
	// Make sure we're not in the middle of a GC.
	runtime.GC()

	var ms1, ms2 runtime.MemStats
	runtime.ReadMemStats(&ms1)

	// Make periodic GC run continuously.
	orig := *runtime.ForceGCPeriod
	*runtime.ForceGCPeriod = 0

	// Let some periodic GCs happen. In a heavily loaded system,
	// it's possible these will be delayed, so this is designed to
	// succeed quickly if things are working, but to give it some
	// slack if things are slow.
	var numGCs uint32
	const want = 2
	for i := 0; i < 20 && numGCs < want; i++ {
		time.Sleep(5 * time.Millisecond)

		// Test that periodic GC actually happened.
		runtime.ReadMemStats(&ms2)
		numGCs = ms2.NumGC - ms1.NumGC
	}
	*runtime.ForceGCPeriod = orig

	if numGCs < want {
		t.Fatalf("no periodic GC: got %v GCs, want >= 2", numGCs)
	}
}

func BenchmarkSetTypePtr(b *testing.B) {
	benchSetType(b, new(*byte))
}

func BenchmarkSetTypePtr8(b *testing.B) {
	benchSetType(b, new([8]*byte))
}

func BenchmarkSetTypePtr16(b *testing.B) {
	benchSetType(b, new([16]*byte))
}

func BenchmarkSetTypePtr32(b *testing.B) {
	benchSetType(b, new([32]*byte))
}

func BenchmarkSetTypePtr64(b *testing.B) {
	benchSetType(b, new([64]*byte))
}

func BenchmarkSetTypePtr126(b *testing.B) {
	benchSetType(b, new([126]*byte))
}

func BenchmarkSetTypePtr128(b *testing.B) {
	benchSetType(b, new([128]*byte))
}

func BenchmarkSetTypePtrSlice(b *testing.B) {
	benchSetType(b, make([]*byte, 1<<10))
}

type Node1 struct {
	Value       [1]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode1(b *testing.B) {
	benchSetType(b, new(Node1))
}

func BenchmarkSetTypeNode1Slice(b *testing.B) {
	benchSetType(b, make([]Node1, 32))
}

type Node8 struct {
	Value       [8]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode8(b *testing.B) {
	benchSetType(b, new(Node8))
}

func BenchmarkSetTypeNode8Slice(b *testing.B) {
	benchSetType(b, make([]Node8, 32))
}

type Node64 struct {
	Value       [64]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode64(b *testing.B) {
	benchSetType(b, new(Node64))
}

func BenchmarkSetTypeNode64Slice(b *testing.B) {
	benchSetType(b, make([]Node64, 32))
}

type Node64Dead struct {
	Left, Right *byte
	Value       [64]uintptr
}

func BenchmarkSetTypeNode64Dead(b *testing.B) {
	benchSetType(b, new(Node64Dead))
}

func BenchmarkSetTypeNode64DeadSlice(b *testing.B) {
	benchSetType(b, make([]Node64Dead, 32))
}

type Node124 struct {
	Value       [124]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode124(b *testing.B) {
	benchSetType(b, new(Node124))
}

func BenchmarkSetTypeNode124Slice(b *testing.B) {
	benchSetType(b, make([]Node124, 32))
}

type Node126 struct {
	Value       [126]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode126(b *testing.B) {
	benchSetType(b, new(Node126))
}

func BenchmarkSetTypeNode126Slice(b *testing.B) {
	benchSetType(b, make([]Node126, 32))
}

type Node128 struct {
	Value       [128]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode128(b *testing.B) {
	benchSetType(b, new(Node128))
}

func BenchmarkSetTypeNode128Slice(b *testing.B) {
	benchSetType(b, make([]Node128, 32))
}

type Node130 struct {
	Value       [130]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode130(b *testing.B) {
	benchSetType(b, new(Node130))
}

func BenchmarkSetTypeNode130Slice(b *testing.B) {
	benchSetType(b, make([]Node130, 32))
}

type Node1024 struct {
	Value       [1024]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode1024(b *testing.B) {
	benchSetType(b, new(Node1024))
}

func BenchmarkSetTypeNode1024Slice(b *testing.B) {
	benchSetType(b, make([]Node1024, 32))
}

func benchSetType(b *testing.B, x interface{}) {
	v := reflect.ValueOf(x)
	t := v.Type()
	switch t.Kind() {
	case reflect.Ptr:
		b.SetBytes(int64(t.Elem().Size()))
	case reflect.Slice:
		b.SetBytes(int64(t.Elem().Size()) * int64(v.Len()))
	}
	b.ResetTimer()
	runtime.BenchSetType(b.N, x)
}

func BenchmarkAllocation(b *testing.B) {
	type T struct {
		x, y *byte
	}
	ngo := runtime.GOMAXPROCS(0)
	work := make(chan bool, b.N+ngo)
	result := make(chan *T)
	for i := 0; i < b.N; i++ {
		work <- true
	}
	for i := 0; i < ngo; i++ {
		work <- false
	}
	for i := 0; i < ngo; i++ {
		go func() {
			var x *T
			for <-work {
				for i := 0; i < 1000; i++ {
					x = &T{}
				}
			}
			result <- x
		}()
	}
	for i := 0; i < ngo; i++ {
		<-result
	}
}

func TestPrintGC(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	done := make(chan bool)
	go func() {
		for {
			select {
			case <-done:
				return
			default:
				runtime.GC()
			}
		}
	}()
	for i := 0; i < 1e4; i++ {
		func() {
			defer print("")
		}()
	}
	close(done)
}

// The implicit y, ok := x.(error) for the case error
// in testTypeSwitch used to not initialize the result y
// before passing &y to assertE2I2GC.
// Catch this by making assertE2I2 call runtime.GC,
// which will force a stack scan and failure if there are
// bad pointers, and then fill the stack with bad pointers
// and run the type switch.
func TestAssertE2I2Liveness(t *testing.T) {
	// Note that this flag is defined in export_test.go
	// and is not available to ordinary imports of runtime.
	*runtime.TestingAssertE2I2GC = true
	defer func() {
		*runtime.TestingAssertE2I2GC = false
	}()

	poisonStack()
	testTypeSwitch(io.EOF)
	poisonStack()
	testAssert(io.EOF)
	poisonStack()
	testAssertVar(io.EOF)
}

func poisonStack() uintptr {
	var x [1000]uintptr
	for i := range x {
		x[i] = 0xff
	}
	return x[123]
}

func testTypeSwitch(x interface{}) error {
	switch y := x.(type) {
	case nil:
		// ok
	case error:
		return y
	}
	return nil
}

func testAssert(x interface{}) error {
	if y, ok := x.(error); ok {
		return y
	}
	return nil
}

func testAssertVar(x interface{}) error {
	var y, ok = x.(error)
	if ok {
		return y
	}
	return nil
}

func TestAssertE2T2Liveness(t *testing.T) {
	*runtime.TestingAssertE2T2GC = true
	defer func() {
		*runtime.TestingAssertE2T2GC = false
	}()

	poisonStack()
	testIfaceEqual(io.EOF)
}

var a bool

//go:noinline
func testIfaceEqual(x interface{}) {
	if x == "abc" {
		a = true
	}
}

func TestPageAccounting(t *testing.T) {
	// Grow the heap in small increments. This used to drop the
	// pages-in-use count below zero because of a rounding
	// mismatch (golang.org/issue/15022).
	const blockSize = 64 << 10
	blocks := make([]*[blockSize]byte, (64<<20)/blockSize)
	for i := range blocks {
		blocks[i] = new([blockSize]byte)
	}

	// Check that the running page count matches reality.
	pagesInUse, counted := runtime.CountPagesInUse()
	if pagesInUse != counted {
		t.Fatalf("mheap_.pagesInUse is %d, but direct count is %d", pagesInUse, counted)
	}
}