// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	. "runtime"
	"strings"
	"sync"
	"testing"
	"time"
)

// TestStackMem measures per-thread stack segment cache behavior.
// The test consumed up to 500MB in the past.
func TestStackMem(t *testing.T) {
	const (
		BatchSize      = 32
		BatchCount     = 256
		ArraySize      = 1024
		RecursionDepth = 128
	)
	if testing.Short() {
		return
	}
	// Raise GOMAXPROCS for the duration of the test and restore it on return.
	defer GOMAXPROCS(GOMAXPROCS(BatchSize))
	s0 := new(MemStats)
	ReadMemStats(s0)
	for b := 0; b < BatchCount; b++ {
		c := make(chan bool, BatchSize)
		for i := 0; i < BatchSize; i++ {
			go func() {
				var f func(k int, a [ArraySize]byte)
				f = func(k int, a [ArraySize]byte) {
					if k == 0 {
						time.Sleep(time.Millisecond)
						return
					}
					f(k-1, a)
				}
				f(RecursionDepth, [ArraySize]byte{})
				c <- true
			}()
		}
		for i := 0; i < BatchSize; i++ {
			<-c
		}

		// The goroutines have signaled via c that they are ready to exit.
		// Give them a chance to exit by sleeping. If we don't wait, we
		// might not reuse them on the next batch.
		time.Sleep(10 * time.Millisecond)
	}
	s1 := new(MemStats)
	ReadMemStats(s1)
	consumed := int64(s1.StackSys - s0.StackSys)
	t.Logf("Consumed %vMB for stack mem", consumed>>20)
	// Each goroutine recurses RecursionDepth times with an ArraySize-byte
	// array in every frame, so one batch needs on the order of
	// BatchSize*ArraySize*RecursionDepth bytes of stack.
	estimate := int64(8 * BatchSize * ArraySize * RecursionDepth) // 8 is to reduce flakiness.
	if consumed > estimate {
		t.Fatalf("Stack mem: want %v, got %v", estimate, consumed)
	}
	// Due to broken stack memory accounting (http://golang.org/issue/7468),
	// StackInuse can decrease during function execution, so we cast the values to int64.
	inuse := int64(s1.StackInuse) - int64(s0.StackInuse)
	t.Logf("Inuse %vMB for stack mem", inuse>>20)
	if inuse > 4<<20 {
		t.Fatalf("Stack inuse: want %v, got %v", 4<<20, inuse)
	}
}

// Test stack growing in different contexts.
func TestStackGrowth(t *testing.T) {
	t.Parallel()
	var wg sync.WaitGroup

	// in a normal goroutine
	wg.Add(1)
	go func() {
		defer wg.Done()
		growStack()
	}()
	wg.Wait()

	// in locked goroutine
	wg.Add(1)
	go func() {
		defer wg.Done()
		LockOSThread()
		growStack()
		UnlockOSThread()
	}()
	wg.Wait()

	// in finalizer
	wg.Add(1)
	go func() {
		defer wg.Done()
		done := make(chan bool)
		go func() {
			s := new(string)
			SetFinalizer(s, func(ss *string) {
				growStack()
				done <- true
			})
			s = nil
			done <- true
		}()
		<-done
		GC()
		select {
		case <-done:
		case <-time.After(20 * time.Second):
			t.Fatal("finalizer did not run")
		}
	}()
	wg.Wait()
}

// ... and in init
//func init() {
//	growStack()
//}

// growStack grows the stack to successively greater depths, checking after
// each recursion that the value written through the escaping pointer survived.
func growStack() {
	n := 1 << 10
	if testing.Short() {
		n = 1 << 8
	}
	for i := 0; i < n; i++ {
		x := 0
		growStackIter(&x, i)
		if x != i+1 {
			panic("stack is corrupted")
		}
	}
	GC()
}

// This function is not an anonymous func, so that the compiler can do escape
// analysis and place x on the stack (and subsequently stack growth updates
// the pointer).
func growStackIter(p *int, n int) {
	if n == 0 {
		*p = n + 1
		GC()
		return
	}
	*p = n + 1
	x := 0
	growStackIter(&x, n-1)
	if x != n {
		panic("stack is corrupted")
	}
}

func TestStackGrowthCallback(t *testing.T) {
	t.Parallel()
	var wg sync.WaitGroup

	// test stack growth at chan op
	wg.Add(1)
	go func() {
		defer wg.Done()
		c := make(chan int, 1)
		growStackWithCallback(func() {
			c <- 1
			<-c
		})
	}()

	// test stack growth at map op
	wg.Add(1)
	go func() {
		defer wg.Done()
		m := make(map[int]int)
		growStackWithCallback(func() {
			_, _ = m[1]
			m[1] = 1
		})
	}()

	// test stack growth at goroutine creation
	wg.Add(1)
	go func() {
		defer wg.Done()
		growStackWithCallback(func() {
			done := make(chan bool)
			go func() {
				done <- true
			}()
			<-done
		})
	}()

	wg.Wait()
}

// growStackWithCallback recurses to successively deeper stack depths,
// invoking cb at the bottom of each recursion.
func growStackWithCallback(cb func()) {
	var f func(n int)
	f = func(n int) {
		if n == 0 {
			cb()
			return
		}
		f(n - 1)
	}
	for i := 0; i < 1<<10; i++ {
		f(i)
	}
}

// TestDeferPtrs tests the adjustment of Defer's argument pointers (p aka &y)
// during a stack copy.
func set(p *int, x int) {
	*p = x
}
func TestDeferPtrs(t *testing.T) {
	var y int

	defer func() {
		if y != 42 {
			t.Errorf("defer's stack references were not adjusted appropriately")
		}
	}()
	defer set(&y, 42) // records &y in the Defer's argument frame
	growStack()       // forces stack copies, moving y
}

type bigBuf [4 * 1024]byte

// TestDeferPtrsGoexit is like TestDeferPtrs but exercises the possibility that the
// stack grows as part of starting the deferred function. It calls Goexit at various
// stack depths, forcing the deferred function (with >4kB of args) to be run at
// the bottom of the stack. The goal is to find a stack depth less than 4kB from
// the end of the stack. Each trial runs in a different goroutine so that an earlier
// stack growth does not invalidate a later attempt.
func TestDeferPtrsGoexit(t *testing.T) {
	for i := 0; i < 100; i++ {
		c := make(chan int, 1)
		go testDeferPtrsGoexit(c, i)
		if n := <-c; n != 42 {
			t.Fatalf("defer's stack references were not adjusted appropriately (i=%d n=%d)", i, n)
		}
	}
}

func testDeferPtrsGoexit(c chan int, i int) {
	var y int
	defer func() {
		c <- y
	}()
	defer setBig(&y, 42, bigBuf{})
	useStackAndCall(i, Goexit)
}

// setBig stores x through p; the bigBuf argument pads the deferred call's
// argument frame past 4kB.
func setBig(p *int, x int, b bigBuf) {
	*p = x
}

// TestDeferPtrsPanic is like TestDeferPtrsGoexit, but it's using panic instead
// of Goexit to run the Defers. Those two are different execution paths
// in the runtime.
func TestDeferPtrsPanic(t *testing.T) {
	for i := 0; i < 100; i++ {
		c := make(chan int, 1)
		go testDeferPtrsPanic(c, i)
		if n := <-c; n != 42 {
			t.Fatalf("defer's stack references were not adjusted appropriately (i=%d n=%d)", i, n)
		}
	}
}

func testDeferPtrsPanic(c chan int, i int) {
	var y int
	defer func() {
		if recover() == nil {
			c <- -1
			return
		}
		c <- y
	}()
	defer setBig(&y, 42, bigBuf{})
	useStackAndCall(i, func() { panic(1) })
}

// TestPanicUseStack checks that a chain of Panic structs on the stack is
// updated correctly if the stack grows during the deferred execution that
// happens as a result of the panic.
func TestPanicUseStack(t *testing.T) {
	pc := make([]uintptr, 10000)
	defer func() {
		recover()
		Callers(0, pc) // force stack walk
		useStackAndCall(100, func() {
			defer func() {
				recover()
				Callers(0, pc) // force stack walk
				useStackAndCall(200, func() {
					defer func() {
						recover()
						Callers(0, pc) // force stack walk
					}()
					panic(3)
				})
			}()
			panic(2)
		})
	}()
	panic(1)
}

// useStackAndCall uses about n KB of stack and then calls f.
func useStackAndCall(n int, f func()) {
	if n == 0 {
		f()
		return
	}
	var b [1024]byte // makes frame about 1KB
	useStackAndCall(n-1+int(b[99]), f) // b[99] is 0; the read keeps b from being optimized away
}
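
// For example, useStackAndCall(10, f) pushes roughly ten 1 KB frames before
// invoking f near the bottom of the used region.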

// useStack uses about n KB of stack.
func useStack(n int) {
	useStackAndCall(n, func() {})
}

// growing reads stack sizes from c, grows the stack to each size in turn,
// and reports completion on done.
func growing(c chan int, done chan struct{}) {
	for n := range c {
		useStack(n)
		done <- struct{}{}
	}
	done <- struct{}{}
}

func TestStackCache(t *testing.T) {
	// Allocate a bunch of goroutines and grow their stacks.
	// Repeat a few times to test the stack cache.
	const (
		R = 4   // rounds
		G = 200 // goroutines per round
		S = 5   // stack growth steps per round
	)
	for i := 0; i < R; i++ {
		var reqchans [G]chan int
		done := make(chan struct{})
		for j := 0; j < G; j++ {
			reqchans[j] = make(chan int)
			go growing(reqchans[j], done)
		}
		for s := 0; s < S; s++ {
			for j := 0; j < G; j++ {
				reqchans[j] <- 1 << uint(s)
			}
			for j := 0; j < G; j++ {
				<-done
			}
		}
		for j := 0; j < G; j++ {
			close(reqchans[j])
		}
		for j := 0; j < G; j++ {
			<-done
		}
	}
}
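
// With G=200 goroutines each growing to at most 1<<(S-1) = 16 KB, a round
// touches roughly 200 * 16 KB, about 3 MB of stack, repeatedly allocated and
// freed, which exercises the per-thread stack caches.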

func TestStackOutput(t *testing.T) {
	b := make([]byte, 1024)
	stk := string(b[:Stack(b, false)])
	if !strings.HasPrefix(stk, "goroutine ") {
		t.Errorf("Stack (len %d):\n%s", len(stk), stk)
		t.Errorf("Stack output should begin with \"goroutine \"")
	}
}

func TestStackAllOutput(t *testing.T) {
	b := make([]byte, 1024)
	stk := string(b[:Stack(b, true)])
	if !strings.HasPrefix(stk, "goroutine ") {
		t.Errorf("Stack (len %d):\n%s", len(stk), stk)
		t.Errorf("Stack output should begin with \"goroutine \"")
	}
}

func TestStackPanic(t *testing.T) {
	// Test that stack copying copies panics correctly. This is difficult
	// to test because it is very unlikely that the stack will be copied
	// in the middle of gopanic. But it can happen.
	// To make this test effective, edit panic.go:gopanic and uncomment
	// the GC() call just before freedefer(d).
	defer func() {
		if x := recover(); x == nil {
			t.Errorf("recover failed")
		}
	}()
	useStack(32)
	panic("test panic")
}

func BenchmarkStackCopy(b *testing.B) {
	c := make(chan bool)
	for i := 0; i < b.N; i++ {
		go func() {
			count(1000000)
			c <- true
		}()
		<-c
	}
}
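
// To run just this benchmark, one can use, e.g.:
//
//	go test -run=NONE -bench=BenchmarkStackCopy runtime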

// count recurses n frames deep and returns n; the deep recursion forces
// repeated stack growth (and hence stack copying) in the benchmark above.
func count(n int) int {
	if n == 0 {
		return 0
	}
	return 1 + count(n-1)
}