// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Only works on systems with syscall.Close.
// We need a fast system call to provoke the race,
// and Close(-1) is nearly universally fast.

// +build darwin dragonfly freebsd linux netbsd openbsd plan9
package runtime_test

import (
	"runtime"
	"sync"
	"sync/atomic"
	"syscall"
	"testing"
)

// TestGoroutineProfile stresses runtime.GoroutineProfile while other
// goroutines rapidly enter and leave system calls.
// GoroutineProfile used to use the wrong starting sp for goroutines
// coming out of system calls, causing possible crashes.
func TestGoroutineProfile(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(100))

	// Signals the syscall workers to exit. Also stored in a deferred
	// call so the workers are shut down even if the test panics.
	var done uint32
	defer atomic.StoreUint32(&done, 1)

	// Start goroutines that sit in a tight system-call loop.
	// Close(-1) is nearly universally fast, which makes the
	// syscall-exit race easy to provoke.
	var workers sync.WaitGroup
	for i := 0; i < 4; i++ {
		workers.Add(1)
		go func() {
			defer workers.Done()
			for atomic.LoadUint32(&done) == 0 {
				syscall.Close(-1)
			}
		}()
	}

	iterations := 10000
	if testing.Short() {
		iterations = 100
	}
	records := make([]runtime.StackRecord, 128)
	for i := 0; i < iterations; i++ {
		if _, ok := runtime.GoroutineProfile(records); !ok {
			t.Fatalf("GoroutineProfile failed")
		}
	}

	// If the program didn't crash, we passed.
	atomic.StoreUint32(&done, 1)
	workers.Wait()
}
|