// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback value (0 off, 1 on, 2 include system).
const (
	tracebackCrash = 1 << iota
	tracebackAll
	tracebackShift = iota
)

var traceback_cache uint32 = 2 << tracebackShift
var traceback_env uint32
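
// Illustrative note: after setTraceback("crash") below, traceback_cache holds
// 2<<tracebackShift | tracebackAll | tracebackCrash, which gotraceback decodes
// as level 2 with both all and crash set.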

// gotraceback returns the current traceback settings.
//
// If level is 0, suppress all tracebacks.
// If level is 1, show tracebacks, but exclude runtime frames.
// If level is 2, show tracebacks including runtime frames.
// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
// If crash is set, crash (core dump, etc) after tracebacking.
//
//go:nosplit
func gotraceback() (level int32, all, crash bool) {
	_g_ := getg()
	t := atomic.Load(&traceback_cache)
	crash = t&tracebackCrash != 0
	all = _g_.m.throwing > 0 || t&tracebackAll != 0
	if _g_.m.traceback != 0 {
		level = int32(_g_.m.traceback)
	} else {
		level = int32(t >> tracebackShift)
	}
	return
}

var (
	argc int32
	argv **byte
)

// nosplit for use in linux startup sysargs
//go:nosplit
func argv_index(argv **byte, i int32) *byte {
	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*sys.PtrSize))
}

func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}

func goargs() {
	if GOOS == "windows" {
		return
	}
	argslice = make([]string, argc)
	for i := int32(0); i < argc; i++ {
		argslice[i] = gostringnocopy(argv_index(argv, i))
	}
}

func goenvs_unix() {
	// TODO(austin): ppc64 in dynamic linking mode doesn't
	// guarantee env[] will immediately follow argv. Might cause
	// problems.
	n := int32(0)
	for argv_index(argv, argc+1+n) != nil {
		n++
	}

	envs = make([]string, n)
	for i := int32(0); i < n; i++ {
		envs[i] = gostring(argv_index(argv, argc+1+i))
	}
}

func environ() []string {
	return envs
}

// TODO: These should be locals in testAtomic64, but we don't 8-byte
// align stack variables on 386.
var test_z64, test_x64 uint64

func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	prefetcht0(uintptr(unsafe.Pointer(&test_z64)))
	prefetcht1(uintptr(unsafe.Pointer(&test_z64)))
	prefetcht2(uintptr(unsafe.Pointer(&test_z64)))
	prefetchnta(uintptr(unsafe.Pointer(&test_z64)))
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	test_x64 = 42
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}

func check() {
	var (
		a     int8
		b     uint8
		c     int16
		d     uint16
		e     int32
		f     uint32
		g     int64
		h     uint64
		i, i1 float32
		j, j1 float64
		k, k1 unsafe.Pointer
		l     *uint16
		m     [4]byte
	)
	type x1t struct {
		x uint8
	}
	type y1t struct {
		x1 x1t
		y  uint8
	}
	var x1 x1t
	var y1 y1t

	if unsafe.Sizeof(a) != 1 {
		throw("bad a")
	}
	if unsafe.Sizeof(b) != 1 {
		throw("bad b")
	}
	if unsafe.Sizeof(c) != 2 {
		throw("bad c")
	}
	if unsafe.Sizeof(d) != 2 {
		throw("bad d")
	}
	if unsafe.Sizeof(e) != 4 {
		throw("bad e")
	}
	if unsafe.Sizeof(f) != 4 {
		throw("bad f")
	}
	if unsafe.Sizeof(g) != 8 {
		throw("bad g")
	}
	if unsafe.Sizeof(h) != 8 {
		throw("bad h")
	}
	if unsafe.Sizeof(i) != 4 {
		throw("bad i")
	}
	if unsafe.Sizeof(j) != 8 {
		throw("bad j")
	}
	if unsafe.Sizeof(k) != sys.PtrSize {
		throw("bad k")
	}
	if unsafe.Sizeof(l) != sys.PtrSize {
		throw("bad l")
	}
	if unsafe.Sizeof(x1) != 1 {
		throw("bad unsafe.Sizeof x1")
	}
	if unsafe.Offsetof(y1.y) != 1 {
		throw("bad offsetof y1.y")
	}
	if unsafe.Sizeof(y1) != 2 {
		throw("bad unsafe.Sizeof y1")
	}

	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
		throw("bad timediv")
	}

	var z uint32
	z = 1
	if !atomic.Cas(&z, 1, 2) {
		throw("cas1")
	}
	if z != 2 {
		throw("cas2")
	}

	z = 4
	if atomic.Cas(&z, 5, 6) {
		throw("cas3")
	}
	if z != 4 {
		throw("cas4")
	}

	z = 0xffffffff
	if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
		throw("cas5")
	}
	if z != 0xfffffffe {
		throw("cas6")
	}

	k = unsafe.Pointer(uintptr(0xfedcb123))
	if sys.PtrSize == 8 {
		k = unsafe.Pointer(uintptr(k) << 10)
	}
	if casp(&k, nil, nil) {
		throw("casp1")
	}
	k1 = add(k, 1)
	if !casp(&k, k, k1) {
		throw("casp2")
	}
	if k != k1 {
		throw("casp3")
	}

	m = [4]byte{1, 1, 1, 1}
	atomic.Or8(&m[1], 0xf0)
	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
		throw("atomicor8")
	}

	m = [4]byte{0xff, 0xff, 0xff, 0xff}
	atomic.And8(&m[1], 0x1)
	if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
		throw("atomicand8")
	}
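
	// Illustrative note: writing all-one bits into a float's storage yields a
	// NaN, and a NaN compares unequal to every value, including itself, which
	// is what the checks below rely on.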
	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
	if j == j {
		throw("float64nan")
	}
	if !(j != j) {
		throw("float64nan1")
	}

	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
	if j == j1 {
		throw("float64nan2")
	}
	if !(j != j1) {
		throw("float64nan3")
	}

	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
	if i == i {
		throw("float32nan")
	}
	if i == i {
		throw("float32nan1")
	}

	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
	if i == i1 {
		throw("float32nan2")
	}
	if i == i1 {
		throw("float32nan3")
	}

	testAtomic64()

	if _FixedStack != round2(_FixedStack) {
		throw("FixedStack is not power-of-2")
	}

	if !checkASM() {
		throw("assembly checks failed")
	}
}

type dbgVar struct {
	name  string
	value *int32
}

// Holds variables parsed from GODEBUG env var,
// except for "memprofilerate" since there is an
// existing int var for that value, which may
// already have an initial value.
var debug struct {
	allocfreetrace   int32
	cgocheck         int32
	efence           int32
	gccheckmark      int32
	gcpacertrace     int32
	gcshrinkstackoff int32
	gcrescanstacks   int32
	gcstoptheworld   int32
	gctrace          int32
	invalidptr       int32
	sbrk             int32
	scavenge         int32
	scheddetail      int32
	schedtrace       int32
}

var dbgvars = []dbgVar{
	{"allocfreetrace", &debug.allocfreetrace},
	{"cgocheck", &debug.cgocheck},
	{"efence", &debug.efence},
	{"gccheckmark", &debug.gccheckmark},
	{"gcpacertrace", &debug.gcpacertrace},
	{"gcshrinkstackoff", &debug.gcshrinkstackoff},
	{"gcrescanstacks", &debug.gcrescanstacks},
	{"gcstoptheworld", &debug.gcstoptheworld},
	{"gctrace", &debug.gctrace},
	{"invalidptr", &debug.invalidptr},
	{"sbrk", &debug.sbrk},
	{"scavenge", &debug.scavenge},
	{"scheddetail", &debug.scheddetail},
	{"schedtrace", &debug.schedtrace},
}

func parsedebugvars() {
	// defaults
	debug.cgocheck = 1
	debug.invalidptr = 1
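
	// Illustrative note: GODEBUG is a comma-separated list of name=value
	// pairs, e.g. GODEBUG=gctrace=1,schedtrace=1000. Fields without an '='
	// are skipped by the loop below, and unknown names are simply ignored.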
	for p := gogetenv("GODEBUG"); p != ""; {
		field := ""
		i := index(p, ",")
		if i < 0 {
			field, p = p, ""
		} else {
			field, p = p[:i], p[i+1:]
		}
		i = index(field, "=")
		if i < 0 {
			continue
		}
		key, value := field[:i], field[i+1:]

		// Update MemProfileRate directly here since it
		// is int, not int32, and should only be updated
		// if specified in GODEBUG.
		if key == "memprofilerate" {
			if n, ok := atoi(value); ok {
				MemProfileRate = n
			}
		} else {
			for _, v := range dbgvars {
				if v.name == key {
					if n, ok := atoi32(value); ok {
						*v.value = n
					}
				}
			}
		}
	}

	setTraceback(gogetenv("GOTRACEBACK"))
	traceback_env = traceback_cache

	// For cgocheck > 1, we turn on the write barrier at all times
	// and check all pointer writes.
	if debug.cgocheck > 1 {
		writeBarrier.cgo = true
		writeBarrier.enabled = true
	}
}

//go:linkname setTraceback runtime/debug.SetTraceback
func setTraceback(level string) {
	var t uint32
	switch level {
	case "none":
		t = 0
	case "single", "":
		t = 1 << tracebackShift
	case "all":
		t = 1<<tracebackShift | tracebackAll
	case "system":
		t = 2<<tracebackShift | tracebackAll
	case "crash":
		t = 2<<tracebackShift | tracebackAll | tracebackCrash
	default:
		t = tracebackAll
		if n, ok := atoi(level); ok && n == int(uint32(n)) {
			t |= uint32(n) << tracebackShift
		}
	}
	// When C owns the process, simply exiting the process on fatal errors
	// and panics is surprising. Be louder and abort instead.
	if islibrary || isarchive {
		t |= tracebackCrash
	}

	t |= traceback_env

	atomic.Store(&traceback_cache, t)
}
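
// Illustrative note: GOTRACEBACK=none maps to level 0; "single" (or an unset
// variable) to level 1; "all" to level 1 with all goroutines; "system" to
// level 2 with all goroutines; "crash" additionally sets the crash bit; and a
// numeric value sets the level directly with all goroutines shown.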

// Poor man's 64-bit division.
// This is a very special function; do not use it if you are not sure what you are doing.
// int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.
// Handles overflow in a time-specific manner.
//go:nosplit
func timediv(v int64, div int32, rem *int32) int32 {
	res := int32(0)
	for bit := 30; bit >= 0; bit-- {
		if v >= int64(div)<<uint(bit) {
			v = v - (int64(div) << uint(bit))
			res += 1 << uint(bit)
		}
	}
	if v >= int64(div) {
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff
	}
	if rem != nil {
		*rem = int32(v)
	}
	return res
}
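
// Illustrative note: timediv(12345*1000000000+54321, 1000000000, &r) returns
// 12345 and sets r to 54321; check above exercises exactly this case. On
// overflow the quotient saturates at 0x7fffffff with a remainder of 0.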

// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.

//go:nosplit
func acquirem() *m {
	_g_ := getg()
	_g_.m.locks++
	return _g_.m
}

//go:nosplit
func releasem(mp *m) {
	_g_ := getg()
	mp.locks--
	if mp.locks == 0 && _g_.preempt {
		// restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}
}
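
// Illustrative note: acquirem and releasem are used in matched pairs, e.g.
// mp := acquirem(); ...; releasem(mp). While m.locks is nonzero the goroutine
// is not preempted off its m.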

//go:nosplit
func gomcache() *mcache {
	return getg().m.mcache
}

//go:linkname reflect_typelinks reflect.typelinks
func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
	modules := activeModules()
	sections := []unsafe.Pointer{unsafe.Pointer(modules[0].types)}
	ret := [][]int32{modules[0].typelinks}
	for _, md := range modules[1:] {
		sections = append(sections, unsafe.Pointer(md.types))
		ret = append(ret, md.typelinks)
	}
	return sections, ret
}

// reflect_resolveNameOff resolves a name offset from a base pointer.
//go:linkname reflect_resolveNameOff reflect.resolveNameOff
func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes)
}

// reflect_resolveTypeOff resolves an *rtype offset from a base type.
//go:linkname reflect_resolveTypeOff reflect.resolveTypeOff
func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
}

// reflect_resolveTextOff resolves a function pointer offset from a base type.
//go:linkname reflect_resolveTextOff reflect.resolveTextOff
func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return (*_type)(rtype).textOff(textOff(off))
}
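
// Illustrative note: these resolve helpers back the reflect package's
// nameOff/typeOff/textOff lookups. reflect hands the runtime a raw int32
// offset read from type metadata, plus a pointer identifying the module it
// came from, and gets back a pointer usable in this process.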

// reflect_addReflectOff adds a pointer to the reflection offset lookup map.
//go:linkname reflect_addReflectOff reflect.addReflectOff
func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
	reflectOffsLock()
	if reflectOffs.m == nil {
		reflectOffs.m = make(map[int32]unsafe.Pointer)
		reflectOffs.minv = make(map[unsafe.Pointer]int32)
		reflectOffs.next = -1
	}
	id, found := reflectOffs.minv[ptr]
	if !found {
		id = reflectOffs.next
		reflectOffs.next-- // use negative offsets as IDs to aid debugging
		reflectOffs.m[id] = ptr
		reflectOffs.minv[ptr] = id
	}
	reflectOffsUnlock()
	return id
}