2014-11-11 15:07:06 -07:00
|
|
|
// Copyright 2009 The Go Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style
|
|
|
|
// license that can be found in the LICENSE file.
|
|
|
|
|
|
|
|
package runtime
|
|
|
|
|
2015-11-02 12:09:24 -07:00
|
|
|
import (
|
|
|
|
"runtime/internal/atomic"
|
2015-11-11 10:39:30 -07:00
|
|
|
"runtime/internal/sys"
|
2015-11-02 12:09:24 -07:00
|
|
|
"unsafe"
|
|
|
|
)
|
2014-11-11 15:07:06 -07:00
|
|
|
|
|
|
|
// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback value (0 off, 1 on, 2 include system).
const (
	tracebackCrash = 1 << iota // low bit: core dump after traceback
	tracebackAll               // next bit: dump all goroutines, not just current
	tracebackShift = iota      // number of flag bits below the level field
)

// traceback_cache holds the packed settings described above.
// It starts at level 2 (system frames included) until
// parsedebugvars/setTraceback install the real setting.
var traceback_cache uint32 = 2 << tracebackShift

// traceback_env records the bits requested by the GOTRACEBACK
// environment variable (captured in parsedebugvars); setTraceback
// ORs it back in so later calls cannot drop env-requested bits.
var traceback_env uint32
|
2015-10-30 09:03:02 -06:00
|
|
|
|
|
|
|
// gotraceback returns the current traceback settings.
|
|
|
|
//
|
|
|
|
// If level is 0, suppress all tracebacks.
|
|
|
|
// If level is 1, show tracebacks, but exclude runtime frames.
|
|
|
|
// If level is 2, show tracebacks including runtime frames.
|
|
|
|
// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
|
|
|
|
// If crash is set, crash (core dump, etc) after tracebacking.
|
|
|
|
//
|
2014-11-11 15:07:06 -07:00
|
|
|
//go:nosplit
func gotraceback() (level int32, all, crash bool) {
	_g_ := getg()
	// While this M is throwing, always dump all goroutines.
	all = _g_.m.throwing > 0
	if _g_.m.traceback != 0 {
		// A per-M override takes precedence over the cached
		// GOTRACEBACK setting; crash stays false on this path.
		level = int32(_g_.m.traceback)
		return
	}
	// Unpack the cached settings: flag bits in the low bits,
	// traceback level in the remaining bits (see tracebackShift).
	t := atomic.Load(&traceback_cache)
	crash = t&tracebackCrash != 0
	all = all || t&tracebackAll != 0
	level = int32(t >> tracebackShift)
	return
}
|
|
|
|
|
|
|
|
// Command-line argument vector handed to us by the OS at startup;
// recorded by args and consumed by goargs and goenvs_unix.
var (
	argc int32
	argv **byte
)
|
|
|
|
|
2016-02-22 23:26:50 -07:00
|
|
|
// argv_index returns the i'th pointer of the C-style argv array.
// nosplit for use in linux startup sysargs.
//go:nosplit
func argv_index(argv **byte, i int32) *byte {
	// argv is a flat array of pointers: element i lives at
	// argv + i*PtrSize.
	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*sys.PtrSize))
}
|
|
|
|
|
|
|
|
// args records the argument vector supplied by the OS and gives the
// OS-specific sysargs hook a chance to extract auxiliary startup data.
func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}
|
|
|
|
|
|
|
|
func goargs() {
|
|
|
|
if GOOS == "windows" {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
argslice = make([]string, argc)
|
|
|
|
for i := int32(0); i < argc; i++ {
|
|
|
|
argslice[i] = gostringnocopy(argv_index(argv, i))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// goenvs_unix copies the process environment into the Go envs slice.
// On Unix the C environment block follows argv in memory, separated
// from it by a single NULL pointer.
func goenvs_unix() {
	// TODO(austin): ppc64 in dynamic linking mode doesn't
	// guarantee env[] will immediately follow argv. Might cause
	// problems.
	n := int32(0)
	// env[] begins one pointer past argv[argc] (skipping the NULL
	// argv terminator); count entries up to the env NULL terminator.
	for argv_index(argv, argc+1+n) != nil {
		n++
	}

	envs = make([]string, n)
	for i := int32(0); i < n; i++ {
		// gostring (unlike the gostringnocopy used for args) makes
		// a copy of each entry.
		envs[i] = gostring(argv_index(argv, argc+1+i))
	}
}
|
|
|
|
|
|
|
|
func environ() []string {
|
|
|
|
return envs
|
|
|
|
}
|
|
|
|
|
2015-01-29 09:54:45 -07:00
|
|
|
// test_z64 and test_x64 are scratch words for testAtomic64.
// TODO: These should be locals in testAtomic64, but we don't 8-byte
// align stack variables on 386.
var test_z64, test_x64 uint64
|
|
|
|
|
2014-11-11 15:07:06 -07:00
|
|
|
// testAtomic64 sanity-checks the 64-bit atomic primitives
// (Cas64, Load64, Store64, Xadd64, Xchg64) at startup, throwing on
// any mismatch. Values above 1<<32 are used so that an accidental
// truncation to 32 bits would be caught.
func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	// Touch the word with each prefetch flavor; this also exercises
	// the prefetch stubs themselves.
	prefetcht0(uintptr(unsafe.Pointer(&test_z64)))
	prefetcht1(uintptr(unsafe.Pointer(&test_z64)))
	prefetcht2(uintptr(unsafe.Pointer(&test_z64)))
	prefetchnta(uintptr(unsafe.Pointer(&test_z64)))
	// CAS with a non-matching old value (0 vs 42) must fail and must
	// not disturb either word.
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	// CAS with the matching old value (42) must succeed and store 1.
	test_x64 = 42
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	// Store/Load round trip with a value that needs the high bits.
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	// Xadd64 returns the new value and must update memory.
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	// Xchg64 returns the old value and must install the new one.
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}
|
|
|
|
|
|
|
|
// check verifies basic compiler and runtime invariants at startup:
// primitive type sizes, struct layout/offsets, timediv, the 32-bit
// atomics, pointer CAS, atomic Or8, IEEE NaN comparison semantics,
// the 64-bit atomics (via testAtomic64), the stack-size constant, and
// the assembly-side checks. Any failure throws.
func check() {
	var (
		a     int8
		b     uint8
		c     int16
		d     uint16
		e     int32
		f     uint32
		g     int64
		h     uint64
		i, i1 float32
		j, j1 float64
		k, k1 unsafe.Pointer
		l     *uint16
		m     [4]byte
	)
	type x1t struct {
		x uint8
	}
	type y1t struct {
		x1 x1t
		y  uint8
	}
	var x1 x1t
	var y1 y1t

	// Fixed-size integer and float types must have their advertised
	// sizes; pointers must match sys.PtrSize.
	if unsafe.Sizeof(a) != 1 {
		throw("bad a")
	}
	if unsafe.Sizeof(b) != 1 {
		throw("bad b")
	}
	if unsafe.Sizeof(c) != 2 {
		throw("bad c")
	}
	if unsafe.Sizeof(d) != 2 {
		throw("bad d")
	}
	if unsafe.Sizeof(e) != 4 {
		throw("bad e")
	}
	if unsafe.Sizeof(f) != 4 {
		throw("bad f")
	}
	if unsafe.Sizeof(g) != 8 {
		throw("bad g")
	}
	if unsafe.Sizeof(h) != 8 {
		throw("bad h")
	}
	if unsafe.Sizeof(i) != 4 {
		throw("bad i")
	}
	if unsafe.Sizeof(j) != 8 {
		throw("bad j")
	}
	if unsafe.Sizeof(k) != sys.PtrSize {
		throw("bad k")
	}
	if unsafe.Sizeof(l) != sys.PtrSize {
		throw("bad l")
	}
	// Single-byte fields must pack without padding.
	if unsafe.Sizeof(x1) != 1 {
		throw("bad unsafe.Sizeof x1")
	}
	if unsafe.Offsetof(y1.y) != 1 {
		throw("bad offsetof y1.y")
	}
	if unsafe.Sizeof(y1) != 2 {
		throw("bad unsafe.Sizeof y1")
	}

	// Spot-check timediv with a value large enough to exercise the
	// 64-bit path; quotient and remainder must both be right.
	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
		throw("bad timediv")
	}

	// 32-bit CAS: matching old value must swap...
	var z uint32
	z = 1
	if !atomic.Cas(&z, 1, 2) {
		throw("cas1")
	}
	if z != 2 {
		throw("cas2")
	}

	// ...and a non-matching old value must leave memory untouched.
	z = 4
	if atomic.Cas(&z, 5, 6) {
		throw("cas3")
	}
	if z != 4 {
		throw("cas4")
	}

	// CAS must work on values with the sign bit set.
	z = 0xffffffff
	if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
		throw("cas5")
	}
	if z != 0xfffffffe {
		throw("cas6")
	}

	// Pointer CAS (casp) checks, using a synthetic non-nil pointer
	// (widened past 32 bits on 64-bit systems).
	k = unsafe.Pointer(uintptr(0xfedcb123))
	if sys.PtrSize == 8 {
		k = unsafe.Pointer(uintptr(k) << 10)
	}
	if casp(&k, nil, nil) {
		throw("casp1")
	}
	k1 = add(k, 1)
	if !casp(&k, k, k1) {
		throw("casp2")
	}
	if k != k1 {
		throw("casp3")
	}

	// Or8 must modify exactly its target byte, not its neighbors.
	m = [4]byte{1, 1, 1, 1}
	atomic.Or8(&m[1], 0xf0)
	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
		throw("atomicor8")
	}

	// All-ones bit patterns are NaNs; NaN must compare unequal to
	// everything, including itself (IEEE 754).
	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
	if j == j {
		throw("float64nan")
	}
	if !(j != j) {
		throw("float64nan1")
	}

	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
	if j == j1 {
		throw("float64nan2")
	}
	if !(j != j1) {
		throw("float64nan3")
	}

	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
	if i == i {
		throw("float32nan")
	}
	if i == i {
		throw("float32nan1")
	}

	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
	if i == i1 {
		throw("float32nan2")
	}
	if i == i1 {
		throw("float32nan3")
	}

	testAtomic64()

	if _FixedStack != round2(_FixedStack) {
		throw("FixedStack is not power-of-2")
	}

	if !checkASM() {
		throw("assembly checks failed")
	}
}
|
|
|
|
|
|
|
|
// dbgVar describes one GODEBUG knob: its key in the comma-separated
// GODEBUG string and the int32 variable that receives the parsed value.
type dbgVar struct {
	name  string
	value *int32
}
|
|
|
|
|
2015-01-28 11:28:59 -07:00
|
|
|
// Holds variables parsed from GODEBUG env var,
// except for "memprofilerate" since there is an
// existing int var for that value, which may
// already have an initial value.
//
// All fields start at zero; parsedebugvars installs the non-zero
// defaults (cgocheck, invalidptr) and then applies GODEBUG overrides.
var debug struct {
	allocfreetrace    int32
	cgocheck          int32
	efence            int32
	gccheckmark       int32
	gcpacertrace      int32
	gcshrinkstackoff  int32
	gcstackbarrieroff int32
	gcstackbarrierall int32
	gcstoptheworld    int32
	gctrace           int32
	invalidptr        int32
	sbrk              int32
	scavenge          int32
	scheddetail       int32
	schedtrace        int32
	wbshadow          int32
}
|
2014-11-11 15:07:06 -07:00
|
|
|
|
|
|
|
// dbgvars maps GODEBUG key names to the fields of the debug struct
// above. "memprofilerate" is handled specially in parsedebugvars and
// is deliberately absent here.
var dbgvars = []dbgVar{
	{"allocfreetrace", &debug.allocfreetrace},
	{"cgocheck", &debug.cgocheck},
	{"efence", &debug.efence},
	{"gccheckmark", &debug.gccheckmark},
	{"gcpacertrace", &debug.gcpacertrace},
	{"gcshrinkstackoff", &debug.gcshrinkstackoff},
	{"gcstackbarrieroff", &debug.gcstackbarrieroff},
	{"gcstackbarrierall", &debug.gcstackbarrierall},
	{"gcstoptheworld", &debug.gcstoptheworld},
	{"gctrace", &debug.gctrace},
	{"invalidptr", &debug.invalidptr},
	{"sbrk", &debug.sbrk},
	{"scavenge", &debug.scavenge},
	{"scheddetail", &debug.scheddetail},
	{"schedtrace", &debug.schedtrace},
	{"wbshadow", &debug.wbshadow},
}
|
|
|
|
|
|
|
|
// parsedebugvars parses the GODEBUG and GOTRACEBACK environment
// variables into the debug struct, MemProfileRate, and the traceback
// cache, and applies any settings that have immediate side effects.
func parsedebugvars() {
	// defaults
	debug.cgocheck = 1
	debug.invalidptr = 1

	// GODEBUG is a comma-separated list of key=value pairs.
	for p := gogetenv("GODEBUG"); p != ""; {
		field := ""
		i := index(p, ",")
		if i < 0 {
			field, p = p, ""
		} else {
			field, p = p[:i], p[i+1:]
		}
		i = index(field, "=")
		if i < 0 {
			// Malformed field with no '='; p has already been
			// advanced, so skipping it cannot loop forever.
			continue
		}
		key, value := field[:i], field[i+1:]

		// Update MemProfileRate directly here since it
		// is int, not int32, and should only be updated
		// if specified in GODEBUG.
		if key == "memprofilerate" {
			MemProfileRate = atoi(value)
		} else {
			for _, v := range dbgvars {
				if v.name == key {
					*v.value = int32(atoi(value))
				}
			}
		}
	}

	// Install the GOTRACEBACK setting, then snapshot it so later
	// setTraceback calls cannot drop the env-requested bits.
	setTraceback(gogetenv("GOTRACEBACK"))
	traceback_env = traceback_cache

	if debug.gcstackbarrierall > 0 {
		firstStackBarrierOffset = 0
	}

	// For cgocheck > 1, we turn on the write barrier at all times
	// and check all pointer writes.
	if debug.cgocheck > 1 {
		writeBarrier.cgo = true
		writeBarrier.enabled = true
	}
}
|
|
|
|
|
|
|
|
// setTraceback packs the named traceback level into traceback_cache
// (flag bits low, level in the remaining bits — see the const block
// above). It is also linked to runtime/debug.SetTraceback.
//go:linkname setTraceback runtime/debug.SetTraceback
func setTraceback(level string) {
	var t uint32
	switch level {
	case "none":
		t = 0
	case "single", "":
		t = 1 << tracebackShift
	case "all":
		t = 1<<tracebackShift | tracebackAll
	case "system":
		t = 2<<tracebackShift | tracebackAll
	case "crash":
		t = 2<<tracebackShift | tracebackAll | tracebackCrash
	default:
		// Any other value is treated as a numeric level with "all".
		t = uint32(atoi(level))<<tracebackShift | tracebackAll
	}
	// when C owns the process, simply exit'ing the process on fatal errors
	// and panics is surprising. Be louder and abort instead.
	if islibrary || isarchive {
		t |= tracebackCrash
	}

	// Never drop below what the GOTRACEBACK env var requested.
	t |= traceback_env

	atomic.Store(&traceback_cache, t)
}
|
|
|
|
|
|
|
|
// timediv is poor man's 64-bit division: it computes v/div by
// shift-and-subtract long division, storing the remainder in *rem
// when rem is non-nil.
// This is a very special function, do not use it if you are not sure
// what you are doing: int64 division is lowered into a _divv() call
// on 386, which does not fit into nosplit functions.
// Handles overflow in a time-specific manner: if the quotient does
// not fit in 31 bits, it returns 0x7fffffff and sets *rem to 0.
//go:nosplit
func timediv(v int64, div int32, rem *int32) int32 {
	d := int64(div)
	quo := int32(0)
	// Peel off one quotient bit per step, highest first. Bit 30 is
	// the largest a positive int32 quotient can carry.
	bit := uint(30)
	for {
		if chunk := d << bit; v >= chunk {
			v -= chunk
			quo += 1 << bit
		}
		if bit == 0 {
			break
		}
		bit--
	}
	if v >= d {
		// Quotient needs more than 31 bits: saturate.
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff
	}
	if rem != nil {
		*rem = int32(v)
	}
	return quo
}
|
|
|
|
|
|
|
|
// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.
|
|
|
|
|
|
|
|
//go:nosplit
|
|
|
|
func acquirem() *m {
|
|
|
|
_g_ := getg()
|
|
|
|
_g_.m.locks++
|
|
|
|
return _g_.m
|
|
|
|
}
|
|
|
|
|
|
|
|
//go:nosplit
|
|
|
|
func releasem(mp *m) {
|
|
|
|
_g_ := getg()
|
|
|
|
mp.locks--
|
|
|
|
if mp.locks == 0 && _g_.preempt {
|
|
|
|
// restore the preemption request in case we've cleared it in newstack
|
2015-01-05 09:29:21 -07:00
|
|
|
_g_.stackguard0 = stackPreempt
|
2014-11-11 15:07:06 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// gomcache returns the mcache of the current M.
//go:nosplit
func gomcache() *mcache {
	return getg().m.mcache
}
|
|
|
|
|
2014-12-22 11:27:53 -07:00
|
|
|
// reflect_typelinks returns, for every loaded module, the base
// pointer of its types section and its typelink offset table,
// starting with firstmoduledata and following the module chain.
//go:linkname reflect_typelinks reflect.typelinks
func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
	sections := []unsafe.Pointer{unsafe.Pointer(firstmoduledata.types)}
	ret := [][]int32{firstmoduledata.typelinks}
	for datap := firstmoduledata.next; datap != nil; datap = datap.next {
		sections = append(sections, unsafe.Pointer(datap.types))
		ret = append(ret, datap.typelinks)
	}
	return sections, ret
}
|
cmd/compile, etc: store method tables as offsets
This CL introduces the typeOff type and a lookup method of the same
name that can turn a typeOff offset into an *rtype.
In a typical Go binary (built with buildmode=exe, pie, c-archive, or
c-shared), there is one moduledata and all typeOff values are offsets
relative to firstmoduledata.types. This makes computing the pointer
cheap in typical programs.
With buildmode=shared (and one day, buildmode=plugin) there are
multiple modules whose relative offset is determined at runtime.
We identify a type in the general case by the pair of the original
*rtype that references it and its typeOff value. We determine
the module from the original pointer, and then use the typeOff from
there to compute the final *rtype.
To ensure there is only one *rtype representing each type, the
runtime initializes a typemap for each module, using any identical
type from an earlier module when resolving that offset. This means
that types computed from an offset match the type mapped by the
pointer dynamic relocations.
A series of followup CLs will replace other *rtype values with typeOff
(and name/*string with nameOff).
For types created at runtime by reflect, type offsets are treated as
global IDs and reference into a reflect offset map kept by the runtime.
darwin/amd64:
cmd/go: -57KB (0.6%)
jujud: -557KB (0.8%)
linux/amd64 PIE:
cmd/go: -361KB (3.0%)
jujud: -3.5MB (4.2%)
For #6853.
Change-Id: Icf096fd884a0a0cb9f280f46f7a26c70a9006c96
Reviewed-on: https://go-review.googlesource.com/21285
Reviewed-by: Ian Lance Taylor <iant@golang.org>
Run-TryBot: David Crawshaw <crawshaw@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
2016-03-28 08:32:27 -06:00
|
|
|
|
|
|
|
// reflect_resolveTypeOff resolves an *rtype offset from a base type.
//go:linkname reflect_resolveTypeOff reflect.resolveTypeOff
func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
}
|
|
|
|
|
|
|
|
// reflect_resolveTextOff resolves a function pointer offset from a base type.
//go:linkname reflect_resolveTextOff reflect.resolveTextOff
func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return (*_type)(rtype).textOff(textOff(off))
}
|
|
|
|
|
|
|
|
// reflect_addReflectOff adds a pointer to the reflection offset lookup map.
// Registering the same pointer twice returns the same ID. The whole
// operation runs under reflectOffs.lock.
//go:linkname reflect_addReflectOff reflect.addReflectOff
func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
	lock(&reflectOffs.lock)
	if reflectOffs.m == nil {
		// Lazily initialize the forward and inverse maps on first use.
		reflectOffs.m = make(map[int32]unsafe.Pointer)
		reflectOffs.minv = make(map[unsafe.Pointer]int32)
		reflectOffs.next = -1
	}
	id, found := reflectOffs.minv[ptr]
	if !found {
		// Record both directions so repeated registrations are stable.
		id = reflectOffs.next
		reflectOffs.next-- // use negative offsets as IDs to aid debugging
		reflectOffs.m[id] = ptr
		reflectOffs.minv[ptr] = id
	}
	unlock(&reflectOffs.lock)
	return id
}
|