Mirror of https://github.com/golang/go
synced 2024-11-19 16:44:43 -07:00
7045e6f6c4
The only non-test user of the assembler prefetch functions is the
heapBits.prefetch function, which is itself unused. The runtime
prefetch functions have no functionality on most platforms and are not
inlineable since they are written in assembler. The function call
overhead eliminates the performance gains that prefetching could
provide, and it would degrade performance on platforms where the
functions are no-ops. If prefetch functions are needed again later,
they can avoid the function call overhead by being implemented as
compiler intrinsics.

Change-Id: I52c553cf3607ffe09f0441c6e7a0a818cb21117d
Reviewed-on: https://go-review.googlesource.com/44370
Run-TryBot: Martin Möhrmann <moehrmann@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
528 lines
11 KiB
Go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback value (0 off, 1 on, 2 include system).
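// For example, the "crash" setting is encoded as
// 2<<tracebackShift | tracebackAll | tracebackCrash: traceback level 2
// with both low flag bits set (see setTraceback below).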
const (
	tracebackCrash = 1 << iota
	tracebackAll
	tracebackShift = iota
)

var traceback_cache uint32 = 2 << tracebackShift
var traceback_env uint32

// gotraceback returns the current traceback settings.
//
// If level is 0, suppress all tracebacks.
// If level is 1, show tracebacks, but exclude runtime frames.
// If level is 2, show tracebacks including runtime frames.
// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
// If crash is set, crash (core dump, etc) after printing the tracebacks.
//
//go:nosplit
func gotraceback() (level int32, all, crash bool) {
	_g_ := getg()
	t := atomic.Load(&traceback_cache)
	crash = t&tracebackCrash != 0
	all = _g_.m.throwing > 0 || t&tracebackAll != 0
	if _g_.m.traceback != 0 {
		level = int32(_g_.m.traceback)
	} else {
		level = int32(t >> tracebackShift)
	}
	return
}

var (
	argc int32
	argv **byte
)

// nosplit for use in Linux startup sysargs.
//go:nosplit
func argv_index(argv **byte, i int32) *byte {
	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*sys.PtrSize))
}

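// args records the process's argc/argv pair at startup and gives the
// OS-specific sysargs hook a chance to examine them (on Linux, sysargs
// reads the ELF auxiliary vector that follows the environment).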
func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}

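// goargs copies the C argv strings into argslice, which backs os.Args.
// On Windows the command line is obtained through the Windows API
// instead, so goargs does nothing there.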
func goargs() {
	if GOOS == "windows" {
		return
	}
	argslice = make([]string, argc)
	for i := int32(0); i < argc; i++ {
		argslice[i] = gostringnocopy(argv_index(argv, i))
	}
}

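// goenvs_unix populates envs from the environment block that follows
// argv: the environment strings begin at argv[argc+1] and end at the
// first nil pointer.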
func goenvs_unix() {
	// TODO(austin): ppc64 in dynamic linking mode doesn't
	// guarantee env[] will immediately follow argv. Might cause
	// problems.
	n := int32(0)
	for argv_index(argv, argc+1+n) != nil {
		n++
	}

	envs = make([]string, n)
	for i := int32(0); i < n; i++ {
		envs[i] = gostring(argv_index(argv, argc+1+i))
	}
}

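// environ returns the environment strings captured at startup.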
func environ() []string {
	return envs
}

// TODO: These should be locals in testAtomic64, but we don't 8-byte
// align stack variables on 386.
var test_z64, test_x64 uint64

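// testAtomic64 sanity-checks the 64-bit atomic operations (Cas64,
// Load64, Store64, Xadd64, Xchg64). It uses values above 1<<32 so that
// accidental truncation to 32 bits would be caught.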
func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	test_x64 = 42
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}

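// check verifies assumptions the runtime depends on: primitive type
// sizes, struct layout, atomic operations, NaN comparison semantics,
// timediv, and the assembly routines. It throws if any check fails.
// It is called once, early in startup.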
func check() {
	var (
		a     int8
		b     uint8
		c     int16
		d     uint16
		e     int32
		f     uint32
		g     int64
		h     uint64
		i, i1 float32
		j, j1 float64
		k, k1 unsafe.Pointer
		l     *uint16
		m     [4]byte
	)
	type x1t struct {
		x uint8
	}
	type y1t struct {
		x1 x1t
		y  uint8
	}
	var x1 x1t
	var y1 y1t

	if unsafe.Sizeof(a) != 1 {
		throw("bad a")
	}
	if unsafe.Sizeof(b) != 1 {
		throw("bad b")
	}
	if unsafe.Sizeof(c) != 2 {
		throw("bad c")
	}
	if unsafe.Sizeof(d) != 2 {
		throw("bad d")
	}
	if unsafe.Sizeof(e) != 4 {
		throw("bad e")
	}
	if unsafe.Sizeof(f) != 4 {
		throw("bad f")
	}
	if unsafe.Sizeof(g) != 8 {
		throw("bad g")
	}
	if unsafe.Sizeof(h) != 8 {
		throw("bad h")
	}
	if unsafe.Sizeof(i) != 4 {
		throw("bad i")
	}
	if unsafe.Sizeof(j) != 8 {
		throw("bad j")
	}
	if unsafe.Sizeof(k) != sys.PtrSize {
		throw("bad k")
	}
	if unsafe.Sizeof(l) != sys.PtrSize {
		throw("bad l")
	}
	if unsafe.Sizeof(x1) != 1 {
		throw("bad unsafe.Sizeof x1")
	}
	if unsafe.Offsetof(y1.y) != 1 {
		throw("bad offsetof y1.y")
	}
	if unsafe.Sizeof(y1) != 2 {
		throw("bad unsafe.Sizeof y1")
	}

	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
		throw("bad timediv")
	}

	var z uint32
	z = 1
	if !atomic.Cas(&z, 1, 2) {
		throw("cas1")
	}
	if z != 2 {
		throw("cas2")
	}

	z = 4
	if atomic.Cas(&z, 5, 6) {
		throw("cas3")
	}
	if z != 4 {
		throw("cas4")
	}

	z = 0xffffffff
	if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
		throw("cas5")
	}
	if z != 0xfffffffe {
		throw("cas6")
	}

	k = unsafe.Pointer(uintptr(0xfedcb123))
	if sys.PtrSize == 8 {
		k = unsafe.Pointer(uintptr(k) << 10)
	}
	if casp(&k, nil, nil) {
		throw("casp1")
	}
	k1 = add(k, 1)
	if !casp(&k, k, k1) {
		throw("casp2")
	}
	if k != k1 {
		throw("casp3")
	}

	m = [4]byte{1, 1, 1, 1}
	atomic.Or8(&m[1], 0xf0)
	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
		throw("atomicor8")
	}

	m = [4]byte{0xff, 0xff, 0xff, 0xff}
	atomic.And8(&m[1], 0x1)
	if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
		throw("atomicand8")
	}

	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
	if j == j {
		throw("float64nan")
	}
	if !(j != j) {
		throw("float64nan1")
	}

	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
	if j == j1 {
		throw("float64nan2")
	}
	if !(j != j1) {
		throw("float64nan3")
	}

	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
	if i == i {
		throw("float32nan")
	}
	if !(i != i) {
		throw("float32nan1")
	}

	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
	if i == i1 {
		throw("float32nan2")
	}
	if !(i != i1) {
		throw("float32nan3")
	}

	testAtomic64()

	if _FixedStack != round2(_FixedStack) {
		throw("FixedStack is not power-of-2")
	}

	if !checkASM() {
		throw("assembly checks failed")
	}
}

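// dbgVar pairs a GODEBUG key with the runtime int32 variable that
// receives its parsed value.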
type dbgVar struct {
	name  string
	value *int32
}

// Holds variables parsed from GODEBUG env var,
// except for "memprofilerate" since there is an
// existing int var for that value, which may
// already have an initial value.
var debug struct {
	allocfreetrace   int32
	cgocheck         int32
	efence           int32
	gccheckmark      int32
	gcpacertrace     int32
	gcshrinkstackoff int32
	gcrescanstacks   int32
	gcstoptheworld   int32
	gctrace          int32
	invalidptr       int32
	sbrk             int32
	scavenge         int32
	scheddetail      int32
	schedtrace       int32
}

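// dbgvars maps each recognized GODEBUG key to its destination above.
// For example, GODEBUG=gctrace=1,scheddetail=1 sets debug.gctrace and
// debug.scheddetail to 1.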
var dbgvars = []dbgVar{
	{"allocfreetrace", &debug.allocfreetrace},
	{"cgocheck", &debug.cgocheck},
	{"efence", &debug.efence},
	{"gccheckmark", &debug.gccheckmark},
	{"gcpacertrace", &debug.gcpacertrace},
	{"gcshrinkstackoff", &debug.gcshrinkstackoff},
	{"gcrescanstacks", &debug.gcrescanstacks},
	{"gcstoptheworld", &debug.gcstoptheworld},
	{"gctrace", &debug.gctrace},
	{"invalidptr", &debug.invalidptr},
	{"sbrk", &debug.sbrk},
	{"scavenge", &debug.scavenge},
	{"scheddetail", &debug.scheddetail},
	{"schedtrace", &debug.schedtrace},
}

func parsedebugvars() {
	// defaults
	debug.cgocheck = 1
	debug.invalidptr = 1

	for p := gogetenv("GODEBUG"); p != ""; {
		field := ""
		i := index(p, ",")
		if i < 0 {
			field, p = p, ""
		} else {
			field, p = p[:i], p[i+1:]
		}
		i = index(field, "=")
		if i < 0 {
			continue
		}
		key, value := field[:i], field[i+1:]

		// Update MemProfileRate directly here since it
		// is int, not int32, and should only be updated
		// if specified in GODEBUG.
		if key == "memprofilerate" {
			if n, ok := atoi(value); ok {
				MemProfileRate = n
			}
		} else {
			for _, v := range dbgvars {
				if v.name == key {
					if n, ok := atoi32(value); ok {
						*v.value = n
					}
				}
			}
		}
	}

	setTraceback(gogetenv("GOTRACEBACK"))
	traceback_env = traceback_cache

	// For cgocheck > 1, we turn on the write barrier at all times
	// and check all pointer writes.
	if debug.cgocheck > 1 {
		writeBarrier.cgo = true
		writeBarrier.enabled = true
	}
}

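// setTraceback parses a GOTRACEBACK (or debug.SetTraceback) level
// string ("none", "single", "all", "system", "crash", or a number)
// and stores the encoded result in traceback_cache.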
//go:linkname setTraceback runtime/debug.SetTraceback
func setTraceback(level string) {
	var t uint32
	switch level {
	case "none":
		t = 0
	case "single", "":
		t = 1 << tracebackShift
	case "all":
		t = 1<<tracebackShift | tracebackAll
	case "system":
		t = 2<<tracebackShift | tracebackAll
	case "crash":
		t = 2<<tracebackShift | tracebackAll | tracebackCrash
	default:
		t = tracebackAll
		if n, ok := atoi(level); ok && n == int(uint32(n)) {
			t |= uint32(n) << tracebackShift
		}
	}
	// When C owns the process, simply exiting the process on fatal errors
	// and panics is surprising. Be louder and abort instead.
	if islibrary || isarchive {
		t |= tracebackCrash
	}

	t |= traceback_env

	atomic.Store(&traceback_cache, t)
}

// Poor man's 64-bit division.
// This is a very special function, do not use it if you are not sure what you are doing.
// int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.
// Handles overflow in a time-specific manner.
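// For example, timediv(12345*1000000000+54321, 1000000000, &rem)
// returns 12345 with rem set to 54321 (this is exercised in check above).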
//go:nosplit
func timediv(v int64, div int32, rem *int32) int32 {
	res := int32(0)
	for bit := 30; bit >= 0; bit-- {
		if v >= int64(div)<<uint(bit) {
			v = v - (int64(div) << uint(bit))
			res += 1 << uint(bit)
		}
	}
	if v >= int64(div) {
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff
	}
	if rem != nil {
		*rem = int32(v)
	}
	return res
}

// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.

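// acquirem disables preemption of the current goroutine by
// incrementing its m's lock count, and returns the m.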
//go:nosplit
func acquirem() *m {
	_g_ := getg()
	_g_.m.locks++
	return _g_.m
}

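// releasem undoes acquirem and, if the lock count drops to zero while
// a preemption is pending, re-arms the preemption request.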
//go:nosplit
func releasem(mp *m) {
	_g_ := getg()
	mp.locks--
	if mp.locks == 0 && _g_.preempt {
		// restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}
}

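// gomcache returns the current m's mcache.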
//go:nosplit
func gomcache() *mcache {
	return getg().m.mcache
}

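// reflect_typelinks returns, for each active module, the base of its
// types section and its typelink offsets; reflect uses these to
// enumerate the program's types.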
//go:linkname reflect_typelinks reflect.typelinks
func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
	modules := activeModules()
	sections := []unsafe.Pointer{unsafe.Pointer(modules[0].types)}
	ret := [][]int32{modules[0].typelinks}
	for _, md := range modules[1:] {
		sections = append(sections, unsafe.Pointer(md.types))
		ret = append(ret, md.typelinks)
	}
	return sections, ret
}

// reflect_resolveNameOff resolves a name offset from a base pointer.
//go:linkname reflect_resolveNameOff reflect.resolveNameOff
func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes)
}

// reflect_resolveTypeOff resolves an *rtype offset from a base type.
//go:linkname reflect_resolveTypeOff reflect.resolveTypeOff
func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
}

// reflect_resolveTextOff resolves a function pointer offset from a base type.
//go:linkname reflect_resolveTextOff reflect.resolveTextOff
func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return (*_type)(rtype).textOff(textOff(off))
}

// reflect_addReflectOff adds a pointer to the reflection offset lookup map.
//go:linkname reflect_addReflectOff reflect.addReflectOff
func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
	reflectOffsLock()
	if reflectOffs.m == nil {
		reflectOffs.m = make(map[int32]unsafe.Pointer)
		reflectOffs.minv = make(map[unsafe.Pointer]int32)
		reflectOffs.next = -1
	}
	id, found := reflectOffs.minv[ptr]
	if !found {
		id = reflectOffs.next
		reflectOffs.next-- // use negative offsets as IDs to aid debugging
		reflectOffs.m[id] = ptr
		reflectOffs.minv[ptr] = id
	}
	reflectOffsUnlock()
	return id
}