// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import "unsafe"
/*
* defined constants
*/
const (
// G status
//
// If you add to this list, add to the list
// of "okay during garbage collection" status
// in mgcmark.go too.
_Gidle = iota // 0
_Grunnable // 1 runnable and on a run queue
_Grunning // 2
_Gsyscall // 3
_Gwaiting // 4
_Gmoribund_unused // 5 currently unused, but hardcoded in gdb scripts
_Gdead // 6
_Genqueue // 7 Only the Gscanenqueue is used.
_Gcopystack // 8 in this state when newstack is moving the stack
// the following encode that the GC is scanning the stack and what to do when it is done
_Gscan = 0x1000 // atomicstatus&~Gscan = the non-scan state,
// _Gscanidle = _Gscan + _Gidle, // Not used. Gidle only used with newly malloced gs
_Gscanrunnable = _Gscan + _Grunnable // 0x1001 When scanning completes make it Grunnable (it is already on run queue)
_Gscanrunning = _Gscan + _Grunning // 0x1002 Used to tell preemption newstack routine to scan preempted stack.
_Gscansyscall = _Gscan + _Gsyscall // 0x1003 When scanning completes make it Gsyscall
_Gscanwaiting = _Gscan + _Gwaiting // 0x1004 When scanning completes make it Gwaiting
// _Gscanmoribund_unused, // not possible
// _Gscandead, // not possible
_Gscanenqueue = _Gscan + _Genqueue // When scanning completes make it Grunnable and put on runqueue
)
const (
// P status
_Pidle = iota
_Prunning
_Psyscall
_Pgcstop
_Pdead
)
// The next line makes 'go generate' write the zgen_*.go files with
// per-OS and per-arch information, including constants
// named goos_$GOOS and goarch_$GOARCH for every
// known GOOS and GOARCH. The constant is 1 on the
// current system, 0 otherwise; multiplying by them is
// useful for defining GOOS- or GOARCH-specific constants.
//go:generate go run gengoos.go
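// Illustrative sketch: the generated constants make per-OS values possible
// without build tags. The name and sizes below are hypothetical, purely for
// illustration; goos_windows is 1 when GOOS=windows and 0 otherwise.
//
//	const _exampleStackSystem = goos_windows*512 + goos_plan9*512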
type mutex struct {
// Futex-based impl treats it as uint32 key,
// while sema-based impl as M* waitm.
// Used to be a union, but unions break precise GC.
key uintptr
}
type note struct {
// Futex-based impl treats it as uint32 key,
// while sema-based impl as M* waitm.
// Used to be a union, but unions break precise GC.
key uintptr
}
type _string struct {
str *byte
len int
}
type funcval struct {
fn uintptr
// variable-size, fn-specific data here
}
type iface struct {
tab *itab
data unsafe.Pointer
}
type eface struct {
_type *_type
data unsafe.Pointer
}
// The guintptr, muintptr, and puintptr are all used to bypass write barriers.
// It is particularly important to avoid write barriers when the current P has
// been released, because the GC thinks the world is stopped, and an
// unexpected write barrier would not be synchronized with the GC,
// which can lead to a half-executed write barrier that has marked the object
// but not queued it. If the GC skips the object and completes before the
// queuing can occur, it will incorrectly free the object.
//
// We tried using special assignment functions invoked only when not
// holding a running P, but then some updates to a particular memory
// word went through write barriers and some did not. This breaks the
// write barrier shadow checking mode, and it is also scary: better to have
// a word that is completely ignored by the GC than to have one for which
// only a few updates are ignored.
//
// Gs, Ms, and Ps are always reachable via true pointers in the
// allgs, allm, and allp lists or (during allocation before they reach those lists)
// from stack variables.
// A guintptr holds a goroutine pointer, but typed as a uintptr
// to bypass write barriers. It is used in the Gobuf goroutine state
// and in scheduling lists that are manipulated without a P.
//
// The Gobuf.g goroutine pointer is almost always updated by assembly code.
// In one of the few places it is updated by Go code - func save - it must be
// treated as a uintptr to avoid a write barrier being emitted at a bad time.
// Instead of figuring out how to emit the write barriers missing in the
// assembly manipulation, we change the type of the field to uintptr,
// so that it does not require write barriers at all.
//
// Goroutine structs are published in the allg list and never freed.
// That will keep the goroutine structs from being collected.
// There is never a time that Gobuf.g's contain the only references
// to a goroutine: the publishing of the goroutine in allg comes first.
// Goroutine pointers are also kept in non-GC-visible places like TLS,
// so I can't see them ever moving. If we did want to start moving data
// in the GC, we'd need to allocate the goroutine structs from an
// alternate arena. Using guintptr doesn't make that problem any worse.
type guintptr uintptr
func (gp guintptr) ptr() *g { return (*g)(unsafe.Pointer(gp)) }
func (gp *guintptr) set(g *g) { *gp = guintptr(unsafe.Pointer(g)) }
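// Illustrative sketch (hypothetical helper; assumes the caller holds the
// appropriate scheduler lock): pushing a g onto a list that may be touched
// while the current M holds no P. Both stores go through guintptr, so they
// are plain uintptr writes and emit no write barrier.
func exampleListPush(head *guintptr, gp *g) {
	gp.schedlink = *head // guintptr-to-guintptr copy, no barrier
	head.set(gp)         // stores gp as a uintptr, no barrier
}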
type puintptr uintptr
func (pp puintptr) ptr() *p { return (*p)(unsafe.Pointer(pp)) }
func (pp *puintptr) set(p *p) { *pp = puintptr(unsafe.Pointer(p)) }
type muintptr uintptr
func (mp muintptr) ptr() *m { return (*m)(unsafe.Pointer(mp)) }
func (mp *muintptr) set(m *m) { *mp = muintptr(unsafe.Pointer(m)) }
type gobuf struct {
// The offsets of sp, pc, and g are known to (hard-coded in) libmach.
sp uintptr
pc uintptr
g guintptr
ctxt unsafe.Pointer // this has to be a pointer so that gc scans it
ret uintreg
lr uintptr
bp uintptr // for GOEXPERIMENT=framepointer
}
// Known to compiler.
// Changes here must also be made in src/cmd/internal/gc/select.go's selecttype.
type sudog struct {
g *g
selectdone *uint32
next *sudog
prev *sudog
elem unsafe.Pointer // data element
releasetime int64
nrelease int32 // -1 for acquire
waitlink *sudog // g.waiting list
}
type gcstats struct {
// the struct must consist of only uint64's,
// because it is cast to []uint64.
nhandoff uint64
nhandoffcnt uint64
nprocyield uint64
nosyield uint64
nsleep uint64
}
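// Illustrative sketch (hypothetical helper): because gcstats holds nothing
// but uint64 fields, it can be viewed as a flat uint64 array, e.g. to merge
// per-m stats into a global total.
func exampleMergeGcstats(dst, src *gcstats) {
	d := (*[unsafe.Sizeof(gcstats{}) / 8]uint64)(unsafe.Pointer(dst))
	s := (*[unsafe.Sizeof(gcstats{}) / 8]uint64)(unsafe.Pointer(src))
	for i := range d {
		d[i] += s[i]
	}
}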
type libcall struct {
fn uintptr
n uintptr // number of parameters
args uintptr // parameters
r1 uintptr // return values
r2 uintptr
err uintptr // error number
}
// describes how to handle callback
type wincallbackcontext struct {
gobody unsafe.Pointer // go function to call
argsize uintptr // callback arguments size (in bytes)
restorestack uintptr // adjust stack on return by (in bytes) (386 only)
cleanstack bool
}
// Stack describes a Go execution stack.
// The bounds of the stack are exactly [lo, hi),
// with no implicit data structures on either side.
type stack struct {
lo uintptr
hi uintptr
}
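// Illustrative sketch (hypothetical helper): with the half-open [lo, hi)
// convention, the usable size of a stack is simply the difference.
func exampleStackSize(s stack) uintptr {
	return s.hi - s.lo
}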
type g struct {
// Stack parameters.
// stack describes the actual stack memory: [stack.lo, stack.hi).
// stackguard0 is the stack pointer compared in the Go stack growth prologue.
// It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
// stackguard1 is the stack pointer compared in the C stack growth prologue.
// It is stack.lo+StackGuard on g0 and gsignal stacks.
// It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
stack stack // offset known to runtime/cgo
stackguard0 uintptr // offset known to liblink
stackguard1 uintptr // offset known to liblink
_panic *_panic // innermost panic - offset known to liblink
_defer *_defer // innermost defer
sched gobuf
syscallsp uintptr // if status==gsyscall, syscallsp = sched.sp to use during gc
syscallpc uintptr // if status==gsyscall, syscallpc = sched.pc to use during gc
param unsafe.Pointer // passed parameter on wakeup
atomicstatus uint32
goid int64
waitsince int64 // approx time when the g became blocked
waitreason string // if status==gwaiting
schedlink guintptr
preempt bool // preemption signal, duplicates stackguard0 = stackpreempt
paniconfault bool // panic (instead of crash) on unexpected fault address
preemptscan bool // preempted g does scan for gc
gcworkdone bool // debug: cleared at beginning of gc work phase cycle, set by gcphasework, tested at end of cycle
gcscanvalid bool // false at start of gc cycle, true if G has not run since last scan
throwsplit bool // must not split stack
raceignore int8 // ignore race detection events
m *m // for debuggers, but offset not hard-coded
lockedm *m
sig uint32
writebuf []byte
sigcode0 uintptr
sigcode1 uintptr
sigpc uintptr
gopc uintptr // pc of go statement that created this goroutine
startpc uintptr // pc of goroutine function
racectx uintptr
waiting *sudog // sudog structures this g is waiting on (that have a valid elem ptr)
readyg *g // scratch for readyExecute
// Per-G gcController state
gcalloc uintptr // bytes allocated during this GC cycle
gcscanwork int64 // scan work done (or stolen) this GC cycle
}
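// Illustrative sketch of how stackguard0 doubles as a preemption flag
// (pseudocode; the real comparison is emitted by the compiler as the stack
// growth prologue):
//
//	if sp < gp.stackguard0 {
//	    // Either the stack really is too small, or stackguard0 was set to
//	    // the StackPreempt sentinel to force this goroutine into the
//	    // morestack path, where the preempt flag is checked.
//	    morestack()
//	}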
type mts struct {
tv_sec int64
tv_nsec int64
}
type mscratch struct {
v [6]uintptr
}
type m struct {
g0 *g // goroutine with scheduling stack
morebuf gobuf // gobuf arg to morestack
// Fields not known to debuggers.
procid uint64 // for debuggers, but offset not hard-coded
gsignal *g // signal-handling g
tls [4]uintptr // thread-local storage (for x86 extern register)
mstartfn func()
curg *g // current running goroutine
caughtsig guintptr // goroutine running during fatal signal
p puintptr // attached p for executing go code (nil if not executing go code)
nextp puintptr
id int32
mallocing int32
throwing int32
preemptoff string // if != "", keep curg running on this m
locks int32
softfloat int32
dying int32
profilehz int32
helpgc int32
spinning bool // m is out of work and is actively looking for work
blocked bool // m is blocked on a note
inwb bool // m is executing a write barrier
printlock int8
fastrand uint32
ncgocall uint64 // number of cgo calls in total
ncgo int32 // number of cgo calls currently in progress
park note
alllink *m // on allm
schedlink muintptr
machport uint32 // return address for mach ipc (os x)
mcache *mcache
lockedg *g
createstack [32]uintptr // stack that created this thread.
freglo [16]uint32 // d[i] lsb and f[i]
freghi [16]uint32 // d[i] msb and f[i+16]
fflag uint32 // floating point compare flags
locked uint32 // tracking for lockosthread
nextwaitm uintptr // next m waiting for lock
waitsema uintptr // semaphore for parking on locks
waitsemacount uint32
waitsemalock uint32
gcstats gcstats
currentwbuf uintptr // use locks or atomic operations such as xchguintptr to access.
needextram bool
traceback uint8
waitunlockf unsafe.Pointer // todo go func(*g, unsafe.pointer) bool
waitlock unsafe.Pointer
waittraceev byte
waittraceskip int
syscalltick uint32
//#ifdef GOOS_windows
thread uintptr // thread handle
// these are here because they are too large to be on the stack
// of low-level NOSPLIT functions.
libcall libcall
libcallpc uintptr // for cpu profiler
libcallsp uintptr
libcallg guintptr
//#endif
//#ifdef GOOS_solaris
perrno *int32 // pointer to tls errno
// these are here because they are too large to be on the stack
// of low-level NOSPLIT functions.
//LibCall libcall;
ts mts
scratch mscratch
//#endif
//#ifdef GOOS_plan9
notesig *int8
errstr *byte
//#endif
}
type p struct {
lock mutex
id int32
status uint32 // one of pidle/prunning/...
link puintptr
schedtick uint32 // incremented on every scheduler call
syscalltick uint32 // incremented on every system call
m muintptr // back-link to associated m (nil if idle)
mcache *mcache
deferpool [5][]*_defer // pool of available defer structs of different sizes (see panic.go)
deferpoolbuf [5][32]*_defer
// Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
goidcache uint64
goidcacheend uint64
// Queue of runnable goroutines.
runqhead uint32
runqtail uint32
runq [256]*g
// Available G's (status == Gdead)
gfree *g
gfreecnt int32
sudogcache []*sudog
sudogbuf [128]*sudog
tracebuf *traceBuf
palloc persistentAlloc // per-P to avoid mutex
// Per-P GC state
gcAssistTime int64 // Nanoseconds in assistAlloc
gcBgMarkWorker *g
gcBgMarkIdle bool
pad [64]byte
}
const (
// The max value of GOMAXPROCS.
// There are no fundamental restrictions on the value.
_MaxGomaxprocs = 1 << 8
)
type schedt struct {
lock mutex
goidgen uint64
midle muintptr // idle m's waiting for work
nmidle int32 // number of idle m's waiting for work
nmidlelocked int32 // number of locked m's waiting for work
mcount int32 // number of m's that have been created
maxmcount int32 // maximum number of m's allowed (or die)
pidle puintptr // idle p's
npidle uint32
nmspinning uint32
// Global runnable queue.
runqhead guintptr
runqtail guintptr
runqsize int32
// Global cache of dead G's.
gflock mutex
gfree *g
ngfree int32
// Central cache of sudog structs.
sudoglock mutex
sudogcache *sudog
// Central pool of available defer structs of different sizes.
deferlock mutex
deferpool [5]*_defer
gcwaiting uint32 // gc is waiting to run
stopwait int32
stopnote note
sysmonwait uint32
sysmonnote note
lastpoll uint64
profilehz int32 // cpu profiling rate
procresizetime int64 // nanotime() of last change to gomaxprocs
totaltime int64 // ∫gomaxprocs dt up to procresizetime
}
// The m->locked word holds two pieces of state counting active calls to LockOSThread/lockOSThread.
// The low bit (LockExternal) is a boolean reporting whether any LockOSThread call is active.
// External locks are not recursive; a second lock is silently ignored.
// The upper bits of m->lockedcount record the nesting depth of calls to lockOSThread
// (counting up by LockInternal), popped by unlockOSThread (counting down by LockInternal).
// Internal locks can be recursive. For instance, a lock for cgo can occur while the main
// goroutine is holding the lock during the initialization phase.
const (
_LockExternal = 1
_LockInternal = 2
)
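// Illustrative sketch (hypothetical helpers, not the real implementation):
// external locks set a single bit, internal locks count in the upper bits.
func exampleLockExternal(mp *m)   { mp.locked |= _LockExternal }
func exampleLockInternal(mp *m)   { mp.locked += _LockInternal }
func exampleUnlockInternal(mp *m) { mp.locked -= _LockInternal }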
type sigtabtt struct {
flags int32
name *int8
}
const (
_SigNotify = 1 << 0 // let signal.Notify have signal, even if from kernel
_SigKill = 1 << 1 // if signal.Notify doesn't take it, exit quietly
_SigThrow = 1 << 2 // if signal.Notify doesn't take it, exit loudly
_SigPanic = 1 << 3 // if the signal is from the kernel, panic
_SigDefault = 1 << 4 // if the signal isn't explicitly requested, don't monitor it
_SigHandling = 1 << 5 // our signal handler is registered
_SigIgnored = 1 << 6 // the signal was ignored before we registered for it
_SigGoExit = 1 << 7 // cause all runtime procs to exit (only used on Plan 9).
_SigSetStack = 1 << 8 // add SA_ONSTACK to libc handler
)
// Layout of in-memory per-function information prepared by linker
// See http://golang.org/s/go12symtab.
// Keep in sync with linker
// and with package debug/gosym and with symtab.go in package runtime.
type _func struct {
entry uintptr // start pc
nameoff int32 // function name
args int32 // in/out args size
frame int32 // legacy frame size; use pcsp if possible
pcsp int32
pcfile int32
pcln int32
npcdata int32
nfuncdata int32
}
// layout of Itab known to compilers
// allocated in non-garbage-collected memory
type itab struct {
inter *interfacetype
_type *_type
link *itab
bad int32
unused int32
fun [1]uintptr // variable sized
}
// Lock-free stack node.
// Also known to export_test.go.
type lfnode struct {
next uint64
pushcnt uintptr
}
// Indicates to the write barrier and synchronization code which task to perform.
const (
_GCoff = iota // GC not running, write barrier disabled
_GCquiesce // unused state
_GCstw // unused state
_GCscan // GC collecting roots into workbufs, write barrier disabled
_GCmark // GC marking from workbufs, write barrier ENABLED
_GCmarktermination // GC mark termination: allocate black, P's help GC, write barrier ENABLED
_GCsweep // GC mark completed; sweeping in background, write barrier disabled
)
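// Illustrative sketch (hypothetical helper): the write barrier only needs to
// run while the GC is marking, so a gate like this can be derived from the
// phases above.
func exampleNeedwb() bool {
	return gcphase == _GCmark || gcphase == _GCmarktermination
}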
type forcegcstate struct {
lock mutex
g *g
idle uint32
}
var gcphase uint32
/*
* known to compiler
*/
const (
_Structrnd = regSize
)
// startup_random_data holds random bytes initialized at startup. These come from
// the ELF AT_RANDOM auxiliary vector (vdso_linux_amd64.go or os_linux_386.go).
var startupRandomData []byte
// extendRandom extends the random numbers in r[:n] to the whole slice r.
// Treats n<0 as n==0.
func extendRandom(r []byte, n int) {
if n < 0 {
n = 0
}
for n < len(r) {
// Extend random bits using hash function & time seed
w := n
if w > 16 {
w = 16
}
h := memhash(unsafe.Pointer(&r[n-w]), uintptr(nanotime()), uintptr(w))
for i := 0; i < ptrSize && n < len(r); i++ {
r[n] = byte(h)
n++
h >>= 8
}
}
}
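// Illustrative usage sketch (hypothetical helper): grow whatever startup
// entropy is available into a fixed-size seed.
func exampleSeed() [32]byte {
	var seed [32]byte
	n := copy(seed[:], startupRandomData)
	extendRandom(seed[:], n)
	return seed
}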
/*
* deferred subroutine calls
*/
type _defer struct {
siz int32
started bool
sp uintptr // sp at time of defer
pc uintptr
fn *funcval
_panic *_panic // panic that is running defer
link *_defer
}
/*
* panics
*/
type _panic struct {
argp unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink
arg interface{} // argument to panic
link *_panic // link to earlier panic
recovered bool // whether this panic is over
aborted bool // the panic was aborted
}
/*
* stack traces
*/
type stkframe struct {
fn *_func // function being run
pc uintptr // program counter within fn
continpc uintptr // program counter where execution can continue, or 0 if not
lr uintptr // program counter at caller aka link register
sp uintptr // stack pointer at pc
fp uintptr // stack pointer at caller aka frame pointer
varp uintptr // top of local variables
argp uintptr // pointer to function arguments
arglen uintptr // number of bytes at argp
argmap *bitvector // force use of this argmap
}
const (
_TraceRuntimeFrames = 1 << 0 // include frames for internal runtime functions.
_TraceTrap = 1 << 1 // the initial PC, SP are from a trap, not a return PC from a call
)
const (
// The maximum number of frames we print for a traceback
_TracebackMaxFrames = 100
)
var (
emptystring string
allg **g
allglen uintptr
lastg *g
allm *m
allp [_MaxGomaxprocs + 1]*p
gomaxprocs int32
panicking uint32
goos *int8
ncpu int32
signote note
forcegc forcegcstate
sched schedt
newprocs int32
// Information about what cpu features are available.
// Set on startup in asm_{x86,amd64}.s.
cpuid_ecx uint32
cpuid_edx uint32
lfenceBeforeRdtsc bool
)
// Set by the linker so the runtime can determine the buildmode.
var (
islibrary bool // -buildmode=c-shared
isarchive bool // -buildmode=c-archive
)
/*
* mutual exclusion locks. in the uncontended case,
* as fast as spin locks (just a few user-level instructions),
* but on the contention path they sleep in the kernel.
* a zeroed Mutex is unlocked (no need to initialize each lock).
*/
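// Illustrative sketch (hypothetical helper) built on the lock/unlock
// primitives declared in lock_futex.go and lock_sema.go:
func exampleWithLock(l *mutex, f func()) {
	lock(l)
	f()
	unlock(l)
}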
/*
* sleep and wakeup on one-time events.
* before any calls to notesleep or notewakeup,
* must call noteclear to initialize the Note.
* then, exactly one thread can call notesleep
* and exactly one thread can call notewakeup (once).
* once notewakeup has been called, the notesleep
* will return. future notesleep will return immediately.
* subsequent noteclear must be called only after
* previous notesleep has returned, e.g. it's disallowed
* to call noteclear straight after notewakeup.
*
* notetsleep is like notesleep but wakes up after
* a given number of nanoseconds even if the event
* has not yet happened. if a goroutine uses notetsleep to
* wake up early, it must wait to call noteclear until it
* can be sure that no other goroutine is calling
* notewakeup.
*
* notesleep/notetsleep are generally called on g0,
* notetsleepg is similar to notetsleep but is called on user g.
*/
// bool runtime·notetsleep(Note*, int64); // false - timeout
// bool runtime·notetsleepg(Note*, int64); // false - timeout
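// Illustrative sketch (hypothetical helpers) of the one-shot protocol: the
// waiter clears the note before anyone can wake it, then sleeps; exactly one
// other thread wakes it, at most once per noteclear.
func exampleNoteWait(n *note) {
	noteclear(n) // must precede any notesleep/notewakeup on n
	notesleep(n) // blocks until notewakeup; must run on g0
}

func exampleNoteSignal(n *note) {
	notewakeup(n)
}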
/*
* Lock-free stack.
* Initialize uint64 head to 0, compare with 0 to test for emptiness.
* The stack does not keep pointers to nodes,
* so they can be garbage collected if there are no other pointers to nodes.
*/
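// Illustrative sketch (hypothetical), assuming the lfstackpush/lfstackpop
// primitives from lfstack.go. The caller must keep nodes reachable (or put
// them outside the GC heap), since the stack itself does not retain them:
//
//	var head uint64          // zero value = empty stack
//	lfstackpush(&head, node) // node is a *lfnode kept alive elsewhere
//	n := lfstackpop(&head)   // nil when the stack is empty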
// For mmap, we only pass the lower 32 bits of the file offset to the
// assembly routine; the higher bits (if required) should be provided
// by the assembly routine as 0.