// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
|
|
|
|
|
|
|
|
package runtime
|
|
|
|
|
|
|
|
import "unsafe"
|
|
|
|
|
|
|
|
/*
|
|
|
|
* defined constants
|
|
|
|
*/
|
|
|
|
const (
	// G status
	//
	// If you add to this list, add to the list
	// of "okay during garbage collection" status
	// in mgcmark.go too.
	_Gidle            = iota // 0
	_Grunnable               // 1 runnable and on a run queue
	_Grunning                // 2
	_Gsyscall                // 3
	_Gwaiting                // 4
	_Gmoribund_unused        // 5 currently unused, but hardcoded in gdb scripts
	_Gdead                   // 6
	_Genqueue                // 7 Only the Gscanenqueue form is used.
	_Gcopystack              // 8 in this state when newstack is moving the stack

	// The following encode that the GC is scanning the stack and what to do when it is done.
	_Gscan = 0x1000 // atomicstatus&^_Gscan is the non-scan state

	// _Gscanidle = _Gscan + _Gidle, // Not used. Gidle only used with newly malloced gs
	_Gscanrunnable = _Gscan + _Grunnable // 0x1001 When scanning completes make Grunnable (it is already on run queue)
	_Gscanrunning  = _Gscan + _Grunning  // 0x1002 Used to tell preemption newstack routine to scan preempted stack.
	_Gscansyscall  = _Gscan + _Gsyscall  // 0x1003 When scanning completes make it Gsyscall
	_Gscanwaiting  = _Gscan + _Gwaiting  // 0x1004 When scanning completes make it Gwaiting
	// _Gscanmoribund_unused, // not possible
	// _Gscandead, // not possible
	_Gscanenqueue = _Gscan + _Genqueue // When scanning completes make it Grunnable and put on runqueue
)
|
|
|
|
|
|
|
|
const (
	// P status
	_Pidle = iota
	_Prunning
	_Psyscall
	_Pgcstop
	_Pdead
)
|
|
|
|
|
2014-11-18 10:07:50 -07:00
|
|
|
// The next line makes 'go generate' write the zgen_*.go files with
// per-OS and per-arch information, including constants
// named goos_$GOOS and goarch_$GOARCH for every
// known GOOS and GOARCH. The constant is 1 on the
// current system, 0 otherwise; multiplying by them is
// useful for defining GOOS- or GOARCH-specific constants.
//go:generate go run gengoos.go
|
2014-11-11 15:05:19 -07:00
|
|
|
|
|
|
|
// mutex is the runtime's mutual-exclusion lock.
// Its single word is interpreted differently by the two lock
// implementations, so it must stay a plain uintptr.
type mutex struct {
	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.
	key uintptr
}
|
|
|
|
|
|
|
|
// note is the runtime's event-notification primitive; like mutex,
// its single word is interpreted differently by the futex- and
// semaphore-based lock implementations.
type note struct {
	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.
	key uintptr
}
|
|
|
|
|
|
|
|
// _string is the runtime layout of a Go string:
// a pointer to the bytes and the length in bytes.
type _string struct {
	str *byte
	len int
}
|
|
|
|
|
|
|
|
// funcval is the runtime layout of a function value:
// the code pointer, followed in memory by any closure data.
type funcval struct {
	fn uintptr
	// variable-size, fn-specific data here
}
|
|
|
|
|
|
|
|
// iface is the layout of a non-empty interface value:
// an itab (interface/type pairing) and the data word.
type iface struct {
	tab  *itab
	data unsafe.Pointer
}
|
|
|
|
|
|
|
|
// eface is the layout of an empty interface (interface{}) value:
// the dynamic type and the data word.
type eface struct {
	_type *_type
	data  unsafe.Pointer
}
|
|
|
|
|
2015-04-16 22:21:30 -06:00
|
|
|
// The guintptr, muintptr, and puintptr are all used to bypass write barriers.
// It is particularly important to avoid write barriers when the current P has
// been released, because the GC thinks the world is stopped, and an
// unexpected write barrier would not be synchronized with the GC,
// which can lead to a half-executed write barrier that has marked the object
// but not queued it. If the GC skips the object and completes before the
// queuing can occur, it will incorrectly free the object.
//
// We tried using special assignment functions invoked only when not
// holding a running P, but then some updates to a particular memory
// word went through write barriers and some did not. This breaks the
// write barrier shadow checking mode, and it is also scary: better to have
// a word that is completely ignored by the GC than to have one for which
// only a few updates are ignored.
//
// Gs, Ms, and Ps are always reachable via true pointers in the
// allgs, allm, and allp lists or (during allocation before they reach those lists)
// from stack variables.

// A guintptr holds a goroutine pointer, but typed as a uintptr
// to bypass write barriers. It is used in the Gobuf goroutine state
// and in scheduling lists that are manipulated without a P.
//
// The Gobuf.g goroutine pointer is almost always updated by assembly code.
// In one of the few places it is updated by Go code - func save - it must be
// treated as a uintptr to avoid a write barrier being emitted at a bad time.
// Instead of figuring out how to emit the write barriers missing in the
// assembly manipulation, we change the type of the field to uintptr,
// so that it does not require write barriers at all.
//
// Goroutine structs are published in the allg list and never freed.
// That will keep the goroutine structs from being collected.
// There is never a time that Gobuf.g's contain the only references
// to a goroutine: the publishing of the goroutine in allg comes first.
// Goroutine pointers are also kept in non-GC-visible places like TLS,
// so I can't see them ever moving. If we did want to start moving data
// in the GC, we'd need to allocate the goroutine structs from an
// alternate arena. Using guintptr doesn't make that problem any worse.
type guintptr uintptr

// ptr converts the stored word back to a *g.
func (gp guintptr) ptr() *g { return (*g)(unsafe.Pointer(gp)) }

// set stores g into gp without emitting a write barrier.
func (gp *guintptr) set(g *g) { *gp = guintptr(unsafe.Pointer(g)) }
|
2014-12-22 20:43:49 -07:00
|
|
|
|
2015-04-16 22:21:30 -06:00
|
|
|
// puintptr is a *p typed as uintptr to bypass write barriers;
// see the guintptr comment above for the rationale.
type puintptr uintptr

// ptr converts the stored word back to a *p.
func (pp puintptr) ptr() *p { return (*p)(unsafe.Pointer(pp)) }

// set stores p into pp without emitting a write barrier.
func (pp *puintptr) set(p *p) { *pp = puintptr(unsafe.Pointer(p)) }
|
runtime: Remove write barriers during STW.
The GC assumes that there will be no asynchronous write barriers when
the world is stopped. This keeps the synchronization between write
barriers and the GC simple. However, currently, there are a few places
in runtime code where this assumption does not hold.
The GC stops the world by collecting all Ps, which stops all user Go
code, but small parts of the runtime can run without a P. For example,
the code that releases a P must still deschedule its G onto a runnable
queue before stopping. Similarly, when a G returns from a long-running
syscall, it must run code to reacquire a P.
Currently, this code can contain write barriers. This can lead to the
GC collecting reachable objects if something like the following
sequence of events happens:
1. GC stops the world by collecting all Ps.
2. G #1 returns from a syscall (for example), tries to install a
pointer to object X, and calls greyobject on X.
3. greyobject on G #1 marks X, but does not yet add it to a write
buffer. At this point, X is effectively black, not grey, even though
it may point to white objects.
4. GC reaches X through some other path and calls greyobject on X, but
greyobject does nothing because X is already marked.
5. GC completes.
6. greyobject on G #1 adds X to a work buffer, but it's too late.
7. Objects that were reachable only through X are incorrectly collected.
To fix this, we check the invariant that no asynchronous write
barriers happen when the world is stopped by checking that write
barriers always have a P, and modify all currently known sources of
these writes to disable the write barrier. In all modified cases this
is safe because the object in question will always be reachable via
some other path.
Some of the trace code was turned off, in particular the
code that traces returning from a syscall. The GC assumes
that as far as the heap is concerned the thread is stopped
when it is in a syscall. Upon returning the trace code
must not do any heap writes for the same reasons discussed
above.
Fixes #10098
Fixes #9953
Fixes #9951
Fixes #9884
May relate to #9610 #9771
Change-Id: Ic2e70b7caffa053e56156838eb8d89503e3c0c8a
Reviewed-on: https://go-review.googlesource.com/7504
Reviewed-by: Austin Clements <austin@google.com>
2015-03-12 12:19:21 -06:00
|
|
|
|
2015-04-16 22:21:30 -06:00
|
|
|
// muintptr is a *m typed as uintptr to bypass write barriers;
// see the guintptr comment above for the rationale.
type muintptr uintptr

// ptr converts the stored word back to a *m.
func (mp muintptr) ptr() *m { return (*m)(unsafe.Pointer(mp)) }

// set stores m into mp without emitting a write barrier.
func (mp *muintptr) set(m *m) { *mp = muintptr(unsafe.Pointer(m)) }
|
runtime: Remove write barriers during STW.
The GC assumes that there will be no asynchronous write barriers when
the world is stopped. This keeps the synchronization between write
barriers and the GC simple. However, currently, there are a few places
in runtime code where this assumption does not hold.
The GC stops the world by collecting all Ps, which stops all user Go
code, but small parts of the runtime can run without a P. For example,
the code that releases a P must still deschedule its G onto a runnable
queue before stopping. Similarly, when a G returns from a long-running
syscall, it must run code to reacquire a P.
Currently, this code can contain write barriers. This can lead to the
GC collecting reachable objects if something like the following
sequence of events happens:
1. GC stops the world by collecting all Ps.
2. G #1 returns from a syscall (for example), tries to install a
pointer to object X, and calls greyobject on X.
3. greyobject on G #1 marks X, but does not yet add it to a write
buffer. At this point, X is effectively black, not grey, even though
it may point to white objects.
4. GC reaches X through some other path and calls greyobject on X, but
greyobject does nothing because X is already marked.
5. GC completes.
6. greyobject on G #1 adds X to a work buffer, but it's too late.
7. Objects that were reachable only through X are incorrectly collected.
To fix this, we check the invariant that no asynchronous write
barriers happen when the world is stopped by checking that write
barriers always have a P, and modify all currently known sources of
these writes to disable the write barrier. In all modified cases this
is safe because the object in question will always be reachable via
some other path.
Some of the trace code was turned off, in particular the
code that traces returning from a syscall. The GC assumes
that as far as the heap is concerned the thread is stopped
when it is in a syscall. Upon returning the trace code
must not do any heap writes for the same reasons discussed
above.
Fixes #10098
Fixes #9953
Fixes #9951
Fixes #9884
May relate to #9610 #9771
Change-Id: Ic2e70b7caffa053e56156838eb8d89503e3c0c8a
Reviewed-on: https://go-review.googlesource.com/7504
Reviewed-by: Austin Clements <austin@google.com>
2015-03-12 12:19:21 -06:00
|
|
|
|
2014-11-11 15:05:19 -07:00
|
|
|
// gobuf holds the register state saved and restored when switching
// goroutines: stack pointer, program counter, and the owning g.
type gobuf struct {
	// The offsets of sp, pc, and g are known to (hard-coded in) libmach.
	sp   uintptr
	pc   uintptr
	g    guintptr       // guintptr (not *g) so assembly and func save need no write barrier; see guintptr doc
	ctxt unsafe.Pointer // this has to be a pointer so that gc scans it
	ret  uintreg
	lr   uintptr
	bp   uintptr // for GOEXPERIMENT=framepointer
}
|
|
|
|
|
|
|
|
// sudog represents a g waiting in a list (e.g. for channel operations).
//
// Known to compiler.
// Changes here must also be made in src/cmd/internal/gc/select.go's selecttype.
type sudog struct {
	g           *g
	selectdone  *uint32
	next        *sudog
	prev        *sudog
	elem        unsafe.Pointer // data element
	releasetime int64
	nrelease    int32  // -1 for acquire
	waitlink    *sudog // g.waiting list
}
|
|
|
|
|
|
|
|
// gcstats accumulates garbage-collection bookkeeping counters.
type gcstats struct {
	// the struct must consist of only uint64's,
	// because it is cast to uint64[].
	nhandoff    uint64
	nhandoffcnt uint64
	nprocyield  uint64
	nosyield    uint64
	nsleep      uint64
}
|
|
|
|
|
|
|
|
// libcall describes a call into a system library function:
// the function pointer, its arguments, and its results.
type libcall struct {
	fn   uintptr
	n    uintptr // number of parameters
	args uintptr // parameters
	r1   uintptr // return values
	r2   uintptr
	err  uintptr // error number
}
|
|
|
|
|
|
|
|
// wincallbackcontext describes how to handle a Windows callback
// into Go code.
type wincallbackcontext struct {
	gobody       unsafe.Pointer // go function to call
	argsize      uintptr        // callback arguments size (in bytes)
	restorestack uintptr        // adjust stack on return by (in bytes) (386 only)
	cleanstack   bool
}
|
|
|
|
|
|
|
|
// Stack describes a Go execution stack.
// The bounds of the stack are exactly [lo, hi),
// with no implicit data structures on either side.
type stack struct {
	lo uintptr
	hi uintptr
}
|
|
|
|
|
|
|
|
// A g is the runtime representation of a goroutine.
// Several field offsets are hard-coded elsewhere (liblink, runtime/cgo);
// do not reorder fields without checking those consumers.
type g struct {
	// Stack parameters.
	// stack describes the actual stack memory: [stack.lo, stack.hi).
	// stackguard0 is the stack pointer compared in the Go stack growth prologue.
	// It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
	// stackguard1 is the stack pointer compared in the C stack growth prologue.
	// It is stack.lo+StackGuard on g0 and gsignal stacks.
	// It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
	stack       stack   // offset known to runtime/cgo
	stackguard0 uintptr // offset known to liblink
	stackguard1 uintptr // offset known to liblink

	_panic       *_panic // innermost panic - offset known to liblink
	_defer       *_defer // innermost defer
	sched        gobuf
	syscallsp    uintptr        // if status==gsyscall, syscallsp = sched.sp to use during gc
	syscallpc    uintptr        // if status==gsyscall, syscallpc = sched.pc to use during gc
	param        unsafe.Pointer // passed parameter on wakeup
	atomicstatus uint32
	goid         int64
	waitsince    int64  // approx time when the g became blocked
	waitreason   string // if status==gwaiting
	schedlink    guintptr
	preempt      bool // preemption signal, duplicates stackguard0 = stackpreempt
	paniconfault bool // panic (instead of crash) on unexpected fault address
	preemptscan  bool // preempted g does scan for gc
	gcworkdone   bool // debug: cleared at beginning of gc work phase cycle, set by gcphasework, tested at end of cycle
	gcscanvalid  bool // false at start of gc cycle, true if G has not run since last scan
	throwsplit   bool // must not split stack
	raceignore   int8 // ignore race detection events
	m            *m   // for debuggers, but offset not hard-coded
	lockedm      *m
	sig          uint32
	writebuf     []byte
	sigcode0     uintptr
	sigcode1     uintptr
	sigpc        uintptr
	gopc         uintptr // pc of go statement that created this goroutine
	startpc      uintptr // pc of goroutine function
	racectx      uintptr
	waiting      *sudog // sudog structures this g is waiting on (that have a valid elem ptr)
	readyg       *g     // scratch for readyExecute

	// Per-G gcController state
	gcalloc    uintptr // bytes allocated during this GC cycle
	gcscanwork int64   // scan work done (or stolen) this GC cycle
}
|
|
|
|
|
|
|
|
// mts mirrors a C timespec (seconds, nanoseconds).
// NOTE(review): appears to exist only for the Solaris-specific
// scratch area in m (field ts) — confirm.
type mts struct {
	tv_sec  int64
	tv_nsec int64
}
|
|
|
|
|
|
|
|
// mscratch provides scratch words that are too large to live on the
// stack of low-level NOSPLIT functions (see the m struct, which embeds
// one for that stated reason).
type mscratch struct {
	v [6]uintptr
}
|
|
|
|
|
|
|
|
// An m is the runtime representation of an OS thread.
// NOTE(review): some field offsets are known to debuggers (see the
// comments below); do not reorder fields without checking consumers.
type m struct {
	g0      *g    // goroutine with scheduling stack
	morebuf gobuf // gobuf arg to morestack

	// Fields not known to debuggers.
	procid        uint64     // for debuggers, but offset not hard-coded
	gsignal       *g         // signal-handling g
	tls           [4]uintptr // thread-local storage (for x86 extern register)
	mstartfn      func()
	curg          *g       // current running goroutine
	caughtsig     guintptr // goroutine running during fatal signal
	p             puintptr // attached p for executing go code (nil if not executing go code)
	nextp         puintptr
	id            int32
	mallocing     int32
	throwing      int32
	preemptoff    string // if != "", keep curg running on this m
	locks         int32
	softfloat     int32
	dying         int32
	profilehz     int32
	helpgc        int32
	spinning      bool // m is out of work and is actively looking for work
	blocked       bool // m is blocked on a note
	inwb          bool // m is executing a write barrier
	printlock     int8
	fastrand      uint32
	ncgocall      uint64 // number of cgo calls in total
	ncgo          int32  // number of cgo calls currently in progress
	park          note
	alllink       *m // on allm
	schedlink     muintptr
	machport      uint32 // return address for mach ipc (os x)
	mcache        *mcache
	lockedg       *g
	createstack   [32]uintptr // stack that created this thread.
	freglo        [16]uint32  // d[i] lsb and f[i]
	freghi        [16]uint32  // d[i] msb and f[i+16]
	fflag         uint32      // floating point compare flags
	locked        uint32      // tracking for lockosthread
	nextwaitm     uintptr     // next m waiting for lock
	waitsema      uintptr     // semaphore for parking on locks
	waitsemacount uint32
	waitsemalock  uint32
	gcstats       gcstats
	currentwbuf   uintptr // use locks or atomic operations such as xchguintptr to access.
	needextram    bool
	traceback     uint8
	waitunlockf   unsafe.Pointer // todo go func(*g, unsafe.pointer) bool
	waitlock      unsafe.Pointer
	waittraceev   byte
	waittraceskip int
	syscalltick   uint32

	//#ifdef GOOS_windows
	thread uintptr // thread handle
	// these are here because they are too large to be on the stack
	// of low-level NOSPLIT functions.
	libcall   libcall
	libcallpc uintptr // for cpu profiler
	libcallsp uintptr
	libcallg  guintptr
	//#endif

	//#ifdef GOOS_solaris
	perrno *int32 // pointer to tls errno
	// these are here because they are too large to be on the stack
	// of low-level NOSPLIT functions.
	//LibCall libcall;
	ts      mts
	scratch mscratch
	//#endif

	//#ifdef GOOS_plan9
	notesig *int8
	errstr  *byte
	//#endif
}
|
|
|
|
|
|
|
|
// A p holds per-processor scheduling state: a local run queue of
// runnable goroutines plus assorted per-P caches and pools.
type p struct {
	lock mutex

	id          int32
	status      uint32 // one of pidle/prunning/...
	link        puintptr
	schedtick   uint32   // incremented on every scheduler call
	syscalltick uint32   // incremented on every system call
	m           muintptr // back-link to associated m (nil if idle)
	mcache      *mcache

	deferpool    [5][]*_defer // pool of available defer structs of different sizes (see panic.go)
	deferpoolbuf [5][32]*_defer

	// Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
	goidcache    uint64
	goidcacheend uint64

	// Queue of runnable goroutines.
	runqhead uint32
	runqtail uint32
	runq     [256]*g

	// Available G's (status == Gdead)
	gfree    *g
	gfreecnt int32

	sudogcache []*sudog
	sudogbuf   [128]*sudog

	tracebuf *traceBuf

	palloc persistentAlloc // per-P to avoid mutex

	pad [64]byte // NOTE(review): presumably padding against false sharing between Ps — confirm
}
|
|
|
|
|
|
|
|
const (
	// The max value of GOMAXPROCS.
	// There are no fundamental restrictions on the value.
	_MaxGomaxprocs = 1 << 8
)
|
|
|
|
|
|
|
|
// schedt holds global scheduler state shared by all Ms and Ps,
// protected by lock unless a field's comment says otherwise.
type schedt struct {
	lock mutex

	goidgen uint64

	midle        muintptr // idle m's waiting for work
	nmidle       int32    // number of idle m's waiting for work
	nmidlelocked int32    // number of locked m's waiting for work
	mcount       int32    // number of m's that have been created
	maxmcount    int32    // maximum number of m's allowed (or die)

	pidle      puintptr // idle p's
	npidle     uint32
	nmspinning uint32

	// Global runnable queue.
	runqhead guintptr
	runqtail guintptr
	runqsize int32

	// Global cache of dead G's.
	gflock mutex
	gfree  *g
	ngfree int32

	// Central cache of sudog structs.
	sudoglock  mutex
	sudogcache *sudog

	// Central pool of available defer structs of different sizes.
	deferlock mutex
	deferpool [5]*_defer

	gcwaiting  uint32 // gc is waiting to run
	stopwait   int32
	stopnote   note
	sysmonwait uint32
	sysmonnote note
	lastpoll   uint64

	profilehz int32 // cpu profiling rate

	procresizetime int64 // nanotime() of last change to gomaxprocs
	totaltime      int64 // ∫gomaxprocs dt up to procresizetime
}
|
|
|
|
|
|
|
|
// The m->locked word holds two pieces of state counting active calls to LockOSThread/lockOSThread.
// The low bit (LockExternal) is a boolean reporting whether any LockOSThread call is active.
// External locks are not recursive; a second lock is silently ignored.
// The upper bits of m->lockedcount record the nesting depth of calls to lockOSThread
// (counting up by LockInternal), popped by unlockOSThread (counting down by LockInternal).
// Internal locks can be recursive. For instance, a lock for cgo can occur while the main
// goroutine is holding the lock during the initialization phase.
const (
	_LockExternal = 1
	_LockInternal = 2
)
|
|
|
|
|
|
|
|
// sigtabtt is a signal-table entry: per-signal flags
// (presumably the _Sig* bits below — confirm at use sites)
// plus the signal's name.
type sigtabtt struct {
	flags int32
	name  *int8
}
|
|
|
|
|
|
|
|
// Signal-disposition flag bits.
const (
	_SigNotify   = 1 << 0 // let signal.Notify have signal, even if from kernel
	_SigKill     = 1 << 1 // if signal.Notify doesn't take it, exit quietly
	_SigThrow    = 1 << 2 // if signal.Notify doesn't take it, exit loudly
	_SigPanic    = 1 << 3 // if the signal is from the kernel, panic
	_SigDefault  = 1 << 4 // if the signal isn't explicitly requested, don't monitor it
	_SigHandling = 1 << 5 // our signal handler is registered
	_SigIgnored  = 1 << 6 // the signal was ignored before we registered for it
	_SigGoExit   = 1 << 7 // cause all runtime procs to exit (only used on Plan 9).
	_SigSetStack = 1 << 8 // add SA_ONSTACK to libc handler
)
|
|
|
|
|
|
|
|
// Layout of in-memory per-function information prepared by linker
// See http://golang.org/s/go12symtab.
// Keep in sync with linker
// and with package debug/gosym and with symtab.go in package runtime.
type _func struct {
	entry   uintptr // start pc
	nameoff int32   // function name

	args  int32 // in/out args size
	frame int32 // legacy frame size; use pcsp if possible

	pcsp      int32
	pcfile    int32
	pcln      int32
	npcdata   int32
	nfuncdata int32
}
|
|
|
|
|
|
|
|
// layout of Itab known to compilers
// allocated in non-garbage-collected memory
type itab struct {
	inter  *interfacetype
	_type  *_type
	link   *itab
	bad    int32
	unused int32
	fun    [1]uintptr // variable sized
}
|
|
|
|
|
|
|
|
// Lock-free stack node.
// Also known to export_test.go.
type lfnode struct {
	next    uint64  // packed reference to the next node (see the lock-free stack comment below)
	pushcnt uintptr // push count — presumably to defeat ABA; verify in lfstack.go
}
|
|
|
|
|
|
|
|
// Indicates the current GC phase: what synchronization task to perform
// and whether the write barrier is enabled.
const (
	_GCoff             = iota // GC not running, write barrier disabled
	_GCquiesce                // unused state
	_GCstw                    // unused state
	_GCscan                   // GC collecting roots into workbufs, write barrier disabled
	_GCmark                   // GC marking from workbufs, write barrier ENABLED
	_GCmarktermination        // GC mark termination: allocate black, P's help GC, write barrier ENABLED
	_GCsweep                  // GC mark completed; sweeping in background, write barrier disabled
)
|
|
|
|
|
|
|
|
// forcegcstate holds the state of the helper that forces periodic GCs.
type forcegcstate struct {
	lock mutex  // protects the fields below — TODO confirm locking discipline
	g    *g     // the forcegc helper goroutine
	idle uint32 // non-zero while the helper is parked — presumably; verify accessors
}
|
|
|
|
|
|
|
|
// gcphase holds the current GC phase, one of the _GC* constants above.
var gcphase uint32
|
|
|
|
|
|
|
|
/*
 * known to compiler
 */
const (
	_Structrnd = regSize // rounding unit for struct sizes — equals the register size
)
|
|
|
|
|
2014-12-09 15:40:40 -07:00
|
|
|
// startupRandomData holds random bytes initialized at startup. These come from
// the ELF AT_RANDOM auxiliary vector (vdso_linux_amd64.go or os_linux_386.go).
// NOTE(review): may be empty on platforms without AT_RANDOM — callers must
// tolerate a nil slice; verify at the use sites.
var startupRandomData []byte
|
|
|
|
|
|
|
|
// extendRandom extends the random numbers in r[:n] to the whole slice r.
|
|
|
|
// Treats n<0 as n==0.
|
|
|
|
func extendRandom(r []byte, n int) {
|
|
|
|
if n < 0 {
|
|
|
|
n = 0
|
|
|
|
}
|
|
|
|
for n < len(r) {
|
|
|
|
// Extend random bits using hash function & time seed
|
|
|
|
w := n
|
|
|
|
if w > 16 {
|
|
|
|
w = 16
|
|
|
|
}
|
2015-01-06 17:42:48 -07:00
|
|
|
h := memhash(unsafe.Pointer(&r[n-w]), uintptr(nanotime()), uintptr(w))
|
2014-12-09 15:40:40 -07:00
|
|
|
for i := 0; i < ptrSize && n < len(r); i++ {
|
|
|
|
r[n] = byte(h)
|
|
|
|
n++
|
|
|
|
h >>= 8
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2014-11-11 15:05:19 -07:00
|
|
|
|
|
|
|
/*
 * deferred subroutine calls
 */

// A _defer is one entry on a goroutine's list of deferred calls.
type _defer struct {
	siz     int32    // size of the deferred call's argument area — presumably; verify against deferproc
	started bool     // the deferred call has started executing
	sp      uintptr  // sp at time of defer
	pc      uintptr  // pc at time of defer
	fn      *funcval // deferred function to invoke
	_panic  *_panic  // panic that is running defer
	link    *_defer  // link to the next _defer on the list
}
|
|
|
|
|
|
|
|
/*
 * panics
 */

// A _panic is one entry on a goroutine's list of active panics.
type _panic struct {
	argp      unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink
	arg       interface{}    // argument to panic
	link      *_panic        // link to earlier panic
	recovered bool           // whether this panic is over
	aborted   bool           // the panic was aborted
}
|
|
|
|
|
|
|
|
/*
 * stack traces
 */

// A stkframe describes one physical stack frame encountered
// while walking a goroutine's stack.
type stkframe struct {
	fn       *_func     // function being run
	pc       uintptr    // program counter within fn
	continpc uintptr    // program counter where execution can continue, or 0 if not
	lr       uintptr    // program counter at caller aka link register
	sp       uintptr    // stack pointer at pc
	fp       uintptr    // stack pointer at caller aka frame pointer
	varp     uintptr    // top of local variables
	argp     uintptr    // pointer to function arguments
	arglen   uintptr    // number of bytes at argp
	argmap   *bitvector // force use of this argmap
}
|
|
|
|
|
|
|
|
// Flags passed to the traceback routines to control their behavior.
const (
	_TraceRuntimeFrames = 1 << 0 // include frames for internal runtime functions.
	_TraceTrap          = 1 << 1 // the initial PC, SP are from a trap, not a return PC from a call
)
|
|
|
|
|
|
|
|
const (
	// _TracebackMaxFrames is the maximum number of stack frames
	// printed for a single traceback.
	_TracebackMaxFrames = 100
)
|
|
|
|
|
|
|
|
// Global scheduler and runtime state shared across the runtime.
var (
	emptystring string
	allg        **g     // all goroutines — presumably points at the backing array; see allglen
	allglen     uintptr // number of entries in allg
	lastg       *g
	allm        *m // all m's — presumably a linked list via an alllink field; verify
	allp        [_MaxGomaxprocs + 1]*p
	gomaxprocs  int32
	panicking   uint32
	goos        *int8 // OS name as a C string
	ncpu        int32
	signote     note
	forcegc     forcegcstate
	sched       schedt
	newprocs    int32

	// Information about what cpu features are available.
	// Set on startup in asm_{x86,amd64}.s.
	cpuid_ecx         uint32
	cpuid_edx         uint32
	lfenceBeforeRdtsc bool
)
|
2015-03-25 18:50:35 -06:00
|
|
|
|
2015-04-09 13:09:52 -06:00
|
|
|
// Set by the linker so the runtime can determine which buildmode
// the program was compiled with.
var (
	islibrary bool // -buildmode=c-shared
	isarchive bool // -buildmode=c-archive
)
|
|
|
|
|
|
|
|
/*
|
|
|
|
* mutual exclusion locks. in the uncontended case,
|
|
|
|
* as fast as spin locks (just a few user-level instructions),
|
|
|
|
* but on the contention path they sleep in the kernel.
|
|
|
|
* a zeroed Mutex is unlocked (no need to initialize each lock).
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* sleep and wakeup on one-time events.
|
|
|
|
* before any calls to notesleep or notewakeup,
|
|
|
|
* must call noteclear to initialize the Note.
|
|
|
|
* then, exactly one thread can call notesleep
|
|
|
|
* and exactly one thread can call notewakeup (once).
|
|
|
|
* once notewakeup has been called, the notesleep
|
|
|
|
* will return. future notesleep will return immediately.
|
|
|
|
* subsequent noteclear must be called only after
|
|
|
|
* previous notesleep has returned, e.g. it's disallowed
|
|
|
|
* to call noteclear straight after notewakeup.
|
|
|
|
*
|
|
|
|
* notetsleep is like notesleep but wakes up after
|
|
|
|
* a given number of nanoseconds even if the event
|
|
|
|
* has not yet happened. if a goroutine uses notetsleep to
|
|
|
|
* wake up early, it must wait to call noteclear until it
|
|
|
|
* can be sure that no other goroutine is calling
|
|
|
|
* notewakeup.
|
|
|
|
*
|
|
|
|
* notesleep/notetsleep are generally called on g0,
|
|
|
|
* notetsleepg is similar to notetsleep but is called on user g.
|
|
|
|
*/
|
|
|
|
// bool runtime·notetsleep(Note*, int64); // false - timeout
|
|
|
|
// bool runtime·notetsleepg(Note*, int64); // false - timeout
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Lock-free stack.
|
|
|
|
* Initialize uint64 head to 0, compare with 0 to test for emptiness.
|
|
|
|
* The stack does not keep pointers to nodes,
|
|
|
|
* so they can be garbage collected if there are no other pointers to nodes.
|
|
|
|
*/
|
|
|
|
|
|
|
|
// for mmap, we only pass the lower 32 bits of file offset to the
|
|
|
|
// assembly routine; the higher bits (if required), should be provided
|
|
|
|
// by the assembly routine as 0.
|