// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import "unsafe"

/*
 * defined constants
 */
const (
	// G status
	//
	// If you add to this list, add to the list
	// of "okay during garbage collection" status
	// in mgcmark.go too.
	_Gidle            = iota // 0
	_Grunnable               // 1 runnable and on a run queue
	_Grunning                // 2
	_Gsyscall                // 3
	_Gwaiting                // 4
	_Gmoribund_unused        // 5 currently unused, but hardcoded in gdb scripts
	_Gdead                   // 6
	_Genqueue                // 7 Only the Gscanenqueue is used.
	_Gcopystack              // 8 in this state when newstack is moving the stack

	// The following encode that the GC is scanning the stack and what to do when it is done.
	_Gscan = 0x1000 // atomicstatus&~Gscan = the non-scan state

	// _Gscanidle = _Gscan + _Gidle, // Not used. Gidle only used with newly malloced gs.
	_Gscanrunnable = _Gscan + _Grunnable // 0x1001 When scanning completes make Grunnable (it is already on run queue)
	_Gscanrunning  = _Gscan + _Grunning  // 0x1002 Used to tell preemption newstack routine to scan preempted stack.
	_Gscansyscall  = _Gscan + _Gsyscall  // 0x1003 When scanning completes make it Gsyscall
	_Gscanwaiting  = _Gscan + _Gwaiting  // 0x1004 When scanning completes make it Gwaiting

	// _Gscanmoribund_unused, // not possible
	// _Gscandead, // not possible
	_Gscanenqueue = _Gscan + _Genqueue // When scanning completes make it Grunnable and put on run queue
)
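
// Scan-state arithmetic, as an illustrative sketch (not a helper in this file):
//
//	base := status &^ _Gscan        // e.g. _Gscanrunnable &^ _Gscan == _Grunnable
//	scanning := status&_Gscan != 0  // reports whether the GC is scanning this stack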

const (
	// P status
	_Pidle = iota
	_Prunning // Only this P is allowed to change from _Prunning.
	_Psyscall
	_Pgcstop
	_Pdead
)

// The next line makes 'go generate' write the zgen_*.go files with
// per-OS and per-arch information, including constants
// named goos_$GOOS and goarch_$GOARCH for every
// known GOOS and GOARCH. The constant is 1 on the
// current system, 0 otherwise; multiplying by them is
// useful for defining GOOS- or GOARCH-specific constants.
//go:generate go run gengoos.go

type mutex struct {
	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.
	key uintptr
}
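
// A minimal usage sketch, assuming the lock/unlock helpers from
// lock_futex.go or lock_sema.go (whichever the platform uses):
//
//	var l mutex // the zero value is an unlocked mutex
//	lock(&l)
//	// ... critical section ...
//	unlock(&l)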

type note struct {
	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.
	key uintptr
}

type _string struct {
	str *byte
	len int
}

type funcval struct {
	fn uintptr
	// variable-size, fn-specific data here
}

type iface struct {
	tab  *itab
	data unsafe.Pointer
}

type eface struct {
	_type *_type
	data  unsafe.Pointer
}

// The guintptr, muintptr, and puintptr are all used to bypass write barriers.
// It is particularly important to avoid write barriers when the current P has
// been released, because the GC thinks the world is stopped, and an
// unexpected write barrier would not be synchronized with the GC,
// which can lead to a half-executed write barrier that has marked the object
// but not queued it. If the GC skips the object and completes before the
// queuing can occur, it will incorrectly free the object.
//
// We tried using special assignment functions invoked only when not
// holding a running P, but then some updates to a particular memory
// word went through write barriers and some did not. This breaks the
// write barrier shadow checking mode, and it is also scary: better to have
// a word that is completely ignored by the GC than to have one for which
// only a few updates are ignored.
//
// Gs, Ms, and Ps are always reachable via true pointers in the
// allgs, allm, and allp lists or (during allocation before they reach those lists)
// from stack variables.

// A guintptr holds a goroutine pointer, but typed as a uintptr
// to bypass write barriers. It is used in the Gobuf goroutine state
// and in scheduling lists that are manipulated without a P.
//
// The Gobuf.g goroutine pointer is almost always updated by assembly code.
// In one of the few places it is updated by Go code - func save - it must be
// treated as a uintptr to avoid a write barrier being emitted at a bad time.
// Instead of figuring out how to emit the write barriers missing in the
// assembly manipulation, we change the type of the field to uintptr,
// so that it does not require write barriers at all.
//
// Goroutine structs are published in the allg list and never freed.
// That will keep the goroutine structs from being collected.
// There is never a time that Gobuf.g's contain the only references
// to a goroutine: the publishing of the goroutine in allg comes first.
// Goroutine pointers are also kept in non-GC-visible places like TLS,
// so I can't see them ever moving. If we did want to start moving data
// in the GC, we'd need to allocate the goroutine structs from an
// alternate arena. Using guintptr doesn't make that problem any worse.
type guintptr uintptr

func (gp guintptr) ptr() *g   { return (*g)(unsafe.Pointer(gp)) }
func (gp *guintptr) set(g *g) { *gp = guintptr(unsafe.Pointer(g)) }

func (gp *guintptr) cas(old, new guintptr) bool {
	return casuintptr((*uintptr)(unsafe.Pointer(gp)), uintptr(old), uintptr(new))
}
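
// A hedged example of how these accessors compose; the surrounding code and
// names (gp, a schedlink-style field) are illustrative only:
//
//	var link guintptr
//	link.set(gp)       // store the *g without a write barrier
//	gp2 := link.ptr()  // recover the *g
//	ok := link.cas(guintptr(unsafe.Pointer(gp2)), 0) // clear it atomically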

type puintptr uintptr

func (pp puintptr) ptr() *p   { return (*p)(unsafe.Pointer(pp)) }
func (pp *puintptr) set(p *p) { *pp = puintptr(unsafe.Pointer(p)) }

type muintptr uintptr

func (mp muintptr) ptr() *m   { return (*m)(unsafe.Pointer(mp)) }
func (mp *muintptr) set(m *m) { *mp = muintptr(unsafe.Pointer(m)) }

type gobuf struct {
	// The offsets of sp, pc, and g are known to (hard-coded in) libmach.
	sp   uintptr
	pc   uintptr
	g    guintptr
	ctxt unsafe.Pointer // this has to be a pointer so that gc scans it
	ret  uintreg
	lr   uintptr
	bp   uintptr // for GOEXPERIMENT=framepointer
}

// Known to compiler.
// Changes here must also be made in src/cmd/internal/gc/select.go's selecttype.
type sudog struct {
	g           *g
	selectdone  *uint32
	next        *sudog
	prev        *sudog
	elem        unsafe.Pointer // data element
	releasetime int64
	nrelease    int32  // -1 for acquire
	waitlink    *sudog // g.waiting list
}

type gcstats struct {
	// the struct must consist of only uint64's,
	// because it is cast to uint64[].
	nhandoff    uint64
	nhandoffcnt uint64
	nprocyield  uint64
	nosyield    uint64
	nsleep      uint64
}

type libcall struct {
	fn   uintptr
	n    uintptr // number of parameters
	args uintptr // parameters
	r1   uintptr // return values
	r2   uintptr
	err  uintptr // error number
}

// describes how to handle callback
type wincallbackcontext struct {
	gobody       unsafe.Pointer // go function to call
	argsize      uintptr        // callback arguments size (in bytes)
	restorestack uintptr        // adjust stack on return by (in bytes) (386 only)
	cleanstack   bool
}

// Stack describes a Go execution stack.
// The bounds of the stack are exactly [lo, hi),
// with no implicit data structures on either side.
type stack struct {
	lo uintptr
	hi uintptr
}

type g struct {
	// Stack parameters.
	// stack describes the actual stack memory: [stack.lo, stack.hi).
	// stackguard0 is the stack pointer compared in the Go stack growth prologue.
	// It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
	// stackguard1 is the stack pointer compared in the C stack growth prologue.
	// It is stack.lo+StackGuard on g0 and gsignal stacks.
	// It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
	stack       stack   // offset known to runtime/cgo
	stackguard0 uintptr // offset known to liblink
	stackguard1 uintptr // offset known to liblink

	_panic       *_panic // innermost panic - offset known to liblink
	_defer       *_defer // innermost defer
	sched        gobuf
	syscallsp    uintptr        // if status==Gsyscall, syscallsp = sched.sp to use during gc
	syscallpc    uintptr        // if status==Gsyscall, syscallpc = sched.pc to use during gc
	param        unsafe.Pointer // passed parameter on wakeup
	atomicstatus uint32
	goid         int64
	waitsince    int64  // approx time when the g became blocked
	waitreason   string // if status==Gwaiting
	schedlink    guintptr
	preempt      bool // preemption signal, duplicates stackguard0 = stackpreempt
	paniconfault bool // panic (instead of crash) on unexpected fault address
	preemptscan  bool // preempted g does scan for gc
	gcworkdone   bool // debug: cleared at beginning of gc work phase cycle, set by gcphasework, tested at end of cycle
	gcscanvalid  bool // false at start of gc cycle, true if G has not run since last scan
	throwsplit   bool // must not split stack
	raceignore   int8 // ignore race detection events
	m            *m   // for debuggers, but offset not hard-coded
	lockedm      *m
	sig          uint32
	writebuf     []byte
	sigcode0     uintptr
	sigcode1     uintptr
	sigpc        uintptr
	gopc         uintptr // pc of go statement that created this goroutine
	startpc      uintptr // pc of goroutine function
	racectx      uintptr
	waiting      *sudog // sudog structures this g is waiting on (that have a valid elem ptr)
	readyg       *g     // scratch for readyExecute

	// Per-G gcController state
	gcalloc    uintptr // bytes allocated during this GC cycle
	gcscanwork int64   // scan work done (or stolen) this GC cycle
}
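
// An illustrative sketch of the preemption request described above; the
// stackPreempt sentinel is assumed to be defined elsewhere in the runtime:
//
//	gp.preempt = true             // duplicate the request as a flag
//	gp.stackguard0 = stackPreempt // make the next stack-growth check fail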

type mts struct {
	tv_sec  int64
	tv_nsec int64
}

type mscratch struct {
	v [6]uintptr
}

type m struct {
	g0      *g    // goroutine with scheduling stack
	morebuf gobuf // gobuf arg to morestack

	// Fields not known to debuggers.
	procid        uint64     // for debuggers, but offset not hard-coded
	gsignal       *g         // signal-handling g
	tls           [4]uintptr // thread-local storage (for x86 extern register)
	mstartfn      func()
	curg          *g       // current running goroutine
	caughtsig     guintptr // goroutine running during fatal signal
	p             puintptr // attached p for executing go code (nil if not executing go code)
	nextp         puintptr
	id            int32
	mallocing     int32
	throwing      int32
	preemptoff    string // if != "", keep curg running on this m
	locks         int32
	softfloat     int32
	dying         int32
	profilehz     int32
	helpgc        int32
	spinning      bool // m is out of work and is actively looking for work
	blocked       bool // m is blocked on a note
	inwb          bool // m is executing a write barrier
	printlock     int8
	fastrand      uint32
	ncgocall      uint64 // number of cgo calls in total
	ncgo          int32  // number of cgo calls currently in progress
	park          note
	alllink       *m // on allm
	schedlink     muintptr
	machport      uint32 // return address for mach ipc (os x)
	mcache        *mcache
	lockedg       *g
	createstack   [32]uintptr // stack that created this thread.
	freglo        [16]uint32  // d[i] lsb and f[i]
	freghi        [16]uint32  // d[i] msb and f[i+16]
	fflag         uint32      // floating point compare flags
	locked        uint32      // tracking for lockosthread
	nextwaitm     uintptr     // next m waiting for lock
	waitsema      uintptr     // semaphore for parking on locks
	waitsemacount uint32
	waitsemalock  uint32
	gcstats       gcstats
	needextram    bool
	traceback     uint8
	waitunlockf   unsafe.Pointer // todo go func(*g, unsafe.pointer) bool
	waitlock      unsafe.Pointer
	waittraceev   byte
	waittraceskip int
	syscalltick   uint32
	//#ifdef GOOS_windows
	thread uintptr // thread handle
	// these are here because they are too large to be on the stack
	// of low-level NOSPLIT functions.
	libcall   libcall
	libcallpc uintptr // for cpu profiler
	libcallsp uintptr
	libcallg  guintptr
	//#endif
	//#ifdef GOOS_solaris
	perrno *int32 // pointer to tls errno
	// these are here because they are too large to be on the stack
	// of low-level NOSPLIT functions.
	//LibCall	libcall;
	ts      mts
	scratch mscratch
	//#endif
	//#ifdef GOOS_plan9
	notesig *int8
	errstr  *byte
	//#endif
}

type p struct {
	lock mutex

	id          int32
	status      uint32 // one of pidle/prunning/...
	link        puintptr
	schedtick   uint32   // incremented on every scheduler call
	syscalltick uint32   // incremented on every system call
	m           muintptr // back-link to associated m (nil if idle)
	mcache      *mcache

	deferpool    [5][]*_defer // pool of available defer structs of different sizes (see panic.go)
	deferpoolbuf [5][32]*_defer

	// Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
	goidcache    uint64
	goidcacheend uint64

	// Queue of runnable goroutines. Accessed without lock.
	runqhead    uint32
	runqtail    uint32
	runq        [256]*g
	runqvictims [128]*g // Used to stage victims from another p's runq

	// runnext, if non-nil, is a runnable G that was ready'd by
	// the current G and should be run next instead of what's in
	// runq if there's time remaining in the running G's time
	// slice. It will inherit the time left in the current time
	// slice. If a set of goroutines is locked in a
	// communicate-and-wait pattern, this schedules that set as a
	// unit and eliminates the (potentially large) scheduling
	// latency that otherwise arises from adding the ready'd
	// goroutines to the end of the run queue.
	runnext guintptr

	// Available G's (status == Gdead)
	gfree    *g
	gfreecnt int32

	sudogcache []*sudog
	sudogbuf   [128]*sudog

	tracebuf *traceBuf

	palloc persistentAlloc // per-P to avoid mutex

	// Per-P GC state
	gcAssistTime     int64 // Nanoseconds in assistAlloc
	gcBgMarkWorker   *g
	gcMarkWorkerMode gcMarkWorkerMode

	// gcw is this P's GC work buffer cache. The work buffer is
	// filled by write barriers, drained by mutator assists, and
	// disposed on certain GC state transitions.
	gcw gcWork

	runSafePointFn uint32 // if 1, run sched.safePointFn at next safe point

	pad [64]byte
}
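
// A hedged sketch of how a scheduler might honor runnext before the runq
// ring above; the real dequeue logic lives in the scheduler, not in this file:
//
//	if next := _p_.runnext; next != 0 && _p_.runnext.cas(next, 0) {
//		return next.ptr() // run the ready'd G first, inheriting the time slice
//	}
//	// otherwise dequeue from runqhead/runqtail as usual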

const (
	// The max value of GOMAXPROCS.
	// There are no fundamental restrictions on the value.
	_MaxGomaxprocs = 1 << 8
)

type schedt struct {
	lock mutex

	goidgen uint64

	midle        muintptr // idle m's waiting for work
	nmidle       int32    // number of idle m's waiting for work
	nmidlelocked int32    // number of locked m's waiting for work
	mcount       int32    // number of m's that have been created
	maxmcount    int32    // maximum number of m's allowed (or die)

	pidle      puintptr // idle p's
	npidle     uint32
	nmspinning uint32

	// Global runnable queue.
	runqhead guintptr
	runqtail guintptr
	runqsize int32

	// Global cache of dead G's.
	gflock mutex
	gfree  *g
	ngfree int32

	// Central cache of sudog structs.
	sudoglock  mutex
	sudogcache *sudog

	// Central pool of available defer structs of different sizes.
	deferlock mutex
	deferpool [5]*_defer

	gcwaiting  uint32 // gc is waiting to run
	stopwait   int32
	stopnote   note
	sysmonwait uint32
	sysmonnote note
	lastpoll   uint64

	// safepointFn should be called on each P at the next GC
	// safepoint if p.runSafePointFn is set.
	safePointFn func(*p)

	profilehz int32 // cpu profiling rate

	procresizetime int64 // nanotime() of last change to gomaxprocs
	totaltime      int64 // ∫gomaxprocs dt up to procresizetime
}

// The m->locked word holds two pieces of state counting active calls to LockOSThread/lockOSThread.
// The low bit (LockExternal) is a boolean reporting whether any LockOSThread call is active.
// External locks are not recursive; a second lock is silently ignored.
// The upper bits of m->locked record the nesting depth of calls to lockOSThread
// (counting up by LockInternal), popped by unlockOSThread (counting down by LockInternal).
// Internal locks can be recursive. For instance, a lock for cgo can occur while the main
// goroutine is holding the lock during the initialization phase.
const (
	_LockExternal = 1
	_LockInternal = 2
)
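
// Decoding the locked word, as a minimal sketch of the scheme above:
//
//	external := mp.locked&_LockExternal != 0 // any LockOSThread call active
//	internal := mp.locked / _LockInternal    // nesting depth of lockOSThread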

type sigtabtt struct {
	flags int32
	name  *int8
}

const (
	_SigNotify   = 1 << 0 // let signal.Notify have signal, even if from kernel
	_SigKill     = 1 << 1 // if signal.Notify doesn't take it, exit quietly
	_SigThrow    = 1 << 2 // if signal.Notify doesn't take it, exit loudly
	_SigPanic    = 1 << 3 // if the signal is from the kernel, panic
	_SigDefault  = 1 << 4 // if the signal isn't explicitly requested, don't monitor it
	_SigHandling = 1 << 5 // our signal handler is registered
	_SigIgnored  = 1 << 6 // the signal was ignored before we registered for it
	_SigGoExit   = 1 << 7 // cause all runtime procs to exit (only used on Plan 9).
	_SigSetStack = 1 << 8 // add SA_ONSTACK to libc handler
)

// Layout of in-memory per-function information prepared by linker
// See http://golang.org/s/go12symtab.
// Keep in sync with linker
// and with package debug/gosym and with symtab.go in package runtime.
type _func struct {
	entry   uintptr // start pc
	nameoff int32   // function name

	args  int32 // in/out args size
	frame int32 // legacy frame size; use pcsp if possible

	pcsp      int32
	pcfile    int32
	pcln      int32
	npcdata   int32
	nfuncdata int32
}

// layout of Itab known to compilers
// allocated in non-garbage-collected memory
type itab struct {
	inter  *interfacetype
	_type  *_type
	link   *itab
	bad    int32
	unused int32
	fun    [1]uintptr // variable sized
}

// Lock-free stack node.
// Also known to export_test.go.
type lfnode struct {
	next    uint64
	pushcnt uintptr
}

type forcegcstate struct {
	lock mutex
	g    *g
	idle uint32
}

/*
 * known to compiler
 */
const (
	_Structrnd = regSize
)

// startup_random_data holds random bytes initialized at startup. These come from
// the ELF AT_RANDOM auxiliary vector (vdso_linux_amd64.go or os_linux_386.go).
var startupRandomData []byte

// extendRandom extends the random numbers in r[:n] to the whole slice r.
// Treats n<0 as n==0.
func extendRandom(r []byte, n int) {
	if n < 0 {
		n = 0
	}
	for n < len(r) {
		// Extend random bits using hash function & time seed
		w := n
		if w > 16 {
			w = 16
		}
		h := memhash(unsafe.Pointer(&r[n-w]), uintptr(nanotime()), uintptr(w))
		for i := 0; i < ptrSize && n < len(r); i++ {
			r[n] = byte(h)
			n++
			h >>= 8
		}
	}
}
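
// A usage sketch: seed the front of a buffer and let extendRandom fill the
// rest (the seed source here is only an example):
//
//	var buf [32]byte
//	n := copy(buf[:], startupRandomData)
//	extendRandom(buf[:], n)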

/*
 * deferred subroutine calls
 */
type _defer struct {
	siz     int32
	started bool
	sp      uintptr // sp at time of defer
	pc      uintptr
	fn      *funcval
	_panic  *_panic // panic that is running defer
	link    *_defer
}

/*
 * panics
 */
type _panic struct {
	argp      unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink
	arg       interface{}    // argument to panic
	link      *_panic        // link to earlier panic
	recovered bool           // whether this panic is over
	aborted   bool           // the panic was aborted
}

/*
 * stack traces
 */

type stkframe struct {
	fn       *_func     // function being run
	pc       uintptr    // program counter within fn
	continpc uintptr    // program counter where execution can continue, or 0 if not
	lr       uintptr    // program counter at caller aka link register
	sp       uintptr    // stack pointer at pc
	fp       uintptr    // stack pointer at caller aka frame pointer
	varp     uintptr    // top of local variables
	argp     uintptr    // pointer to function arguments
	arglen   uintptr    // number of bytes at argp
	argmap   *bitvector // force use of this argmap
}

const (
	_TraceRuntimeFrames = 1 << iota // include frames for internal runtime functions.
	_TraceTrap                      // the initial PC, SP are from a trap, not a return PC from a call
	_TraceJumpStack                 // if traceback is on a systemstack, resume trace at g that called into it
)

const (
	// The maximum number of frames we print for a traceback
	_TracebackMaxFrames = 100
)

var (
	emptystring string
	allg        **g
	allglen     uintptr
	lastg       *g
	allm        *m
	allp        [_MaxGomaxprocs + 1]*p
	gomaxprocs  int32
	panicking   uint32
	goos        *int8
	ncpu        int32
	signote     note
	forcegc     forcegcstate
	sched       schedt
	newprocs    int32

	// Information about what cpu features are available.
	// Set on startup in asm_{x86,amd64}.s.
	cpuid_ecx         uint32
	cpuid_edx         uint32
	lfenceBeforeRdtsc bool
)

// Set by the linker so the runtime can determine the buildmode.
var (
	islibrary bool // -buildmode=c-shared
	isarchive bool // -buildmode=c-archive
)

/*
 * mutual exclusion locks. in the uncontended case,
 * as fast as spin locks (just a few user-level instructions),
 * but on the contention path they sleep in the kernel.
 * a zeroed Mutex is unlocked (no need to initialize each lock).
 */

/*
 * sleep and wakeup on one-time events.
 * before any calls to notesleep or notewakeup,
 * must call noteclear to initialize the Note.
 * then, exactly one thread can call notesleep
 * and exactly one thread can call notewakeup (once).
 * once notewakeup has been called, the notesleep
 * will return. future notesleep will return immediately.
 * subsequent noteclear must be called only after
 * previous notesleep has returned, e.g. it's disallowed
 * to call noteclear straight after notewakeup.
 *
 * notetsleep is like notesleep but wakes up after
 * a given number of nanoseconds even if the event
 * has not yet happened. if a goroutine uses notetsleep to
 * wake up early, it must wait to call noteclear until it
 * can be sure that no other goroutine is calling
 * notewakeup.
 *
 * notesleep/notetsleep are generally called on g0,
 * notetsleepg is similar to notetsleep but is called on user g.
 */
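
// A one-waiter/one-waker sketch of the protocol above (assuming, as elsewhere
// in the runtime, that a negative duration means no timeout):
//
//	var done note
//	noteclear(&done)       // initialize before any sleep/wakeup
//	// ... some other thread eventually calls notewakeup(&done) ...
//	notetsleepg(&done, -1) // block (on a user g) until woken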

// bool	runtime·notetsleep(Note*, int64);  // false - timeout
// bool	runtime·notetsleepg(Note*, int64);  // false - timeout

/*
 * Lock-free stack.
 * Initialize uint64 head to 0, compare with 0 to test for emptiness.
 * The stack does not keep pointers to nodes,
 * so they can be garbage collected if there are no other pointers to nodes.
 */
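
// A sketch of the head convention described above; lfstackpush/lfstackpop
// are assumed to live in lfstack.go and to take a *lfnode placed at the
// start of the node's struct:
//
//	var head uint64          // zero value == empty stack
//	lfstackpush(&head, node) // node is a *lfnode
//	if head == 0 {
//		// stack is empty
//	}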

// for mmap, we only pass the lower 32 bits of file offset to the
// assembly routine; the higher bits (if required), should be provided
// by the assembly routine as 0.