2014-08-19 01:49:59 -06:00
|
|
|
// Copyright 2014 The Go Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style
|
|
|
|
// license that can be found in the LICENSE file.
|
|
|
|
|
|
|
|
package runtime
|
|
|
|
|
2014-08-21 10:41:09 -06:00
|
|
|
import "unsafe"
|
|
|
|
|
2014-12-22 11:27:53 -07:00
|
|
|
// runtime_init is the compiler-generated init function of the runtime
// package (runtime.init), made reachable under a local name via
// go:linkname so the main goroutine can invoke it explicitly before
// any user code runs. Implemented by the compiler/linker, not here.
//go:linkname runtime_init runtime.init
func runtime_init()
|
2014-12-22 11:27:53 -07:00
|
|
|
|
|
|
|
// main_init is the compiler-generated init function of the main
// package (main.init), linked in by name so the runtime can run all
// package initializers at the right point during bootstrap.
//go:linkname main_init main.init
func main_init()
|
2014-12-22 11:27:53 -07:00
|
|
|
|
|
|
|
// main_main is the user program's entry point (main.main), linked in
// by name so the runtime's main goroutine can call it after
// initialization completes.
//go:linkname main_main main.main
func main_main()
|
|
|
|
|
|
|
|
// The main goroutine.
//
// main is the first goroutine started by the runtime. It finishes
// runtime initialization (stack limits, sysmon, cgo checks), runs all
// package init functions, then calls the user's main.main. When
// main.main returns, the whole program exits.
func main() {
	g := getg()

	// Racectx of m0->g0 is used only as the parent of the main goroutine.
	// It must not be used for anything else.
	g.m.g0.racectx = 0

	// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
	// Using decimal instead of binary GB and MB because
	// they look nicer in the stack overflow failure message.
	if ptrSize == 8 {
		maxstacksize = 1000000000
	} else {
		maxstacksize = 250000000
	}

	// Start the system monitor on a dedicated M. newm must run on the
	// system stack, hence the systemstack wrapper.
	systemstack(func() {
		newm(sysmon, nil)
	})

	// Lock the main goroutine onto this, the main OS thread,
	// during initialization. Most programs won't care, but a few
	// do require certain calls to be made by the main thread.
	// Those can arrange for main.main to run in the main thread
	// by calling runtime.LockOSThread during initialization
	// to preserve the lock.
	lockOSThread()

	// Sanity check: runtime.main must be running on the bootstrap M.
	if g.m != &m0 {
		throw("runtime.main not on m0")
	}

	runtime_init() // must be before defer

	// Defer unlock so that runtime.Goexit during init does the unlock too.
	needUnlock := true
	defer func() {
		if needUnlock {
			unlockOSThread()
		}
	}()

	memstats.enablegc = true // now that runtime is initialized, GC is okay

	// With cgo enabled, verify that the C-side hooks the runtime relies
	// on were actually linked in; a missing one is a fatal setup error.
	if iscgo {
		if _cgo_thread_start == nil {
			throw("_cgo_thread_start missing")
		}
		if _cgo_malloc == nil {
			throw("_cgo_malloc missing")
		}
		if _cgo_free == nil {
			throw("_cgo_free missing")
		}
		// Windows manages the environment differently, so the setenv
		// hooks are only required elsewhere.
		if GOOS != "windows" {
			if _cgo_setenv == nil {
				throw("_cgo_setenv missing")
			}
			if _cgo_unsetenv == nil {
				throw("_cgo_unsetenv missing")
			}
		}
	}

	// Run all package init functions (transitively, ending with main's).
	main_init()

	// Initialization is done; release the OS-thread lock unless user
	// init code arranged to keep it (the deferred unlock is disarmed
	// first so it does not unlock twice).
	needUnlock = false
	unlockOSThread()

	// Run the user program.
	main_main()
	if raceenabled {
		racefini()
	}

	// Make racy client program work: if panicking on
	// another goroutine at the same time as main returns,
	// let the other goroutine finish printing the panic trace.
	// Once it does, it will exit. See issue 3934.
	if panicking != 0 {
		gopark(nil, nil, "panicwait", traceEvGoStop)
	}

	exit(0)
	// exit should not return; if it somehow does, crash loudly by
	// dereferencing nil rather than falling off the end of main.
	for {
		var x *int32
		*x = 0
	}
}
|
|
|
|
|
2014-08-29 01:08:10 -06:00
|
|
|
// start forcegc helper goroutine
//
// The helper is launched from init so it exists for the whole life of
// the program; sysmon wakes it when a periodic GC should be forced.
func init() {
	go forcegchelper()
}
|
|
|
|
|
|
|
|
// forcegchelper is the body of the forced-GC goroutine. It parks
// itself on forcegc.lock and, each time sysmon readies it, starts a
// forced GC cycle. It never returns.
func forcegchelper() {
	// Record this goroutine so sysmon can find and ready it.
	forcegc.g = getg()
	for {
		lock(&forcegc.lock)
		// We only run when sysmon cleared idle and readied us;
		// being idle already here means the handshake is broken.
		if forcegc.idle != 0 {
			throw("forcegc: phase error")
		}
		atomicstore(&forcegc.idle, 1)
		goparkunlock(&forcegc.lock, "force gc (idle)", traceEvGoBlock)
		// this goroutine is explicitly resumed by sysmon
		if debug.gctrace > 0 {
			println("GC forced")
		}
		startGC(gcForceMode)
	}
}
|
|
|
|
|
2014-09-11 14:33:01 -06:00
|
|
|
// Gosched yields the processor, allowing other goroutines to run. It does not
// suspend the current goroutine, so execution resumes automatically.
//go:nosplit
func Gosched() {
	// mcall switches to the g0 stack and runs gosched_m, which
	// requeues the current goroutine and calls the scheduler.
	mcall(gosched_m)
}
|
2014-08-21 10:41:09 -06:00
|
|
|
|
|
|
|
// Puts the current goroutine into a waiting state and calls unlockf.
// If unlockf returns false, the goroutine is resumed.
//
// reason is recorded for stack dumps/debugging and traceEv is the
// trace event emitted when the goroutine blocks. The actual state
// transition happens in park_m on the g0 stack.
func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason string, traceEv byte) {
	// Pin to the current M while we stash the park parameters on it.
	mp := acquirem()
	gp := mp.curg
	status := readgstatus(gp)
	if status != _Grunning && status != _Gscanrunning {
		throw("gopark: bad g status")
	}
	mp.waitlock = lock
	// Store the func value's raw representation; park_m reconstructs
	// and calls it after the switch to g0.
	mp.waitunlockf = *(*unsafe.Pointer)(unsafe.Pointer(&unlockf))
	gp.waitreason = reason
	mp.waittraceev = traceEv
	releasem(mp)
	// can't do anything that might move the G between Ms here.
	mcall(park_m)
}
|
|
|
|
|
|
|
|
// Puts the current goroutine into a waiting state and unlocks the lock.
// The goroutine can be made runnable again by calling goready(gp).
//
// This is a convenience wrapper over gopark using parkunlock_c as the
// unlock callback.
func goparkunlock(lock *mutex, reason string, traceEv byte) {
	gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv)
}
|
|
|
|
|
|
|
|
// goready marks gp runnable again after a gopark. ready must run on
// the system stack, hence the systemstack wrapper.
func goready(gp *g) {
	systemstack(func() {
		ready(gp)
	})
}
|
|
|
|
|
|
|
|
// acquireSudog returns a sudog from the per-P cache, refilling from
// the central cache (or the heap) when the local cache is empty.
// The returned sudog is guaranteed to have a nil elem field.
//go:nosplit
func acquireSudog() *sudog {
	// Delicate dance: the semaphore implementation calls
	// acquireSudog, acquireSudog calls new(sudog),
	// new calls malloc, malloc can call the garbage collector,
	// and the garbage collector calls the semaphore implementation
	// in stoptheworld.
	// Break the cycle by doing acquirem/releasem around new(sudog).
	// The acquirem/releasem increments m.locks during new(sudog),
	// which keeps the garbage collector from being invoked.
	mp := acquirem()
	pp := mp.p
	if len(pp.sudogcache) == 0 {
		lock(&sched.sudoglock)
		// First, try to grab a batch from central cache.
		for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
			s := sched.sudogcache
			sched.sudogcache = s.next
			s.next = nil
			pp.sudogcache = append(pp.sudogcache, s)
		}
		unlock(&sched.sudoglock)
		// If the central cache is empty, allocate a new one.
		if len(pp.sudogcache) == 0 {
			pp.sudogcache = append(pp.sudogcache, new(sudog))
		}
	}
	// Pop from the tail of the local cache, clearing the slot so the
	// cache does not keep the sudog reachable.
	n := len(pp.sudogcache)
	s := pp.sudogcache[n-1]
	pp.sudogcache[n-1] = nil
	pp.sudogcache = pp.sudogcache[:n-1]
	if s.elem != nil {
		throw("acquireSudog: found s.elem != nil in cache")
	}
	releasem(mp)
	return s
}
|
|
|
|
|
|
|
|
// releaseSudog returns s to the per-P sudog cache, spilling half of
// the local cache to the central cache when the local one is full.
// The caller must have cleared all of s's pointer fields first; stale
// pointers left in cached sudogs have caused GC leaks and corruption
// in the past (see golang.org/issue/9110), so each field is checked.
//go:nosplit
func releaseSudog(s *sudog) {
	if s.elem != nil {
		throw("runtime: sudog with non-nil elem")
	}
	if s.selectdone != nil {
		throw("runtime: sudog with non-nil selectdone")
	}
	if s.next != nil {
		throw("runtime: sudog with non-nil next")
	}
	if s.prev != nil {
		throw("runtime: sudog with non-nil prev")
	}
	if s.waitlink != nil {
		throw("runtime: sudog with non-nil waitlink")
	}
	gp := getg()
	if gp.param != nil {
		throw("runtime: releaseSudog with non-nil gp.param")
	}
	mp := acquirem() // avoid rescheduling to another P
	pp := mp.p
	if len(pp.sudogcache) == cap(pp.sudogcache) {
		// Transfer half of local cache to the central cache.
		// Build a linked list (first..last) from the popped entries,
		// clearing each local slot as we go.
		var first, last *sudog
		for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
			n := len(pp.sudogcache)
			p := pp.sudogcache[n-1]
			pp.sudogcache[n-1] = nil
			pp.sudogcache = pp.sudogcache[:n-1]
			if first == nil {
				first = p
			} else {
				last.next = p
			}
			last = p
		}
		// Splice the batch onto the front of the central list.
		lock(&sched.sudoglock)
		last.next = sched.sudogcache
		sched.sudogcache = first
		unlock(&sched.sudoglock)
	}
	pp.sudogcache = append(pp.sudogcache, s)
	releasem(mp)
}
|
2014-09-03 09:10:38 -06:00
|
|
|
|
|
|
|
// funcPC returns the entry PC of the function f.
// It assumes that f is a func value. Otherwise the behavior is undefined.
//
// A func value in an interface is represented as a pointer (the second
// word of the interface) to a funcval whose first word is the entry PC;
// the double dereference below reads that word directly.
//go:nosplit
func funcPC(f interface{}) uintptr {
	return **(**uintptr)(add(unsafe.Pointer(&f), ptrSize))
}
|
2014-09-04 19:12:31 -06:00
|
|
|
|
|
|
|
// called from assembly
//
// badmcall aborts the program when mcall is invoked while already
// running on the m->g0 stack, which is not allowed.
func badmcall(fn func(*g)) {
	throw("runtime: mcall called on m->g0 stack")
}
|
|
|
|
|
|
|
|
// badmcall2 aborts the program when a function passed to mcall
// returns; mcall'ed functions must never return. Called from assembly.
func badmcall2(fn func(*g)) {
	throw("runtime: mcall function returned")
}
|
2014-09-06 11:07:23 -06:00
|
|
|
|
2014-09-06 11:12:47 -06:00
|
|
|
// badreflectcall panics when reflect.call is asked to invoke a
// function whose argument frame exceeds the supported maximum.
func badreflectcall() {
	panic("runtime: arg size to reflect.call more than 1GB")
}
|
|
|
|
|
2014-09-06 11:07:23 -06:00
|
|
|
func lockedOSThread() bool {
|
|
|
|
gp := getg()
|
|
|
|
return gp.lockedm != nil && gp.m.lockedg != nil
|
|
|
|
}
|
2014-09-12 14:12:39 -06:00
|
|
|
|
2014-11-11 15:08:33 -07:00
|
|
|
var (
	// allgs is the slice of all goroutines ever created.
	allgs []*g
	// allglock protects allgs (and the derived allg/allglen globals
	// updated in allgadd).
	allglock mutex
)
|
|
|
|
|
2014-09-12 14:12:39 -06:00
|
|
|
// allgadd appends gp to the global list of all goroutines.
// gp must already have a real status (not _Gidle).
func allgadd(gp *g) {
	if readgstatus(gp) == _Gidle {
		throw("allgadd: bad status Gidle")
	}

	lock(&allglock)
	allgs = append(allgs, gp)
	// Keep the legacy allg/allglen globals in sync with the slice;
	// append may have moved the backing array.
	allg = &allgs[0]
	allglen = uintptr(len(allgs))
	unlock(&allglock)
}
|