2014-08-19 01:49:59 -06:00
|
|
|
// Copyright 2014 The Go Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style
|
|
|
|
// license that can be found in the LICENSE file.
|
|
|
|
|
|
|
|
package runtime
|
|
|
|
|
2014-08-21 10:41:09 -06:00
|
|
|
import "unsafe"
|
|
|
|
|
liblink, runtime: diagnose and fix C code running on Go stack
This CL contains compiler+runtime changes that detect C code
running on Go (not g0, not gsignal) stacks, and it contains
corrections for what it detected.
The detection works by changing the C prologue to use a different
stack guard word in the G than Go prologue does. On the g0 and
gsignal stacks, that stack guard word is set to the usual
stack guard value. But on ordinary Go stacks, that stack
guard word is set to ^0, which will make any stack split
check fail. The C prologue then calls morestackc instead
of morestack, and morestackc aborts the program with
a message about running C code on a Go stack.
This check catches all C code running on the Go stack
except NOSPLIT code. The NOSPLIT code is allowed,
so the check is complete. Since it is a dynamic check,
the code must execute to be caught. But unlike the static
checks we've been using in cmd/ld, the dynamic check
works with function pointers and other indirect calls.
For example it caught sigpanic being pushed onto Go
stacks in the signal handlers.
Fixes #8667.
LGTM=khr, iant
R=golang-codereviews, khr, iant
CC=golang-codereviews, r
https://golang.org/cl/133700043
2014-09-08 12:05:23 -06:00
|
|
|
// newsysmon starts the sysmon monitor thread.  Declaration only: the
// body is provided outside this file.  Invoked below from main via
// onM because it must run on the system stack.
func newsysmon()
|
|
|
|
|
|
|
|
// runtime_init runs the runtime package's init functions.
// Declaration only; the body is provided outside this file
// (presumably wired up by the compiler/linker — confirm).
func runtime_init()
|
|
|
|
// main_init runs the program's package init functions (main·init).
// Declaration only; the body is provided outside this file
// (presumably linked to the user program — confirm).
func main_init()
|
|
|
|
// main_main runs the program's entry point (main·main).
// Declaration only; the body is provided outside this file
// (presumably linked to the user program — confirm).
func main_main()
|
|
|
|
|
|
|
|
// The main goroutine.
//
// Runs on an ordinary goroutine (not g0).  It finishes runtime
// initialization, runs package init functions, then the user's
// main.main, and finally exits the process.
func main() {
	g := getg()

	// Racectx of m0->g0 is used only as the parent of the main goroutine.
	// It must not be used for anything else.
	g.m.g0.racectx = 0

	// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
	// Using decimal instead of binary GB and MB because
	// they look nicer in the stack overflow failure message.
	if ptrSize == 8 {
		maxstacksize = 1000000000
	} else {
		maxstacksize = 250000000
	}

	// Start the sysmon monitor thread; must run on the system stack.
	onM(newsysmon)

	// Lock the main goroutine onto this, the main OS thread,
	// during initialization. Most programs won't care, but a few
	// do require certain calls to be made by the main thread.
	// Those can arrange for main.main to run in the main thread
	// by calling runtime.LockOSThread during initialization
	// to preserve the lock.
	lockOSThread()

	// Sanity check: the main goroutine must be running on m0.
	if g.m != &m0 {
		gothrow("runtime.main not on m0")
	}

	runtime_init() // must be before defer

	// Defer unlock so that runtime.Goexit during init does the unlock too.
	needUnlock := true
	defer func() {
		if needUnlock {
			unlockOSThread()
		}
	}()

	memstats.enablegc = true // now that runtime is initialized, GC is okay

	main_init()

	// Initialization is done; drop the OS-thread lock (unless the
	// program re-acquired it during init).  Clear the flag first so
	// the deferred unlock above does not unlock twice.
	needUnlock = false
	unlockOSThread()

	main_main()
	if raceenabled {
		racefini()
	}

	// Make racy client program work: if panicking on
	// another goroutine at the same time as main returns,
	// let the other goroutine finish printing the panic trace.
	// Once it does, it will exit. See issue 3934.
	if panicking != 0 {
		gopark(nil, nil, "panicwait")
	}

	exit(0)
	// Unreachable unless exit somehow returns; force a clean crash
	// via nil dereference rather than running off the end.
	for {
		var x *int32
		*x = 0
	}
}
|
|
|
|
|
2014-08-21 10:41:09 -06:00
|
|
|
// parkunlock_c is never read or written; only its address matters.
// goparkunlock passes &parkunlock_c as gopark's unlockf, presumably
// as a sentinel recognized by the scheduler's park path meaning
// "unlock the mutex" — confirm against park_m's implementation.
var parkunlock_c byte
|
|
|
|
|
2014-08-29 01:08:10 -06:00
|
|
|
// start forcegc helper goroutine
func init() {
	go forcegchelper()
}
|
|
|
|
|
|
|
|
// forcegchelper is the body of the forced-GC goroutine started by
// init above.  It loops forever: it marks itself idle and parks on
// forcegc.lock, then runs a GC each time it is woken.
func forcegchelper() {
	forcegc.g = getg()
	// issystem: exclude this goroutine from user-visible accounting.
	forcegc.g.issystem = true
	for {
		lock(&forcegc.lock)
		// We only ever park while idle==1 and are resumed by
		// sysmon, which is expected to clear idle before waking
		// us; seeing idle!=0 here means the protocol was violated.
		if forcegc.idle != 0 {
			gothrow("forcegc: phase error")
		}
		atomicstore(&forcegc.idle, 1)
		// Park, releasing forcegc.lock atomically with the park.
		goparkunlock(&forcegc.lock, "force gc (idle)")
		// this goroutine is explicitly resumed by sysmon
		if debug.gctrace > 0 {
			println("GC forced")
		}
		gogc(1)
	}
}
|
|
|
|
|
2014-09-11 14:33:01 -06:00
|
|
|
//go:nosplit
|
|
|
|
|
2014-08-19 01:49:59 -06:00
|
|
|
// Gosched yields the processor, allowing other goroutines to run. It does not
// suspend the current goroutine, so execution resumes automatically.
func Gosched() {
	// Switch to the g0 stack and run the scheduler's yield path.
	mcall(gosched_m)
}
|
2014-08-21 10:41:09 -06:00
|
|
|
|
|
|
|
// Puts the current goroutine into a waiting state and calls unlockf.
// If unlockf returns false, the goroutine is resumed.
//
// The wait parameters (lock, unlockf) are stashed on the m rather
// than passed directly, to be consumed by park_m on the g0 stack.
func gopark(unlockf unsafe.Pointer, lock unsafe.Pointer, reason string) {
	// Pin to the current m so the stashed waitlock/waitunlockf
	// cannot be consumed on a different m.
	mp := acquirem()
	gp := mp.curg
	status := readgstatus(gp)
	if status != _Grunning && status != _Gscanrunning {
		gothrow("gopark: bad g status")
	}
	mp.waitlock = lock
	mp.waitunlockf = unlockf
	gp.waitreason = reason
	releasem(mp)
	// can't do anything that might move the G between Ms here.
	mcall(park_m)
}
|
|
|
|
|
|
|
|
// Puts the current goroutine into a waiting state and unlocks the lock.
// The goroutine can be made runnable again by calling goready(gp).
func goparkunlock(lock *mutex, reason string) {
	// &parkunlock_c is a sentinel unlock function meaning
	// "release the mutex" (see parkunlock_c above).
	gopark(unsafe.Pointer(&parkunlock_c), unsafe.Pointer(lock), reason)
}
|
|
|
|
|
|
|
|
// goready marks gp runnable.  The target g is handed to ready_m
// through the m's scratch pointer argument because onM switches to
// the system stack, where ordinary arguments are unavailable.
func goready(gp *g) {
	mp := acquirem()
	mp.ptrarg[0] = unsafe.Pointer(gp)
	onM(ready_m)
	releasem(mp)
}
|
|
|
|
|
|
|
|
// acquireSudog returns a sudog for the current goroutine to wait on,
// popping one from the m-cache's free list when available and
// allocating a fresh one otherwise.  Pair with releaseSudog.
//go:nosplit
func acquireSudog() *sudog {
	c := gomcache()
	s := c.sudogcache
	if s != nil {
		// Fast path: pop the head of the cached free list.
		c.sudogcache = s.next
		return s
	}

	// Delicate dance: the semaphore implementation calls
	// acquireSudog, acquireSudog calls new(sudog),
	// new calls malloc, malloc can call the garbage collector,
	// and the garbage collector calls the semaphore implementation
	// in stoptheworld.
	// Break the cycle by doing acquirem/releasem around new(sudog).
	// The acquirem/releasem increments m.locks during new(sudog),
	// which keeps the garbage collector from being invoked.
	mp := acquirem()
	p := new(sudog)
	releasem(mp)
	return p
}
|
|
|
|
|
|
|
|
// releaseSudog returns s to the m-cache's free list for reuse by a
// later acquireSudog.
//go:nosplit
func releaseSudog(s *sudog) {
	c := gomcache()
	// Push s onto the head of the cached free list.
	s.next = c.sudogcache
	c.sudogcache = s
}
|
2014-09-03 09:10:38 -06:00
|
|
|
|
|
|
|
// funcPC returns the entry PC of the function f.
// It assumes that f is a func value. Otherwise the behavior is undefined.
//go:nosplit
func funcPC(f interface{}) uintptr {
	// Skip the interface's type word (ptrSize bytes) to reach the
	// data word, which holds the func value (a pointer whose first
	// word is the entry PC); hence the double dereference.
	return **(**uintptr)(add(unsafe.Pointer(&f), ptrSize))
}
|
2014-09-04 19:12:31 -06:00
|
|
|
|
|
|
|
// called from assembly
// badmcall aborts: mcall must not be invoked while already running
// on the g0 stack.
func badmcall(fn func(*g)) {
	gothrow("runtime: mcall called on m->g0 stack")
}
|
|
|
|
|
|
|
|
// badmcall2 aborts: a function run via mcall must switch goroutines
// and never return to its caller.
func badmcall2(fn func(*g)) {
	gothrow("runtime: mcall function returned")
}
|
2014-09-06 11:07:23 -06:00
|
|
|
|
2014-09-06 11:12:47 -06:00
|
|
|
// badreflectcall aborts when a reflect.call argument frame exceeds
// the supported maximum (presumably reached from assembly, like
// badmcall — confirm).
func badreflectcall() {
	panic("runtime: arg size to reflect.call more than 1GB")
}
|
|
|
|
|
2014-09-06 11:07:23 -06:00
|
|
|
func lockedOSThread() bool {
|
|
|
|
gp := getg()
|
|
|
|
return gp.lockedm != nil && gp.m.lockedg != nil
|
|
|
|
}
|
2014-09-12 14:12:39 -06:00
|
|
|
|
|
|
|
func newP() *p {
|
|
|
|
return new(p)
|
|
|
|
}
|
|
|
|
|
|
|
|
// allgadd appends gp to allgs, the global list of all goroutines.
// gp must already have a status other than _Gidle.
func allgadd(gp *g) {
	if readgstatus(gp) == _Gidle {
		gothrow("allgadd: bad status Gidle")
	}

	lock(&allglock)
	allgs = append(allgs, gp)
	// append may have moved the backing array, so refresh the
	// pointer to its first element (presumably kept for non-Go
	// code that walks the list — confirm).
	allg = &allgs[0]
	allglen = uintptr(len(allgs))
	unlock(&allglock)
}
|