// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import "unsafe"

func newsysmon()
func runtime_init()
func main_init()
func main_main()

// The main goroutine.
func main() {
	g := getg()

	// Racectx of m0->g0 is used only as the parent of the main goroutine.
	// It must not be used for anything else.
	g.m.g0.racectx = 0

	// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
	// Using decimal instead of binary GB and MB because
	// they look nicer in the stack overflow failure message.
	if ptrSize == 8 {
		maxstacksize = 1000000000
	} else {
		maxstacksize = 250000000
	}

	onM(newsysmon)

	// Lock the main goroutine onto this, the main OS thread,
	// during initialization. Most programs won't care, but a few
	// do require certain calls to be made by the main thread.
	// Those can arrange for main.main to run in the main thread
	// by calling runtime.LockOSThread during initialization
	// to preserve the lock.
	lockOSThread()

	if g.m != &m0 {
		gothrow("runtime.main not on m0")
	}

	runtime_init() // must be before defer

	// Defer unlock so that runtime.Goexit during init does the unlock too.
	needUnlock := true
	defer func() {
		if needUnlock {
			unlockOSThread()
		}
	}()

	memstats.enablegc = true // now that runtime is initialized, GC is okay

	main_init()

	needUnlock = false
	unlockOSThread()

	main_main()
	if raceenabled {
		racefini()
	}

	// Make racy client program work: if panicking on
	// another goroutine at the same time as main returns,
	// let the other goroutine finish printing the panic trace.
	// Once it does, it will exit. See issue 3934.
	if panicking != 0 {
		gopark(nil, nil, "panicwait")
	}

	exit(0)
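
	// exit is not expected to return; if it somehow does, crash
	// with a nil-pointer dereference so the failure is noticed.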
	for {
		var x *int32
		*x = 0
	}
}
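
// Illustrative sketch, not part of this file: a user program that needs
// main.main to run on the main OS thread pins it from an init function,
// as the comment in main above describes. The lock taken during init is
// preserved, so main runs on the main thread:
//
//	package main
//
//	import "runtime"
//
//	func init() {
//		runtime.LockOSThread()
//	}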

var parkunlock_c byte

// start forcegc helper goroutine
func init() {
	go forcegchelper()
}

func forcegchelper() {
	forcegc.g = getg()
	forcegc.g.issystem = true
	for {
		lock(&forcegc.lock)
		if forcegc.idle != 0 {
			gothrow("forcegc: phase error")
		}
		atomicstore(&forcegc.idle, 1)
		goparkunlock(&forcegc.lock, "force gc (idle)")
		// this goroutine is explicitly resumed by sysmon
		if debug.gctrace > 0 {
			println("GC forced")
		}
		gogc(1)
	}
}
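
// Illustrative sketch, not part of this file: the other half of this
// handshake lives in sysmon, which (roughly, with an assumed gcIsOverdue
// helper) clears forcegc.idle and reinjects the helper goroutine when a
// GC is overdue:
//
//	if atomicload(&forcegc.idle) != 0 && gcIsOverdue() {
//		lock(&forcegc.lock)
//		forcegc.idle = 0
//		injectglist(forcegc.g) // make forcegchelper runnable again
//		unlock(&forcegc.lock)
//	}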

// Gosched yields the processor, allowing other goroutines to run. It does not
// suspend the current goroutine, so execution resumes automatically.
//go:nosplit
func Gosched() {
	mcall(gosched_m)
}
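
// Illustrative sketch, not part of this file: typical user-level use of
// Gosched is to yield inside a busy loop so other goroutines get a chance
// to run (ready is a hypothetical condition check):
//
//	for !ready() {
//		runtime.Gosched() // yield; this goroutine stays runnable
//	}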

// Puts the current goroutine into a waiting state and calls unlockf.
// If unlockf returns false, the goroutine is resumed.
func gopark(unlockf unsafe.Pointer, lock unsafe.Pointer, reason string) {
	mp := acquirem()
	gp := mp.curg
	status := readgstatus(gp)
	if status != _Grunning && status != _Gscanrunning {
		gothrow("gopark: bad g status")
	}
	mp.waitlock = lock
	mp.waitunlockf = unlockf
	gp.waitreason = reason
	releasem(mp)
	// can't do anything that might move the G between Ms here.
	mcall(park_m)
}

// Puts the current goroutine into a waiting state and unlocks the lock.
// The goroutine can be made runnable again by calling goready(gp).
func goparkunlock(lock *mutex, reason string) {
	gopark(unsafe.Pointer(&parkunlock_c), unsafe.Pointer(lock), reason)
}

func goready(gp *g) {
	mp := acquirem()
	mp.ptrarg[0] = unsafe.Pointer(gp)
	onM(ready_m)
	releasem(mp)
}
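
// Illustrative sketch, not part of this file: the park/ready pair above is
// the runtime's low-level blocking primitive. Roughly, a waiter and a waker
// (with an assumed waiters queue for bookkeeping) look like:
//
//	// waiter, running on some g:
//	lock(&l)
//	waiters.push(getg())
//	goparkunlock(&l, "example wait") // sleep; l is released for us
//
//	// waker, on another g:
//	lock(&l)
//	gp := waiters.pop()
//	unlock(&l)
//	goready(gp) // make the parked g runnable again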

//go:nosplit
func acquireSudog() *sudog {
	c := gomcache()
	s := c.sudogcache
	if s != nil {
		if s.elem != nil {
			gothrow("acquireSudog: found s.elem != nil in cache")
		}
		c.sudogcache = s.next
		return s
	}

	// Delicate dance: the semaphore implementation calls
	// acquireSudog, acquireSudog calls new(sudog),
	// new calls malloc, malloc can call the garbage collector,
	// and the garbage collector calls the semaphore implementation
	// in stoptheworld.
	// Break the cycle by doing acquirem/releasem around new(sudog).
	// The acquirem/releasem increments m.locks during new(sudog),
	// which keeps the garbage collector from being invoked.
	mp := acquirem()
	p := new(sudog)
	releasem(mp)
	return p
}

//go:nosplit
func releaseSudog(s *sudog) {
	if s.elem != nil {
		gothrow("runtime: sudog with non-nil elem")
	}
	if s.selectdone != nil {
		gothrow("runtime: sudog with non-nil selectdone")
	}
	gp := getg()
	if gp.param != nil {
		gothrow("runtime: releaseSudog with non-nil gp.param")
	}
	c := gomcache()
	s.next = c.sudogcache
	c.sudogcache = s
}
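
// Illustrative sketch, not part of this file: channel and semaphore code
// uses these two functions as a matched pair, roughly:
//
//	s := acquireSudog()
//	s.elem = unsafe.Pointer(&v) // value being communicated (assumed field use)
//	// ... enqueue s, park, get woken by the other side ...
//	s.elem = nil // must be cleared before release (checked above)
//	releaseSudog(s)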

// funcPC returns the entry PC of the function f.
// It assumes that f is a func value. Otherwise the behavior is undefined.
//go:nosplit
func funcPC(f interface{}) uintptr {
	return **(**uintptr)(add(unsafe.Pointer(&f), ptrSize))
}
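
// Illustrative sketch, not part of this file: a func value is represented
// as a pointer to a closure record whose first word is the entry PC, so
// funcPC reads that word through the interface's data pointer:
//
//	pc := funcPC(Gosched) // entry PC of runtime.Gosched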

// called from assembly
func badmcall(fn func(*g)) {
	gothrow("runtime: mcall called on m->g0 stack")
}

func badmcall2(fn func(*g)) {
	gothrow("runtime: mcall function returned")
}

func badreflectcall() {
	panic("runtime: arg size to reflect.call more than 1GB")
}

func lockedOSThread() bool {
	gp := getg()
	return gp.lockedm != nil && gp.m.lockedg != nil
}

func newP() *p {
	return new(p)
}

func newM() *m {
	return new(m)
}

func newG() *g {
	return new(g)
}

func allgadd(gp *g) {
	if readgstatus(gp) == _Gidle {
		gothrow("allgadd: bad status Gidle")
	}

	lock(&allglock)
	allgs = append(allgs, gp)
	allg = &allgs[0]
	allglen = uintptr(len(allgs))
	unlock(&allglock)
}