2012-07-04 04:52:51 -06:00
|
|
|
// Copyright 2012 The Go Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style
|
|
|
|
// license that can be found in the LICENSE file.
|
|
|
|
|
|
|
|
#include "runtime.h"
|
|
|
|
#include "arch_GOARCH.h"
|
|
|
|
#include "stack.h"
|
2013-03-14 08:10:12 -06:00
|
|
|
#include "malloc.h"
|
2012-07-04 04:52:51 -06:00
|
|
|
|
|
|
|
// Code related to defer, panic and recover.

// Number of M's currently in the panic/crash sequence; incremented in
// startpanic and decremented in dopanic to coordinate concurrent crashes.
uint32 runtime·panicking;
// Serializes crash output: held from startpanic until dopanic unlocks it,
// so only one M prints its report at a time.
static Lock paniclk;
|
|
|
|
|
2012-12-22 12:54:39 -07:00
|
|
|
enum
{
	// Size in bytes of each per-goroutine DeferChunk used by
	// newdefer/popdefer to batch small Defer allocations.
	DeferChunkSize = 2048
};
|
|
|
|
|
|
|
|
// Allocate a Defer, usually as part of the larger frame of deferred functions.
// Each defer must be released with both popdefer and freedefer.
//
// siz is the byte size of the deferred call's argument frame; the Defer's
// args array is sized to hold a copy of it.
static Defer*
newdefer(int32 siz)
{
	int32 total;
	DeferChunk *c;
	Defer *d;

	c = g->dchunk;
	// Bytes needed: Defer header plus the argument copy, rounded up to
	// pointer alignment (d->args is the variable-length tail of Defer).
	total = sizeof(*d) + ROUND(siz, sizeof(uintptr)) - sizeof(d->args);
	if(c == nil || total > DeferChunkSize - c->off) {
		if(total > DeferChunkSize / 2) {
			// Not worth putting in any chunk.
			// Allocate a separate block.
			d = runtime·malloc(total);
			d->siz = siz;
			d->special = 1;	// heap-allocated, not in a chunk
			d->free = 1;	// freedefer should runtime·free it
			d->link = g->defer;
			g->defer = d;
			return d;
		}

		// Cannot fit in current chunk.
		// Switch to next chunk, allocating if necessary.
		// (dchunknext caches the previously emptied chunk to avoid
		// malloc/free pingponging; see popdefer.)
		c = g->dchunknext;
		if(c == nil)
			c = runtime·malloc(DeferChunkSize);
		c->prev = g->dchunk;
		c->off = sizeof(*c);	// payload starts just past the chunk header
		g->dchunk = c;
		g->dchunknext = nil;
	}

	// Bump-allocate the Defer from the current chunk.
	d = (Defer*)((byte*)c + c->off);
	c->off += total;
	d->siz = siz;
	d->special = 0;	// chunk-resident; popdefer reclaims the space
	d->free = 0;
	d->link = g->defer;
	g->defer = d;
	return d;
}
|
|
|
|
|
|
|
|
// Pop the current defer from the defer stack.
// Its contents are still valid until the goroutine begins executing again.
// In particular it is safe to call reflect.call(d->fn, d->argp, d->siz) after
// popdefer returns.
static void
popdefer(void)
{
	Defer *d;
	DeferChunk *c;
	int32 total;

	d = g->defer;
	if(d == nil)
		runtime·throw("runtime: popdefer nil");
	g->defer = d->link;
	if(d->special) {
		// Heap-allocated; freedefer is responsible for the memory.
		// Nothing else to do.
		return;
	}

	// Chunk-resident defer: it must be the most recent allocation in the
	// current chunk (mirrors the size computation in newdefer), so its
	// space can be reclaimed by rolling the chunk offset back.
	total = sizeof(*d) + ROUND(d->siz, sizeof(uintptr)) - sizeof(d->args);
	c = g->dchunk;
	if(c == nil || (byte*)d+total != (byte*)c+c->off)
		runtime·throw("runtime: popdefer phase error");
	c->off -= total;
	if(c->off == sizeof(*c)) {
		// Chunk now empty, so pop from stack.
		// Save in dchunknext both to help with pingponging between frames
		// and to make sure d is still valid on return.
		if(g->dchunknext != nil)
			runtime·free(g->dchunknext);
		g->dchunknext = c;
		g->dchunk = c->prev;
	}
}
|
|
|
|
|
|
|
|
// Free the given defer.
// For defers in the per-goroutine chunk this just clears the saved arguments.
// For large defers allocated on the heap, this frees them.
// The defer cannot be used after this call.
static void
freedefer(Defer *d)
{
	int32 total;

	if(d->special) {
		// NOTE(review): d->free can apparently be 0 for special defers
		// created outside newdefer — confirm against other Defer users
		// in the runtime before relying on it.
		if(d->free)
			runtime·free(d);
	} else {
		// Wipe out any possible pointers in argp/pc/fn/args.
		// (The chunk space may be reused; stale pointers would
		// otherwise keep garbage alive or confuse the collector.)
		total = sizeof(*d) + ROUND(d->siz, sizeof(uintptr)) - sizeof(d->args);
		runtime·memclr((byte*)d, total);
	}
}
|
|
|
|
|
2012-07-04 04:52:51 -06:00
|
|
|
// Create a new deferred function fn with siz bytes of arguments.
// The compiler turns a defer statement into a call to this.
// Cannot split the stack because it assumes that the arguments
// are available sequentially after &fn; they would not be
// copied if a stack split occurred. It's OK for this to call
// functions that split the stack.
#pragma textflag 7
uintptr
runtime·deferproc(int32 siz, FuncVal *fn, ...)
{
	Defer *d;

	d = newdefer(siz);
	d->fn = fn;
	// PC of the caller, used by recovery() to resume after recover.
	d->pc = runtime·getcallerpc(&siz);
	// thechar '5' is ARM, where the caller's saved link register sits
	// between fn and the argument words on the stack.
	if(thechar == '5')
		d->argp = (byte*)(&fn+2); // skip caller's saved link register
	else
		d->argp = (byte*)(&fn+1);
	// Snapshot the caller's argument frame; deferreturn copies it back
	// just before the deferred call actually runs.
	runtime·memmove(d->args, d->argp, d->siz);

	// deferproc returns 0 normally.
	// a deferred func that stops a panic
	// makes the deferproc return 1.
	// the code the compiler generates always
	// checks the return value and jumps to the
	// end of the function if deferproc returns != 0.
	return 0;
}
|
|
|
|
|
|
|
|
// Run a deferred function if there is one.
// The compiler inserts a call to this at the end of any
// function which calls defer.
// If there is a deferred function, this will call runtime·jmpdefer,
// which will jump to the deferred function such that it appears
// to have been called by the caller of deferreturn at the point
// just before deferreturn was called. The effect is that deferreturn
// is called again and again until there are no more deferred functions.
// Cannot split the stack because we reuse the caller's frame to
// call the deferred function.
//
// The ... in the prototype keeps the compiler from declaring
// an argument frame size. deferreturn is a very special function,
// and if the runtime ever asks for its frame size, that means
// the traceback routines are probably broken.
#pragma textflag 7
void
runtime·deferreturn(uintptr arg0, ...)
{
	Defer *d;
	byte *argp;
	FuncVal *fn;

	d = g->defer;
	if(d == nil)
		return;
	// Only run this defer if it belongs to the immediate caller's frame:
	// d->argp was recorded by deferproc as that frame's argument pointer.
	argp = (byte*)&arg0;
	if(d->argp != argp)
		return;

	// Moving arguments around.
	// Do not allow preemption here, because the garbage collector
	// won't know the form of the arguments until the jmpdefer can
	// flip the PC over to fn.
	m->locks++;
	runtime·memmove(argp, d->args, d->siz);
	fn = d->fn;
	popdefer();
	freedefer(d);
	m->locks--;
	// Re-arm any preemption request that arrived while locks were held.
	if(m->locks == 0 && g->preempt)
		g->stackguard0 = StackPreempt;
	// Tail-jump to fn, reusing the caller's frame (never returns here).
	runtime·jmpdefer(fn, argp);
}
|
|
|
|
|
|
|
|
// Run all deferred functions for the current goroutine.
// Used by Goexit, which bypasses the normal deferreturn path.
static void
rundefer(void)
{
	Defer *d;

	while((d = g->defer) != nil) {
		// Pop before calling, so a recursive defer/panic inside
		// the deferred function sees a consistent defer stack.
		popdefer();
		reflect·call(d->fn, (byte*)d->args, d->siz);
		freedefer(d);
	}
}
|
|
|
|
|
|
|
|
// Print all currently active panics. Used when crashing.
// Recurses to the end of the list first, so the oldest panic
// is printed first and later panics are indented beneath it.
static void
printpanics(Panic *p)
{
	if(p->link) {
		printpanics(p->link);
		runtime·printf("\t");
	}
	runtime·printf("panic: ");
	runtime·printany(p->arg);
	if(p->recovered)
		runtime·printf(" [recovered]");
	runtime·printf("\n");
}
|
|
|
|
|
|
|
|
static void recovery(G*);	// defined below; resumes a goroutine whose panic was recovered
|
|
|
|
|
|
|
|
// The implementation of the predeclared function panic.
// Pushes a new Panic record, then runs deferred calls one at a time
// until one of them recovers (handing control to recovery via mcall)
// or none are left, in which case the process crashes.
void
runtime·panic(Eface e)
{
	Defer *d;
	Panic *p;
	void *pc, *argp;

	p = runtime·mal(sizeof *p);
	p->arg = e;
	p->link = g->panic;
	p->stackbase = g->stackbase;
	g->panic = p;

	for(;;) {
		d = g->defer;
		if(d == nil)
			break;
		// take defer off list in case of recursive panic
		popdefer();
		g->ispanic = true;	// rock for newstack, where reflect.call ends up
		// Save argp/pc before the call: the deferred function may
		// recover, and freedefer wipes d's contents.
		argp = d->argp;
		pc = d->pc;
		reflect·call(d->fn, (byte*)d->args, d->siz);
		freedefer(d);
		if(p->recovered) {
			g->panic = p->link;
			if(g->panic == nil)	// must be done with signal
				g->sig = 0;
			runtime·free(p);
			// Pass information about recovering frame to recovery.
			g->sigcode0 = (uintptr)argp;
			g->sigcode1 = (uintptr)pc;
			runtime·mcall(recovery);
			runtime·throw("recovery failed"); // mcall should not return
		}
	}

	// ran out of deferred calls - old-school panic now
	runtime·startpanic();
	printpanics(g->panic);
	runtime·dopanic(0);
}
|
|
|
|
|
|
|
|
// Unwind the stack after a deferred function calls recover
// after a panic. Then arrange to continue running as though
// the caller of the deferred function returned normally.
// Runs on the m->g0 stack (reached via runtime·mcall), with the
// recovering frame's argp/pc passed in gp->sigcode0/sigcode1.
static void
recovery(G *gp)
{
	void *argp;
	uintptr pc;

	// Info about defer passed in G struct.
	argp = (void*)gp->sigcode0;
	pc = (uintptr)gp->sigcode1;

	// Unwind to the stack frame with d's arguments in it.
	runtime·unwindstack(gp, argp);

	// Make the deferproc for this d return again,
	// this time returning 1. The calling function will
	// jump to the standard return epilogue.
	// The -2*sizeof(uintptr) makes up for the
	// two extra words that are on the stack at
	// each call to deferproc.
	// (The pc we're returning to does pop pop
	// before it tests the return value.)
	// On the arm there are 2 saved LRs mixed in too.
	if(thechar == '5')
		gp->sched.sp = (uintptr)argp - 4*sizeof(uintptr);
	else
		gp->sched.sp = (uintptr)argp - 2*sizeof(uintptr);
	gp->sched.pc = pc;
	gp->sched.lr = 0;
	gp->sched.ret = 1;	// deferproc's re-delivered return value
	runtime·gogo(&gp->sched);	// resume gp; does not return
}
|
|
|
|
|
|
|
|
// Free stack frames until we hit the last one
// or until we find the one that contains the sp.
void
runtime·unwindstack(G *gp, byte *sp)
{
	Stktop *top;
	byte *stk;

	// Must be called from a different goroutine, usually m->g0.
	if(g == gp)
		runtime·throw("unwindstack on self");

	// Pop stack segments until sp lies within the current segment
	// (or we reach the bottom segment, whose top->stackbase is 0).
	while((top = (Stktop*)gp->stackbase) != 0 && top->stackbase != 0) {
		stk = (byte*)gp->stackguard - StackGuard;
		if(stk <= sp && sp < (byte*)gp->stackbase)
			break;
		gp->stackbase = top->stackbase;
		gp->stackguard = top->stackguard;
		gp->stackguard0 = gp->stackguard;	// keep preemption sentinel consistent
		if(top->free != 0)
			runtime·stackfree(stk, top->free);
	}

	// Sanity check: sp must now lie within the remaining segment.
	if(sp != nil && (sp < (byte*)gp->stackguard - StackGuard || (byte*)gp->stackbase < sp)) {
		runtime·printf("recover: %p not in [%p, %p]\n", sp, gp->stackguard - StackGuard, gp->stackbase);
		runtime·throw("bad unwindstack");
	}
}
|
|
|
|
|
|
|
|
// The implementation of the predeclared function recover.
// Cannot split the stack because it needs to reliably
// find the stack segment of its caller.
//
// argp is the caller's argument pointer; ret is the out-parameter
// through which the recovered panic value (or nil) is returned.
#pragma textflag 7
void
runtime·recover(byte *argp, Eface ret)
{
	Stktop *top, *oldtop;
	Panic *p;

	// Must be a panic going on.
	if((p = g->panic) == nil || p->recovered)
		goto nomatch;

	// Frame must be at the top of the stack segment,
	// because each deferred call starts a new stack
	// segment as a side effect of using reflect.call.
	// (There has to be some way to remember the
	// variable argument frame size, and the segment
	// code already takes care of that for us, so we
	// reuse it.)
	//
	// As usual closures complicate things: the fp that
	// the closure implementation function claims to have
	// is where the explicit arguments start, after the
	// implicit pointer arguments and PC slot.
	// If we're on the first new segment for a closure,
	// then fp == top - top->args is correct, but if
	// the closure has its own big argument frame and
	// allocated a second segment (see below),
	// the fp is slightly above top - top->args.
	// That condition can't happen normally though
	// (stack pointers go down, not up), so we can accept
	// any fp between top and top - top->args as
	// indicating the top of the segment.
	top = (Stktop*)g->stackbase;
	if(argp < (byte*)top - top->argsize || (byte*)top < argp)
		goto nomatch;

	// The deferred call makes a new segment big enough
	// for the argument frame but not necessarily big
	// enough for the function's local frame (size unknown
	// at the time of the call), so the function might have
	// made its own segment immediately. If that's the
	// case, back top up to the older one, the one that
	// reflect.call would have made for the panic.
	//
	// The fp comparison here checks that the argument
	// frame that was copied during the split (the top->args
	// bytes above top->fp) abuts the old top of stack.
	// This is a correct test for both closure and non-closure code.
	oldtop = (Stktop*)top->stackbase;
	if(oldtop != nil && top->argp == (byte*)oldtop - top->argsize)
		top = oldtop;

	// Now we have the segment that was created to
	// run this call. It must have been marked as a panic segment.
	if(!top->panic)
		goto nomatch;

	// Okay, this is the top frame of a deferred call
	// in response to a panic. It can see the panic argument.
	p->recovered = 1;
	ret = p->arg;
	FLUSH(&ret);
	return;

nomatch:
	// Not called directly from a deferred function during a panic:
	// recover() returns nil.
	ret.type = nil;
	ret.data = nil;
	FLUSH(&ret);
}
|
|
|
|
|
|
|
|
// Begin the crash sequence for this M: mark it dying, bump the global
// panicking count, and take paniclk so only one M prints its report
// at a time (dopanic releases the lock).
void
runtime·startpanic(void)
{
	if(runtime·mheap.cachealloc.size == 0) { // very early
		runtime·printf("runtime: panic before malloc heap initialized\n");
		m->mallocing = 1; // tell rest of panic not to try to malloc
	} else if(m->mcache == nil) // can happen if called from signal handler or throw
		m->mcache = runtime·allocmcache();
	if(m->dying) {
		// Already crashing on this M; give up immediately.
		runtime·printf("panic during panic\n");
		runtime·exit(3);
	}
	m->dying = 1;
	runtime·xadd(&runtime·panicking, 1);
	runtime·lock(&paniclk);
}
|
|
|
|
|
|
|
|
// Print the crash report (signal info and tracebacks, per GOTRACEBACK)
// and terminate the process. Expects paniclk to be held (taken in
// startpanic); never returns.
void
runtime·dopanic(int32 unused)
{
	static bool didothers;
	bool crash;
	int32 t;

	if(g->sig != 0)
		runtime·printf("[signal %x code=%p addr=%p pc=%p]\n",
			g->sig, g->sigcode0, g->sigcode1, g->sigpc);

	// t is the traceback level; crash is set if GOTRACEBACK asks
	// for a core dump (handled at the bottom via runtime·crash).
	if((t = runtime·gotraceback(&crash)) > 0){
		if(g != m->g0) {
			runtime·printf("\n");
			runtime·goroutineheader(g);
			runtime·traceback((uintptr)runtime·getcallerpc(&unused), (uintptr)runtime·getcallersp(&unused), 0, g);
		} else if(t >= 2) {
			// On the scheduler stack; only print it at higher levels.
			runtime·printf("\nruntime stack:\n");
			runtime·traceback((uintptr)runtime·getcallerpc(&unused), (uintptr)runtime·getcallersp(&unused), 0, g);
		}
		if(!didothers) {
			didothers = true;
			runtime·tracebackothers(g);
		}
	}
	runtime·unlock(&paniclk);
	if(runtime·xadd(&runtime·panicking, -1) != 0) {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		static Lock deadlock;
		runtime·lock(&deadlock);
		runtime·lock(&deadlock);
	}

	if(crash)
		runtime·crash();

	runtime·exit(2);
}
|
|
|
|
|
|
|
|
// Called by compiler-generated code on an out-of-range index expression.
void
runtime·panicindex(void)
{
	runtime·panicstring("index out of range");
}
|
|
|
|
|
|
|
|
// Called by compiler-generated code on an out-of-range slice expression.
void
runtime·panicslice(void)
{
	runtime·panicstring("slice bounds out of range");
}
|
|
|
|
|
|
|
|
// Called if control ever reaches the end of a value-returning function
// without a return statement; the compiler is supposed to make that
// impossible.
void
runtime·throwreturn(void)
{
	// can only happen if compiler is broken
	runtime·throw("no return at end of a typed function - compiler is broken");
}
|
|
|
|
|
|
|
|
// Called if a package init function is re-entered during initialization.
void
runtime·throwinit(void)
{
	// can only happen with linker skew
	runtime·throw("recursive call during initialization - linker skew");
}
|
|
|
|
|
|
|
|
// Report a fatal runtime error and crash the process. Unlike panic,
// a throw cannot be recovered. Never returns.
void
runtime·throw(int8 *s)
{
	if(m->throwing == 0)
		m->throwing = 1;
	runtime·startpanic();
	runtime·printf("fatal error: %s\n", s);
	runtime·dopanic(0);
	// Fault deliberately in case dopanic somehow returned.
	*(int32*)0 = 0; // not reached
	runtime·exit(1); // even more not reached
}
|
|
|
|
|
|
|
|
// Panic with a runtime error described by the static string s,
// wrapped as an error value. Used by panicindex, panicslice, etc.
void
runtime·panicstring(int8 *s)
{
	Eface err;

	// A panic during garbage collection cannot run deferred calls;
	// crash immediately instead of deadlocking.
	if(m->gcing) {
		runtime·printf("panic: %s\n", s);
		runtime·throw("panic during gc");
	}
	runtime·newErrorString(runtime·gostringnocopy((byte*)s), &err);
	runtime·panic(err);
}
|
|
|
|
|
|
|
|
// Goexit terminates the calling goroutine after running all of its
// deferred calls. The implementation of runtime.Goexit.
void
runtime·Goexit(void)
{
	rundefer();
	runtime·goexit();
}
|