Revert "liblink, cmd/ld, runtime: remove stackguard1"
This reverts commit ab0535ae3f.

I think it will remain useful to distinguish code that must run on a
system stack from code that can run on either stack, even if that
distinction is no longer based on the implementation language.

That is, I expect to add a //go:systemstack comment that, in terms of
the old implementation, tells the compiler to pretend this function
was written in C.
Change-Id: I33d2ebb2f99ae12496484c6ec8ed07233d693275
Reviewed-on: https://go-review.googlesource.com/2275
Reviewed-by: Russ Cox <rsc@golang.org>
This commit is contained in: parent a1c9e10371, commit e6d3511264
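
Note: the distinction the commit message wants to preserve is encoded in two guard words on each g: stackguard0, which the Go stack-growth prologue checks, and stackguard1, which the C stack-growth prologue checks. The following is a minimal, self-contained Go sketch of that split, not runtime code; the type, field, and constant names are illustrative only.

package main

import "fmt"

// gSketch models only the two guard words this revert restores on the
// runtime's g. Type, field, and constant names here are illustrative.
type gSketch struct {
	stackLo     uintptr
	stackguard0 uintptr // checked by the prologue of Go-compiled functions
	stackguard1 uintptr // checked by the prologue of C-compiled functions
}

const stackGuard = 512 // stand-in for the runtime's _StackGuard constant

// goPrologueOK mimics the Go prologue: keep running if sp is above
// stackguard0, otherwise call morestack (growth or preemption).
func goPrologueOK(g *gSketch, sp uintptr) bool { return sp > g.stackguard0 }

// cPrologueOK mimics the C prologue: keep running if sp is above
// stackguard1, otherwise call morestackc.
func cPrologueOK(g *gSketch, sp uintptr) bool { return sp > g.stackguard1 }

func main() {
	// Ordinary goroutine stack: stackguard1 is ~0, so the C check can
	// never pass and morestackc (restored in the diff below) would crash.
	user := &gSketch{stackLo: 0x1000, stackguard0: 0x1000 + stackGuard, stackguard1: ^uintptr(0)}
	// g0, the system stack: both guards are real, so C code may run here.
	g0 := &gSketch{stackLo: 0x8000, stackguard0: 0x8000 + stackGuard, stackguard1: 0x8000 + stackGuard}

	fmt.Println(goPrologueOK(user, 0x1400), cPrologueOK(user, 0x1400)) // true false
	fmt.Println(goPrologueOK(g0, 0x8400), cPrologueOK(g0, 0x8400))     // true true
}

On an ordinary goroutine stack the C check can never pass, which is exactly the crash path the diff below reintroduces via runtime.morestackc.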
@@ -131,6 +131,7 @@ struct LSym
     short type;
     short version;
     uchar dupok;
+    uchar cfunc;
     uchar external;
     uchar nosplit;
     uchar reachable;
@@ -1564,3 +1564,56 @@ diag(char *fmt, ...)
         errorexit();
     }
 }
+
+void
+checkgo(void)
+{
+    LSym *s;
+    Reloc *r;
+    int i;
+    int changed;
+
+    if(!debug['C'])
+        return;
+
+    // TODO(rsc,khr): Eventually we want to get to no Go-called C functions at all,
+    // which would simplify this logic quite a bit.
+
+    // Mark every Go-called C function with cfunc=2, recursively.
+    do {
+        changed = 0;
+        for(s = ctxt->textp; s != nil; s = s->next) {
+            if(s->cfunc == 0 || (s->cfunc == 2 && s->nosplit)) {
+                for(i=0; i<s->nr; i++) {
+                    r = &s->r[i];
+                    if(r->sym == nil)
+                        continue;
+                    if((r->type == R_CALL || r->type == R_CALLARM) && r->sym->type == STEXT) {
+                        if(r->sym->cfunc == 1) {
+                            changed = 1;
+                            r->sym->cfunc = 2;
+                        }
+                    }
+                }
+            }
+        }
+    }while(changed);
+
+    // Complain about Go-called C functions that can split the stack
+    // (that can be preempted for garbage collection or trigger a stack copy).
+    for(s = ctxt->textp; s != nil; s = s->next) {
+        if(s->cfunc == 0 || (s->cfunc == 2 && s->nosplit)) {
+            for(i=0; i<s->nr; i++) {
+                r = &s->r[i];
+                if(r->sym == nil)
+                    continue;
+                if((r->type == R_CALL || r->type == R_CALLARM) && r->sym->type == STEXT) {
+                    if(s->cfunc == 0 && r->sym->cfunc == 2 && !r->sym->nosplit)
+                        print("Go %s calls C %s\n", s->name, r->sym->name);
+                    else if(s->cfunc == 2 && s->nosplit && !r->sym->nosplit)
+                        print("Go calls C %s calls %s\n", s->name, r->sym->name);
+                }
+            }
+        }
+    }
+}
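Note: the restored checkgo pass runs only under the linker's -C flag and works in two sweeps: a fixed-point loop that propagates a "reached from Go" mark (cfunc=2) through call relocations, then a reporting loop. Below is a rough Go restatement of that logic; the simplified symbol type and its field names are invented for this sketch.

package main

import "fmt"

// sym is a simplified stand-in for the linker's LSym: only the fields the
// checkgo logic needs. Field and type names are invented for this sketch.
type sym struct {
	name    string
	cfunc   int    // 0 = Go, 1 = C, 2 = C reachable from Go
	nosplit bool
	calls   []*sym // stands in for the R_CALL/R_CALLARM relocations
}

// markGoCalledC mirrors the first loop in checkgo: repeatedly promote C
// functions (cfunc==1) to cfunc==2 when they are called from Go code or from
// an already-marked nosplit C function, until a fixed point is reached.
func markGoCalledC(textp []*sym) {
	for changed := true; changed; {
		changed = false
		for _, s := range textp {
			if s.cfunc == 0 || (s.cfunc == 2 && s.nosplit) {
				for _, callee := range s.calls {
					if callee.cfunc == 1 {
						callee.cfunc = 2
						changed = true
					}
				}
			}
		}
	}
}

// complain mirrors the second loop: report splittable C functions reached from Go.
func complain(textp []*sym) {
	for _, s := range textp {
		if s.cfunc == 0 || (s.cfunc == 2 && s.nosplit) {
			for _, callee := range s.calls {
				if s.cfunc == 0 && callee.cfunc == 2 && !callee.nosplit {
					fmt.Printf("Go %s calls C %s\n", s.name, callee.name)
				} else if s.cfunc == 2 && s.nosplit && !callee.nosplit {
					fmt.Printf("Go calls C %s calls %s\n", s.name, callee.name)
				}
			}
		}
	}
}

func main() {
	cLeaf := &sym{name: "cLeaf", cfunc: 1} // splittable C function
	cMid := &sym{name: "cMid", cfunc: 1, nosplit: true, calls: []*sym{cLeaf}}
	goFn := &sym{name: "main.f", cfunc: 0, calls: []*sym{cMid}}
	textp := []*sym{goFn, cMid, cLeaf}

	markGoCalledC(textp)
	complain(textp) // prints: Go calls C cMid calls cLeaf
}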
@@ -183,6 +183,7 @@ uint16 be16(uchar *b);
 uint32 be32(uchar *b);
 uint64 be64(uchar *b);
 void callgraph(void);
+void checkgo(void);
 void cflush(void);
 void codeblk(int64 addr, int64 size);
 vlong cpos(void);

@@ -172,6 +172,7 @@ main(int argc, char *argv[])
         mark(linklookup(ctxt, "runtime.read_tls_fallback", 0));
     }
 
+    checkgo();
     deadcode();
     callgraph();
     paramspace = "SP"; /* (FP) now (SP) on output */

@@ -474,7 +474,7 @@ addstacksplit(Link *ctxt, LSym *cursym)
     p->as = AMOVW;
     p->from.type = D_OREG;
     p->from.reg = REGG;
-    p->from.offset = 3*ctxt->arch->ptrsize; // G.panic
+    p->from.offset = 4*ctxt->arch->ptrsize; // G.panic
     p->to.type = D_REG;
     p->to.reg = 1;
 

@@ -783,7 +783,9 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int noctxt)
     p->as = AMOVW;
     p->from.type = D_OREG;
     p->from.reg = REGG;
-    p->from.offset = 2*ctxt->arch->ptrsize; // G.stackguard
+    p->from.offset = 2*ctxt->arch->ptrsize; // G.stackguard0
+    if(ctxt->cursym->cfunc)
+        p->from.offset = 3*ctxt->arch->ptrsize; // G.stackguard1
     p->to.type = D_REG;
     p->to.reg = 1;
 

@@ -876,7 +878,10 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int noctxt)
     p->as = ABL;
     p->scond = C_SCOND_LS;
     p->to.type = D_BRANCH;
-    p->to.sym = ctxt->symmorestack[noctxt];
+    if(ctxt->cursym->cfunc)
+        p->to.sym = linklookup(ctxt, "runtime.morestackc", 0);
+    else
+        p->to.sym = ctxt->symmorestack[noctxt];
 
     // BLS start
     p = appendp(ctxt, p);
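Note: each of the per-architecture stacksplit changes below (amd64, 386, ppc64, matching the ARM hunks above) makes the emitted prologue depend on the symbol's cfunc flag in two places: which guard word in g it compares against, and which growth helper it calls when the check fails. A compact Go sketch of that selection follows; the helper names stand in for the liblink plumbing and are not part of the diff.

package main

import "fmt"

const ptrsize = 8 // assume a 64-bit target for the offsets below

// guardOffset returns the offset within g of the guard word the prologue
// should compare against, mirroring the 2*ptrsize / 3*ptrsize choice the
// linker makes from cursym->cfunc.
func guardOffset(cfunc bool) int64 {
	if cfunc {
		return 3 * ptrsize // G.stackguard1
	}
	return 2 * ptrsize // G.stackguard0
}

// growthHelper returns the symbol the prologue should call when the check
// fails, mirroring the runtime.morestackc vs symmorestack choice.
func growthHelper(cfunc, noctxt bool) string {
	if cfunc {
		return "runtime.morestackc"
	}
	if noctxt {
		return "runtime.morestack_noctxt"
	}
	return "runtime.morestack"
}

func main() {
	for _, cfunc := range []bool{false, true} {
		fmt.Printf("cfunc=%v: compare SP against g+%d, call %s on overflow\n",
			cfunc, guardOffset(cfunc), growthHelper(cfunc, false))
	}
}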
@@ -452,7 +452,7 @@ addstacksplit(Link *ctxt, LSym *cursym)
     p = appendp(ctxt, p);
     p->as = AMOVQ;
     p->from.type = D_INDIR+D_CX;
-    p->from.offset = 3*ctxt->arch->ptrsize; // G.panic
+    p->from.offset = 4*ctxt->arch->ptrsize; // G.panic
     p->to.type = D_BX;
     if(ctxt->headtype == Hnacl) {
         p->as = AMOVL;

@@ -689,7 +689,9 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int32 textarg, int noctxt, Prog
     p->as = cmp;
     p->from.type = D_SP;
     indir_cx(ctxt, &p->to);
-    p->to.offset = 2*ctxt->arch->ptrsize; // G.stackguard
+    p->to.offset = 2*ctxt->arch->ptrsize; // G.stackguard0
+    if(ctxt->cursym->cfunc)
+        p->to.offset = 3*ctxt->arch->ptrsize; // G.stackguard1
 } else if(framesize <= StackBig) {
     // large stack: SP-framesize <= stackguard-StackSmall
     //	LEAQ -xxx(SP), AX

@@ -704,7 +706,9 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int32 textarg, int noctxt, Prog
     p->as = cmp;
     p->from.type = D_AX;
     indir_cx(ctxt, &p->to);
-    p->to.offset = 2*ctxt->arch->ptrsize; // G.stackguard
+    p->to.offset = 2*ctxt->arch->ptrsize; // G.stackguard0
+    if(ctxt->cursym->cfunc)
+        p->to.offset = 3*ctxt->arch->ptrsize; // G.stackguard1
 } else {
     // Such a large stack we need to protect against wraparound.
     // If SP is close to zero:

@@ -724,7 +728,9 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int32 textarg, int noctxt, Prog
     p = appendp(ctxt, p);
     p->as = mov;
     indir_cx(ctxt, &p->from);
-    p->from.offset = 2*ctxt->arch->ptrsize; // G.stackguard
+    p->from.offset = 2*ctxt->arch->ptrsize; // G.stackguard0
+    if(ctxt->cursym->cfunc)
+        p->from.offset = 3*ctxt->arch->ptrsize; // G.stackguard1
     p->to.type = D_SI;
 
     p = appendp(ctxt, p);

@@ -765,7 +771,10 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int32 textarg, int noctxt, Prog
     p = appendp(ctxt, p);
     p->as = ACALL;
     p->to.type = D_BRANCH;
-    p->to.sym = ctxt->symmorestack[noctxt];
+    if(ctxt->cursym->cfunc)
+        p->to.sym = linklookup(ctxt, "runtime.morestackc", 0);
+    else
+        p->to.sym = ctxt->symmorestack[noctxt];
 
     p = appendp(ctxt, p);
     p->as = AJMP;

@@ -335,7 +335,7 @@ addstacksplit(Link *ctxt, LSym *cursym)
     p = appendp(ctxt, p);
     p->as = AMOVL;
     p->from.type = D_INDIR+D_CX;
-    p->from.offset = 3*ctxt->arch->ptrsize; // G.panic
+    p->from.offset = 4*ctxt->arch->ptrsize; // G.panic
     p->to.type = D_BX;
 
     p = appendp(ctxt, p);

@@ -538,7 +538,9 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int noctxt, Prog **jmpok)
     p->as = ACMPL;
     p->from.type = D_SP;
     p->to.type = D_INDIR+D_CX;
-    p->to.offset = 2*ctxt->arch->ptrsize; // G.stackguard
+    p->to.offset = 2*ctxt->arch->ptrsize; // G.stackguard0
+    if(ctxt->cursym->cfunc)
+        p->to.offset = 3*ctxt->arch->ptrsize; // G.stackguard1
 } else if(framesize <= StackBig) {
     // large stack: SP-framesize <= stackguard-StackSmall
     //	LEAL -(framesize-StackSmall)(SP), AX

@@ -553,7 +555,9 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int noctxt, Prog **jmpok)
     p->as = ACMPL;
     p->from.type = D_AX;
     p->to.type = D_INDIR+D_CX;
-    p->to.offset = 2*ctxt->arch->ptrsize; // G.stackguard
+    p->to.offset = 2*ctxt->arch->ptrsize; // G.stackguard0
+    if(ctxt->cursym->cfunc)
+        p->to.offset = 3*ctxt->arch->ptrsize; // G.stackguard1
 } else {
     // Such a large stack we need to protect against wraparound
     // if SP is close to zero.

@@ -573,7 +577,9 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int noctxt, Prog **jmpok)
     p->as = AMOVL;
     p->from.type = D_INDIR+D_CX;
     p->from.offset = 0;
-    p->from.offset = 2*ctxt->arch->ptrsize; // G.stackguard
+    p->from.offset = 2*ctxt->arch->ptrsize; // G.stackguard0
+    if(ctxt->cursym->cfunc)
+        p->from.offset = 3*ctxt->arch->ptrsize; // G.stackguard1
     p->to.type = D_SI;
 
     p = appendp(ctxt, p);

@@ -616,7 +622,10 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int noctxt, Prog **jmpok)
     p = appendp(ctxt, p);
     p->as = ACALL;
     p->to.type = D_BRANCH;
-    p->to.sym = ctxt->symmorestack[noctxt];
+    if(ctxt->cursym->cfunc)
+        p->to.sym = linklookup(ctxt, "runtime.morestackc", 0);
+    else
+        p->to.sym = ctxt->symmorestack[noctxt];
 
     p = appendp(ctxt, p);
     p->as = AJMP;

@@ -492,7 +492,7 @@ addstacksplit(Link *ctxt, LSym *cursym)
     q->as = AMOVD;
     q->from.type = D_OREG;
     q->from.reg = REGG;
-    q->from.offset = 3*ctxt->arch->ptrsize; // G.panic
+    q->from.offset = 4*ctxt->arch->ptrsize; // G.panic
     q->to.type = D_REG;
     q->to.reg = 3;
 

@@ -724,7 +724,9 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int noctxt)
     p->as = AMOVD;
     p->from.type = D_OREG;
     p->from.reg = REGG;
-    p->from.offset = 2*ctxt->arch->ptrsize; // G.stackguard
+    p->from.offset = 2*ctxt->arch->ptrsize; // G.stackguard0
+    if(ctxt->cursym->cfunc)
+        p->from.offset = 3*ctxt->arch->ptrsize; // G.stackguard1
     p->to.type = D_REG;
     p->to.reg = 3;
 

@@ -832,7 +834,10 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int noctxt)
     p = appendp(ctxt, p);
     p->as = ABL;
     p->to.type = D_BRANCH;
-    p->to.sym = ctxt->symmorestack[noctxt];
+    if(ctxt->cursym->cfunc)
+        p->to.sym = linklookup(ctxt, "runtime.morestackc", 0);
+    else
+        p->to.sym = ctxt->symmorestack[noctxt];
 
     // BR start
     p = appendp(ctxt, p);

@@ -332,6 +332,8 @@ writesym(Link *ctxt, Biobuf *b, LSym *s)
     Bprint(ctxt->bso, "t=%d ", s->type);
     if(s->dupok)
         Bprint(ctxt->bso, "dupok ");
+    if(s->cfunc)
+        Bprint(ctxt->bso, "cfunc ");
     if(s->nosplit)
         Bprint(ctxt->bso, "nosplit ");
     Bprint(ctxt->bso, "size=%lld value=%lld", (vlong)s->size, (vlong)s->value);

@@ -397,7 +399,7 @@ writesym(Link *ctxt, Biobuf *b, LSym *s)
     wrint(b, s->args);
     wrint(b, s->locals);
     wrint(b, s->nosplit);
-    wrint(b, s->leaf);
+    wrint(b, s->leaf | s->cfunc<<1);
     n = 0;
     for(a = s->autom; a != nil; a = a->link)
         n++;

@@ -641,6 +643,7 @@ overwrite:
     s->nosplit = rdint(f);
     v = rdint(f);
     s->leaf = v&1;
+    s->cfunc = v&2;
     n = rdint(f);
     for(i=0; i<n; i++) {
         a = emallocz(sizeof *a);
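Note: the object-file change above packs the new cfunc bit alongside leaf in a single integer, written as s->leaf | s->cfunc<<1 and read back with v&1 and v&2. A small Go round trip showing the same packing; the function names are hypothetical.

package main

import "fmt"

// packFlags mirrors writesym: bit 0 carries leaf, bit 1 carries cfunc.
func packFlags(leaf, cfunc bool) int {
	v := 0
	if leaf {
		v |= 1
	}
	if cfunc {
		v |= 1 << 1
	}
	return v
}

// unpackFlags mirrors the reader: leaf = v&1, cfunc = v&2 (any nonzero
// value is treated as "set", matching the C code's truthiness).
func unpackFlags(v int) (leaf, cfunc bool) {
	return v&1 != 0, v&2 != 0
}

func main() {
	v := packFlags(true, true)
	leaf, cfunc := unpackFlags(v)
	fmt.Println(v, leaf, cfunc) // 3 true true
}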
@@ -696,6 +699,8 @@ overwrite:
     Bprint(ctxt->bso, "t=%d ", s->type);
     if(s->dupok)
         Bprint(ctxt->bso, "dupok ");
+    if(s->cfunc)
+        Bprint(ctxt->bso, "cfunc ");
     if(s->nosplit)
         Bprint(ctxt->bso, "nosplit ");
     Bprint(ctxt->bso, "size=%lld value=%lld", (vlong)s->size, (vlong)s->value);

@@ -20,7 +20,8 @@ TEXT runtime·rt0_go(SB),NOSPLIT,$0
     // _cgo_init may update stackguard.
     MOVL $runtime·g0(SB), BP
     LEAL (-64*1024+104)(SP), BX
-    MOVL BX, g_stackguard(BP)
+    MOVL BX, g_stackguard0(BP)
+    MOVL BX, g_stackguard1(BP)
     MOVL BX, (g_stack+stack_lo)(BP)
     MOVL SP, (g_stack+stack_hi)(BP)
 

@@ -50,7 +51,8 @@ nocpuinfo:
     MOVL $runtime·g0(SB), CX
     MOVL (g_stack+stack_lo)(CX), AX
     ADDL $const__StackGuard, AX
-    MOVL AX, g_stackguard(CX)
+    MOVL AX, g_stackguard0(CX)
+    MOVL AX, g_stackguard1(CX)
 
     // skip runtime·ldt0setup(SB) and tls test after _cgo_init for non-windows
     CMPL runtime·iswindows(SB), $0

@@ -20,7 +20,8 @@ TEXT runtime·rt0_go(SB),NOSPLIT,$0
     // _cgo_init may update stackguard.
     MOVQ $runtime·g0(SB), DI
     LEAQ (-64*1024+104)(SP), BX
-    MOVQ BX, g_stackguard(DI)
+    MOVQ BX, g_stackguard0(DI)
+    MOVQ BX, g_stackguard1(DI)
     MOVQ BX, (g_stack+stack_lo)(DI)
     MOVQ SP, (g_stack+stack_hi)(DI)
 

@@ -48,7 +49,8 @@ nocpuinfo:
     MOVQ $runtime·g0(SB), CX
     MOVQ (g_stack+stack_lo)(CX), AX
     ADDQ $const__StackGuard, AX
-    MOVQ AX, g_stackguard(CX)
+    MOVQ AX, g_stackguard0(CX)
+    MOVQ AX, g_stackguard1(CX)
 
     CMPL runtime·iswindows(SB), $0
     JEQ ok

@@ -22,7 +22,8 @@ TEXT runtime·rt0_go(SB),NOSPLIT,$0
     // create istack out of the given (operating system) stack.
     MOVL $runtime·g0(SB), DI
     LEAL (-64*1024+104)(SP), BX
-    MOVL BX, g_stackguard(DI)
+    MOVL BX, g_stackguard0(DI)
+    MOVL BX, g_stackguard1(DI)
     MOVL BX, (g_stack+stack_lo)(DI)
     MOVL SP, (g_stack+stack_hi)(DI)
 

@@ -32,7 +32,8 @@ TEXT runtime·rt0_go(SB),NOSPLIT,$-4
 
     // create istack out of the OS stack
     MOVW $(-8192+104)(R13), R0
-    MOVW R0, g_stackguard(g)
+    MOVW R0, g_stackguard0(g)
+    MOVW R0, g_stackguard1(g)
     MOVW R0, (g_stack+stack_lo)(g)
     MOVW R13, (g_stack+stack_hi)(g)
 

@@ -55,7 +56,8 @@ nocgo:
     // update stackguard after _cgo_init
     MOVW (g_stack+stack_lo)(g), R0
     ADD $const__StackGuard, R0
-    MOVW R0, g_stackguard(g)
+    MOVW R0, g_stackguard0(g)
+    MOVW R0, g_stackguard1(g)
 
     BL runtime·checkgoarm(SB)
     BL runtime·check(SB)

@@ -22,7 +22,8 @@ TEXT runtime·rt0_go(SB),NOSPLIT,$0
     MOVD $runtime·g0(SB), g
     MOVD $(-64*1024), R31
     ADD R31, R1, R3
-    MOVD R3, g_stackguard(g)
+    MOVD R3, g_stackguard0(g)
+    MOVD R3, g_stackguard1(g)
     MOVD R3, (g_stack+stack_lo)(g)
     MOVD R1, (g_stack+stack_hi)(g)
 

@@ -114,7 +114,7 @@ func unlock(l *mutex) {
         throw("runtime·unlock: lock count")
     }
     if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
-        gp.stackguard = stackPreempt
+        gp.stackguard0 = stackPreempt
     }
 }
 

@@ -115,7 +115,7 @@ func unlock(l *mutex) {
         throw("runtime·unlock: lock count")
     }
     if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
-        gp.stackguard = stackPreempt
+        gp.stackguard0 = stackPreempt
     }
 }
 

@@ -64,7 +64,7 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
     }
     mp.mallocing = 1
     if mp.curg != nil {
-        mp.curg.stackguard = ^uintptr(0xfff) | 0xbad
+        mp.curg.stackguard0 = ^uintptr(0xfff) | 0xbad
     }
 }
 

@@ -127,7 +127,7 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
     }
     mp.mallocing = 0
     if mp.curg != nil {
-        mp.curg.stackguard = mp.curg.stack.lo + _StackGuard
+        mp.curg.stackguard0 = mp.curg.stack.lo + _StackGuard
     }
     // Note: one releasem for the acquirem just above.
     // The other for the acquirem at start of malloc.

@@ -319,7 +319,7 @@ marked:
     }
     mp.mallocing = 0
     if mp.curg != nil {
-        mp.curg.stackguard = mp.curg.stack.lo + _StackGuard
+        mp.curg.stackguard0 = mp.curg.stack.lo + _StackGuard
     }
     // Note: one releasem for the acquirem just above.
     // The other for the acquirem at start of malloc.

@@ -179,6 +179,9 @@ func mcommoninit(mp *m) {
     sched.mcount++
     checkmcount()
     mpreinit(mp)
+    if mp.gsignal != nil {
+        mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard
+    }
 
     // Add to allm so garbage collector doesn't free g->m
     // when it is just in a register or thread-local storage.

@@ -210,7 +213,7 @@ func ready(gp *g) {
     }
     _g_.m.locks--
     if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
-        _g_.stackguard = stackPreempt
+        _g_.stackguard0 = stackPreempt
     }
 }
 

@@ -460,7 +463,7 @@ func stopg(gp *g) bool {
     if !gp.gcworkdone {
         gp.preemptscan = true
         gp.preempt = true
-        gp.stackguard = stackPreempt
+        gp.stackguard0 = stackPreempt
     }
 
     // Unclaim.

@@ -542,7 +545,7 @@ func mquiesce(gpmaster *g) {
         gp.gcworkdone = true // scan is a noop
         break
     }
-    if status == _Grunning && gp.stackguard == uintptr(stackPreempt) && notetsleep(&sched.stopnote, 100*1000) { // nanosecond arg
+    if status == _Grunning && gp.stackguard0 == uintptr(stackPreempt) && notetsleep(&sched.stopnote, 100*1000) { // nanosecond arg
         noteclear(&sched.stopnote)
     } else {
         stopscanstart(gp)

@@ -701,7 +704,7 @@ func starttheworld() {
     }
     _g_.m.locks--
     if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
-        _g_.stackguard = stackPreempt
+        _g_.stackguard0 = stackPreempt
     }
 }
 

@@ -722,7 +725,8 @@ func mstart() {
     }
     // Initialize stack guards so that we can start calling
    // both Go and C functions with stack growth prologues.
-    _g_.stackguard = _g_.stack.lo + _StackGuard
+    _g_.stackguard0 = _g_.stack.lo + _StackGuard
+    _g_.stackguard1 = _g_.stackguard0
     mstart1()
 }
 

@@ -802,7 +806,7 @@ func allocm(_p_ *p) *m {
     }
     _g_.m.locks--
     if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
-        _g_.stackguard = stackPreempt
+        _g_.stackguard0 = stackPreempt
     }
 
     return mp

@@ -879,7 +883,7 @@ func needm(x byte) {
     _g_ := getg()
     _g_.stack.hi = uintptr(noescape(unsafe.Pointer(&x))) + 1024
     _g_.stack.lo = uintptr(noescape(unsafe.Pointer(&x))) - 32*1024
-    _g_.stackguard = _g_.stack.lo + _StackGuard
+    _g_.stackguard0 = _g_.stack.lo + _StackGuard
 
     // Initialize this thread to use the m.
     asminit()

@@ -1217,7 +1221,7 @@ func execute(gp *g) {
     casgstatus(gp, _Grunnable, _Grunning)
     gp.waitsince = 0
     gp.preempt = false
-    gp.stackguard = gp.stack.lo + _StackGuard
+    gp.stackguard0 = gp.stack.lo + _StackGuard
     _g_.m.p.schedtick++
     _g_.m.curg = gp
     gp.m = _g_.m

@@ -1613,7 +1617,7 @@ func reentersyscall(pc, sp uintptr) {
     // (See details in comment above.)
     // Catch calls that might, by replacing the stack guard with something that
     // will trip any stack check and leaving a flag to tell newstack to die.
-    _g_.stackguard = stackPreempt
+    _g_.stackguard0 = stackPreempt
     _g_.throwsplit = true
 
     // Leave SP around for GC and traceback.

@@ -1644,7 +1648,7 @@ func reentersyscall(pc, sp uintptr) {
     // Goroutines must not split stacks in Gsyscall status (it would corrupt g->sched).
     // We set _StackGuard to StackPreempt so that first split stack check calls morestack.
     // Morestack detects this case and throws.
-    _g_.stackguard = stackPreempt
+    _g_.stackguard0 = stackPreempt
     _g_.m.locks--
 }
 

@@ -1682,7 +1686,7 @@ func entersyscallblock(dummy int32) {
 
     _g_.m.locks++ // see comment in entersyscall
     _g_.throwsplit = true
-    _g_.stackguard = stackPreempt // see comment in entersyscall
+    _g_.stackguard0 = stackPreempt // see comment in entersyscall
 
     // Leave SP around for GC and traceback.
     pc := getcallerpc(unsafe.Pointer(&dummy))

@@ -1748,10 +1752,10 @@ func exitsyscall(dummy int32) {
     _g_.m.locks--
     if _g_.preempt {
         // restore the preemption request in case we've cleared it in newstack
-        _g_.stackguard = stackPreempt
+        _g_.stackguard0 = stackPreempt
     } else {
         // otherwise restore the real _StackGuard, we've spoiled it in entersyscall/entersyscallblock
-        _g_.stackguard = _g_.stack.lo + _StackGuard
+        _g_.stackguard0 = _g_.stack.lo + _StackGuard
     }
     _g_.throwsplit = false
     return

@@ -1869,7 +1873,7 @@ func beforefork() {
     // Code between fork and exec must not allocate memory nor even try to grow stack.
     // Here we spoil g->_StackGuard to reliably detect any attempts to grow stack.
     // runtime_AfterFork will undo this in parent process, but not in child.
-    gp.stackguard = stackFork
+    gp.stackguard0 = stackFork
 }
 
 // Called from syscall package before fork.

@@ -1883,7 +1887,7 @@ func afterfork() {
     gp := getg().m.curg
 
     // See the comment in beforefork.
-    gp.stackguard = gp.stack.lo + _StackGuard
+    gp.stackguard0 = gp.stack.lo + _StackGuard
 
     hz := sched.profilehz
     if hz != 0 {

@@ -1907,7 +1911,8 @@ func malg(stacksize int32) *g {
     systemstack(func() {
         newg.stack = stackalloc(uint32(stacksize))
     })
-    newg.stackguard = newg.stack.lo + _StackGuard
+    newg.stackguard0 = newg.stack.lo + _StackGuard
+    newg.stackguard1 = ^uintptr(0)
     }
     return newg
 }

@@ -2003,7 +2008,7 @@ func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr
     }
     _g_.m.locks--
     if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
-        _g_.stackguard = stackPreempt
+        _g_.stackguard0 = stackPreempt
     }
     return newg
 }

@@ -2022,7 +2027,7 @@ func gfput(_p_ *p, gp *g) {
     stackfree(gp.stack)
     gp.stack.lo = 0
     gp.stack.hi = 0
-    gp.stackguard = 0
+    gp.stackguard0 = 0
     }
 
     gp.schedlink = _p_.gfree

@@ -2068,7 +2073,7 @@ retry:
     systemstack(func() {
         gp.stack = stackalloc(_FixedStack)
     })
-    gp.stackguard = gp.stack.lo + _StackGuard
+    gp.stackguard0 = gp.stack.lo + _StackGuard
     } else {
     if raceenabled {
         racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)

@@ -2773,10 +2778,10 @@ func preemptone(_p_ *p) bool {
     gp.preempt = true
 
     // Every call in a go routine checks for stack overflow by
-    // comparing the current stack pointer to gp->stackguard.
-    // Setting gp->stackguard to StackPreempt folds
+    // comparing the current stack pointer to gp->stackguard0.
+    // Setting gp->stackguard0 to StackPreempt folds
     // preemption into the normal stack overflow check.
-    gp.stackguard = stackPreempt
+    gp.stackguard0 = stackPreempt
     return true
 }
 
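Note: most of the runtime hunks above are a mechanical rename back to stackguard0, but the preemptone comment states why that field matters: a preemption request is delivered by poisoning stackguard0 so the next function-entry stack check fails and the goroutine drops into morestack/newstack, which notices gp.preempt. A minimal sketch of that folding, assuming a 64-bit uintptr; the constants are stand-ins mirroring the ones in the stack constants below.

package main

import "fmt"

const (
	uintptrMask  = 1<<(8*8) - 1                 // assuming a 64-bit uintptr
	stackPreempt = uintptr(uintptrMask & -1314) // 0xfffffffffffffade
	stackGuard   = 512                          // stand-in for _StackGuard
)

// needsMorestack is the condition every Go prologue effectively evaluates.
func needsMorestack(sp, stackguard0 uintptr) bool { return sp <= stackguard0 }

func main() {
	lo := uintptr(0x1000)
	guard := lo + stackGuard
	sp := uintptr(0x4000) // plenty of room, check passes

	fmt.Println(needsMorestack(sp, guard)) // false: no growth needed

	// Preemption request: poison the guard. stackPreempt is larger than any
	// real stack pointer, so the next prologue check fails and the goroutine
	// enters morestack/newstack, which sees that gp.preempt is set.
	guard = stackPreempt
	fmt.Println(needsMorestack(sp, guard)) // true
}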
@@ -386,7 +386,7 @@ func releasem(mp *m) {
     mp.locks--
     if mp.locks == 0 && _g_.preempt {
         // restore the preemption request in case we've cleared it in newstack
-        _g_.stackguard = stackPreempt
+        _g_.stackguard0 = stackPreempt
     }
 }
 

@@ -154,10 +154,14 @@ type stack struct {
 type g struct {
     // Stack parameters.
     // stack describes the actual stack memory: [stack.lo, stack.hi).
-    // stackguard is the stack pointer compared in the Go stack growth prologue.
+    // stackguard0 is the stack pointer compared in the Go stack growth prologue.
     // It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
-    stack stack // offset known to runtime/cgo
-    stackguard uintptr // offset known to liblink
+    // stackguard1 is the stack pointer compared in the C stack growth prologue.
+    // It is stack.lo+StackGuard on g0 and gsignal stacks.
+    // It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
+    stack stack // offset known to runtime/cgo
+    stackguard0 uintptr // offset known to liblink
+    stackguard1 uintptr // offset known to liblink
 
     _panic *_panic // innermost panic - offset known to liblink
     _defer *_defer // innermost defer

@@ -171,7 +175,7 @@ type g struct {
     waitreason string // if status==gwaiting
     schedlink *g
     issystem bool // do not output in stack dump, ignore in deadlock detector
-    preempt bool // preemption signal, duplicates stackguard = stackpreempt
+    preempt bool // preemption signal, duplicates stackguard0 = stackpreempt
     paniconfault bool // panic (instead of crash) on unexpected fault address
     preemptscan bool // preempted g does scan for gc
     gcworkdone bool // debug: cleared at begining of gc work phase cycle, set by gcphasework, tested at end of cycle

@@ -26,13 +26,13 @@ const (
     poisonStack = uintptrMask & 0x6868686868686868
 
     // Goroutine preemption request.
-    // Stored into g->stackguard to cause split stack check failure.
+    // Stored into g->stackguard0 to cause split stack check failure.
     // Must be greater than any real sp.
     // 0xfffffade in hex.
     stackPreempt = uintptrMask & -1314
 
     // Thread is forking.
-    // Stored into g->stackguard to cause split stack check failure.
+    // Stored into g->stackguard0 to cause split stack check failure.
     // Must be greater than any real sp.
     stackFork = uintptrMask & -1234
 )
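Note: the sentinel constants above are produced by masking small negative numbers so their low bits stay recognizable in a crash dump, as the 0xfffffade comment says. A short check of what uintptrMask & -1314 (stackPreempt) and uintptrMask & -1234 (stackFork) work out to at both pointer sizes; the 64-bit values are simply the same arithmetic carried out at the wider width.

package main

import "fmt"

func main() {
	for _, bits := range []uint{32, 64} {
		mask := uint64(1)<<bits - 1
		preempt := mask & uint64(int64(-1314)) // stackPreempt = uintptrMask & -1314
		fork := mask & uint64(int64(-1234))    // stackFork   = uintptrMask & -1234
		fmt.Printf("%d-bit: stackPreempt=%#x stackFork=%#x\n", bits, preempt, fork)
	}
	// 32-bit: stackPreempt=0xfffffade stackFork=0xfffffb2e
	// 64-bit: stackPreempt=0xfffffffffffffade stackFork=0xfffffffffffffb2e
}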
@@ -566,7 +566,7 @@ func copystack(gp *g, newsize uintptr) {
 
     // Swap out old stack for new one
     gp.stack = new
-    gp.stackguard = new.lo + _StackGuard // NOTE: might clobber a preempt request
+    gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
     gp.sched.sp = new.hi - used
 
     // free old stack

@@ -611,7 +611,7 @@ func round2(x int32) int32 {
 func newstack() {
     thisg := getg()
     // TODO: double check all gp. shouldn't be getg().
-    if thisg.m.morebuf.g.stackguard == stackFork {
+    if thisg.m.morebuf.g.stackguard0 == stackFork {
         throw("stack growth after fork")
     }
     if thisg.m.morebuf.g != thisg.m.curg {

@@ -674,7 +674,7 @@ func newstack() {
         writebarrierptr_nostore((*uintptr)(unsafe.Pointer(&gp.sched.ctxt)), uintptr(gp.sched.ctxt))
     }
 
-    if gp.stackguard == stackPreempt {
+    if gp.stackguard0 == stackPreempt {
         if gp == thisg.m.g0 {
             throw("runtime: preempt g0")
         }

@@ -689,7 +689,7 @@ func newstack() {
         gcphasework(gp)
         casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
         casgstatus(gp, _Gwaiting, _Grunning)
-        gp.stackguard = gp.stack.lo + _StackGuard
+        gp.stackguard0 = gp.stack.lo + _StackGuard
         gp.preempt = false
         gp.preemptscan = false // Tells the GC premption was successful.
         gogo(&gp.sched) // never return

@@ -700,7 +700,7 @@ func newstack() {
     if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.gcing != 0 || thisg.m.p.status != _Prunning {
         // Let the goroutine keep running for now.
         // gp->preempt is set, so it will be preempted next time.
-        gp.stackguard = gp.stack.lo + _StackGuard
+        gp.stackguard0 = gp.stack.lo + _StackGuard
         casgstatus(gp, _Gwaiting, _Grunning)
         gogo(&gp.sched) // never return
     }

@@ -804,3 +804,10 @@ func shrinkfinish() {
         s = t
     }
 }
+
+//go:nosplit
+func morestackc() {
+    systemstack(func() {
+        throw("attempt to execute C code on Go stack")
+    })
+}

@@ -97,7 +97,7 @@ const (
 )
 
 // Goroutine preemption request.
-// Stored into g->stackguard to cause split stack check failure.
+// Stored into g->stackguard0 to cause split stack check failure.
 // Must be greater than any real sp.
 // 0xfffffade in hex.
 const (

@@ -148,7 +148,8 @@ TEXT runtime·tstart_plan9(SB),NOSPLIT,$0
     MOVL AX, (g_stack+stack_hi)(DX)
     SUBL $(64*1024), AX // stack size
     MOVL AX, (g_stack+stack_lo)(DX)
-    MOVL AX, g_stackguard(DX)
+    MOVL AX, g_stackguard0(DX)
+    MOVL AX, g_stackguard1(DX)
 
     // Initialize procid from TOS struct.
     MOVL _tos(SB), AX

@@ -145,7 +145,8 @@ TEXT runtime·tstart_plan9(SB),NOSPLIT,$0
     MOVQ AX, (g_stack+stack_hi)(DX)
     SUBQ $(64*1024), AX // stack size
     MOVQ AX, (g_stack+stack_lo)(DX)
-    MOVQ AX, g_stackguard(DX)
+    MOVQ AX, g_stackguard0(DX)
+    MOVQ AX, g_stackguard1(DX)
 
     // Initialize procid from TOS struct.
     MOVQ _tos(SB), AX

@@ -134,7 +134,8 @@ TEXT runtime·tstart_sysvicall(SB),NOSPLIT,$0
     SUBQ $(0x100000), AX // stack size
     MOVQ AX, (g_stack+stack_lo)(DX)
     ADDQ $const__StackGuard, AX
-    MOVQ AX, g_stackguard(DX)
+    MOVQ AX, g_stackguard0(DX)
+    MOVQ AX, g_stackguard1(DX)
 
     // Someday the convention will be D is always cleared.
     CLD

@@ -209,7 +209,8 @@ TEXT runtime·externalthreadhandler(SB),NOSPLIT,$0
     LEAL -8192(SP), CX
     MOVL CX, (g_stack+stack_lo)(SP)
     ADDL $const__StackGuard, CX
-    MOVL CX, g_stackguard(SP)
+    MOVL CX, g_stackguard0(SP)
+    MOVL CX, g_stackguard1(SP)
     MOVL DX, (g_stack+stack_hi)(SP)
 
     PUSHL 16(BP) // arg for handler

@@ -314,7 +315,8 @@ TEXT runtime·tstart(SB),NOSPLIT,$0
     SUBL $(64*1024), AX // stack size
     MOVL AX, (g_stack+stack_lo)(DX)
     ADDL $const__StackGuard, AX
-    MOVL AX, g_stackguard(DX)
+    MOVL AX, g_stackguard0(DX)
+    MOVL AX, g_stackguard1(DX)
 
     // Set up tls.
     LEAL m_tls(CX), SI

@@ -246,7 +246,8 @@ TEXT runtime·externalthreadhandler(SB),NOSPLIT,$0
     LEAQ -8192(SP), CX
     MOVQ CX, (g_stack+stack_lo)(SP)
     ADDQ $const__StackGuard, CX
-    MOVQ CX, g_stackguard(SP)
+    MOVQ CX, g_stackguard0(SP)
+    MOVQ CX, g_stackguard1(SP)
     MOVQ DX, (g_stack+stack_hi)(SP)
 
     PUSHQ 32(BP) // arg for handler

@@ -355,7 +356,8 @@ TEXT runtime·tstart_stdcall(SB),NOSPLIT,$0
     SUBQ $(64*1024), AX // stack size
     MOVQ AX, (g_stack+stack_lo)(DX)
     ADDQ $const__StackGuard, AX
-    MOVQ AX, g_stackguard(DX)
+    MOVQ AX, g_stackguard0(DX)
+    MOVQ AX, g_stackguard1(DX)
 
     // Set up tls.
     LEAQ m_tls(CX), SI