1
0
mirror of https://github.com/golang/go synced 2024-11-12 06:30:21 -07:00

runtime: allocate internal symbol table eagerly

We need it for GC anyway.

R=golang-dev, khr, dave, khr
CC=golang-dev
https://golang.org/cl/9728044
This commit is contained in:
Dmitriy Vyukov 2013-05-28 21:10:10 +04:00
parent 4d6bfcf245
commit 081129e286
4 changed files with 5 additions and 32 deletions

View File

@ -128,10 +128,6 @@ runtime·SetCPUProfileRate(intgo hz)
uintptr *p;
uintptr n;
// Call findfunc now so that it won't have to
// build tables during the signal handler.
runtime·findfunc(0);
// Clamp hz to something reasonable.
if(hz < 0)
hz = 0;

View File

@ -133,10 +133,8 @@ runtime·schedinit(void)
runtime·goargs();
runtime·goenvs();
// For debugging:
// Allocate internal symbol table representation now,
// so that we don't need to call malloc when we crash.
// runtime·findfunc(0);
// Allocate internal symbol table representation now, we need it for GC anyway.
runtime·symtabinit();
runtime·sched.lastpoll = runtime·nanotime();
procs = 1;

View File

@ -749,6 +749,7 @@ void runtime·mpreinit(M*);
void runtime·minit(void);
void runtime·unminit(void);
void runtime·signalstack(byte*, int32);
void runtime·symtabinit(void);
Func* runtime·findfunc(uintptr);
int32 runtime·funcline(Func*, uintptr);
void* runtime·stackalloc(uint32);

View File

@ -193,8 +193,6 @@ static int32 nfunc;
static byte **fname;
static int32 nfname;
static uint32 funcinit;
static Lock funclock;
static uintptr lastvalue;
static void
@ -539,8 +537,8 @@ runtime·funcline_go(Func *f, uintptr targetpc, String retfile, intgo retline)
FLUSH(&retline);
}
static void
buildfuncs(void)
void
runtime·symtabinit(void)
{
extern byte etext[];
@ -591,26 +589,6 @@ runtime·findfunc(uintptr addr)
Func *f;
int32 nf, n;
// Use atomic double-checked locking,
// because when called from pprof signal
// handler, findfunc must run without
// grabbing any locks.
// (Before enabling the signal handler,
// SetCPUProfileRate calls findfunc to trigger
// the initialization outside the handler.)
// Avoid deadlock on fault during malloc
// by not calling buildfuncs if we're already in malloc.
if(!m->mallocing && !m->gcing) {
if(runtime·atomicload(&funcinit) == 0) {
runtime·lock(&funclock);
if(funcinit == 0) {
buildfuncs();
runtime·atomicstore(&funcinit, 1);
}
runtime·unlock(&funclock);
}
}
if(nfunc == 0)
return nil;
if(addr < func[0].entry || addr >= func[nfunc].entry)