mirror of https://github.com/golang/go synced 2024-10-04 14:21:21 -06:00
go/src/pkg/runtime/runtime.c
Dmitriy Vyukov 4cc7bf326a pprof: add goroutine blocking profiling
The profiler collects goroutine blocking information similar to Google Perf Tools.
You may see an example of the profile (converted to svg) attached to
http://code.google.com/p/go/issues/detail?id=3946
The public API changes are:
+pkg runtime, func BlockProfile([]BlockProfileRecord) (int, bool)
+pkg runtime, func SetBlockProfileRate(int)
+pkg runtime, method (*BlockProfileRecord) Stack() []uintptr
+pkg runtime, type BlockProfileRecord struct
+pkg runtime, type BlockProfileRecord struct, Count int64
+pkg runtime, type BlockProfileRecord struct, Cycles int64
+pkg runtime, type BlockProfileRecord struct, embedded StackRecord
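
A rough usage sketch of the new API (the rate value, the two-pass
sizing, and the printing below are illustrative choices, not part of
this change):

	package main

	import (
		"fmt"
		"runtime"
	)

	func main() {
		// Rate 1 records every blocking event; rate <= 0 disables the profile.
		runtime.SetBlockProfileRate(1)

		// ... run code that blocks on channels, mutexes, etc. ...

		// Ask for the record count first, then fetch the records.
		n, _ := runtime.BlockProfile(nil)
		records := make([]runtime.BlockProfileRecord, n)
		if n, ok := runtime.BlockProfile(records); ok {
			for _, r := range records[:n] {
				fmt.Println(r.Count, r.Cycles, r.Stack())
			}
		}
	}

Typically the same data is consumed through the runtime/pprof "block"
profile rather than by calling BlockProfile directly.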

R=rsc, dave, minux.ma, r
CC=gobot, golang-dev, r, remyoudompheng
https://golang.org/cl/6443115
2012-10-06 12:56:04 +04:00

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "runtime.h"
#include "arch_GOARCH.h"

enum {
	maxround = sizeof(uintptr),
};

/*
 * We assume that all architectures turn faults and the like
 * into apparent calls to runtime.sigpanic. If we see a "call"
 * to runtime.sigpanic, we do not back up the PC to find the
 * line number of the CALL instruction, because there is no CALL.
 */
void	runtime·sigpanic(void);

int32
runtime·gotraceback(void)
{
	byte *p;

	p = runtime·getenv("GOTRACEBACK");
	if(p == nil || p[0] == '\0')
		return 1;	// default is on
	return runtime·atoi(p);
}

int32
runtime·mcmp(byte *s1, byte *s2, uint32 n)
{
	uint32 i;
	byte c1, c2;

	for(i=0; i<n; i++) {
		c1 = s1[i];
		c2 = s2[i];
		if(c1 < c2)
			return -1;
		if(c1 > c2)
			return +1;
	}
	return 0;
}

byte*
runtime·mchr(byte *p, byte c, byte *ep)
{
	for(; p < ep; p++)
		if(*p == c)
			return p;
	return nil;
}

static int32	argc;
static uint8**	argv;

Slice os·Args;
Slice syscall·envs;

void (*runtime·sysargs)(int32, uint8**);

void
runtime·args(int32 c, uint8 **v)
{
	argc = c;
	argv = v;
	if(runtime·sysargs != nil)
		runtime·sysargs(c, v);
}

int32 runtime·isplan9;
int32 runtime·iswindows;

void
runtime·goargs(void)
{
	String *s;
	int32 i;

	// for windows implementation see "os" package
	if(Windows)
		return;

	s = runtime·malloc(argc*sizeof s[0]);
	for(i=0; i<argc; i++)
		s[i] = runtime·gostringnocopy(argv[i]);
	os·Args.array = (byte*)s;
	os·Args.len = argc;
	os·Args.cap = argc;
}

void
runtime·goenvs_unix(void)
{
	String *s;
	int32 i, n;

	for(n=0; argv[argc+1+n] != 0; n++)
		;

	s = runtime·malloc(n*sizeof s[0]);
	for(i=0; i<n; i++)
		s[i] = runtime·gostringnocopy(argv[argc+1+i]);
	syscall·envs.array = (byte*)s;
	syscall·envs.len = n;
	syscall·envs.cap = n;
}

byte*
runtime·getenv(int8 *s)
{
	int32 i, j, len;
	byte *v, *bs;
	String* envv;
	int32 envc;

	bs = (byte*)s;
	len = runtime·findnull(bs);
	envv = (String*)syscall·envs.array;
	envc = syscall·envs.len;
	for(i=0; i<envc; i++){
		if(envv[i].len <= len)
			continue;
		v = envv[i].str;
		for(j=0; j<len; j++)
			if(bs[j] != v[j])
				goto nomatch;
		if(v[len] != '=')
			goto nomatch;
		return v+len+1;
	nomatch:;
	}
	return nil;
}

void (*libcgo_setenv)(byte**);

// Update the C environment if cgo is loaded.
// Called from syscall.Setenv.
void
syscall·setenv_c(String k, String v)
{
	byte *arg[2];

	if(libcgo_setenv == nil)
		return;

	arg[0] = runtime·malloc(k.len + 1);
	runtime·memmove(arg[0], k.str, k.len);
	arg[0][k.len] = 0;

	arg[1] = runtime·malloc(v.len + 1);
	runtime·memmove(arg[1], v.str, v.len);
	arg[1][v.len] = 0;

	runtime·asmcgocall((void*)libcgo_setenv, arg);
	runtime·free(arg[0]);
	runtime·free(arg[1]);
}

void
runtime·getgoroot(String out)
{
	byte *p;

	p = runtime·getenv("GOROOT");
	out = runtime·gostringnocopy(p);
	FLUSH(&out);
}

int32
runtime·atoi(byte *p)
{
	int32 n;

	n = 0;
	while('0' <= *p && *p <= '9')
		n = n*10 + *p++ - '0';
	return n;
}

// Check that the 64-bit atomic primitives work on this platform.
static void
TestAtomic64(void)
{
	uint64 z64, x64;

	z64 = 42;
	x64 = 0;
	PREFETCH(&z64);
	// This cas64 must fail (z64 is 42, not 0) and write the current
	// value 42 back into x64, as the next check verifies.
	if(runtime·cas64(&z64, &x64, 1))
		runtime·throw("cas64 failed");
	if(x64 != 42)
		runtime·throw("cas64 failed");
	// With the correct old value the cas64 must succeed.
	if(!runtime·cas64(&z64, &x64, 1))
		runtime·throw("cas64 failed");
	if(x64 != 42 || z64 != 1)
		runtime·throw("cas64 failed");
	if(runtime·atomicload64(&z64) != 1)
		runtime·throw("load64 failed");
	runtime·atomicstore64(&z64, (1ull<<40)+1);
	if(runtime·atomicload64(&z64) != (1ull<<40)+1)
		runtime·throw("store64 failed");
	if(runtime·xadd64(&z64, (1ull<<40)+1) != (2ull<<40)+2)
		runtime·throw("xadd64 failed");
	if(runtime·atomicload64(&z64) != (2ull<<40)+2)
		runtime·throw("xadd64 failed");
}

void
runtime·check(void)
{
	int8 a;
	uint8 b;
	int16 c;
	uint16 d;
	int32 e;
	uint32 f;
	int64 g;
	uint64 h;
	float32 i, i1;
	float64 j, j1;
	void* k;
	uint16* l;
	struct x1 {
		byte x;
	};
	struct y1 {
		struct x1 x1;
		byte y;
	};

	if(sizeof(a) != 1) runtime·throw("bad a");
	if(sizeof(b) != 1) runtime·throw("bad b");
	if(sizeof(c) != 2) runtime·throw("bad c");
	if(sizeof(d) != 2) runtime·throw("bad d");
	if(sizeof(e) != 4) runtime·throw("bad e");
	if(sizeof(f) != 4) runtime·throw("bad f");
	if(sizeof(g) != 8) runtime·throw("bad g");
	if(sizeof(h) != 8) runtime·throw("bad h");
	if(sizeof(i) != 4) runtime·throw("bad i");
	if(sizeof(j) != 8) runtime·throw("bad j");
	if(sizeof(k) != sizeof(uintptr)) runtime·throw("bad k");
	if(sizeof(l) != sizeof(uintptr)) runtime·throw("bad l");
	if(sizeof(struct x1) != 1) runtime·throw("bad sizeof x1");
	if(offsetof(struct y1, y) != 1) runtime·throw("bad offsetof y1.y");
	if(sizeof(struct y1) != 2) runtime·throw("bad sizeof y1");

	uint32 z;
	z = 1;
	if(!runtime·cas(&z, 1, 2))
		runtime·throw("cas1");
	if(z != 2)
		runtime·throw("cas2");

	z = 4;
	if(runtime·cas(&z, 5, 6))
		runtime·throw("cas3");
	if(z != 4)
		runtime·throw("cas4");

	*(uint64*)&j = ~0ULL;
	if(j == j)
		runtime·throw("float64nan");
	if(!(j != j))
		runtime·throw("float64nan1");
	*(uint64*)&j1 = ~1ULL;
	if(j == j1)
		runtime·throw("float64nan2");
	if(!(j != j1))
		runtime·throw("float64nan3");

	*(uint32*)&i = ~0UL;
	if(i == i)
		runtime·throw("float32nan");
	if(!(i != i))
		runtime·throw("float32nan1");
	*(uint32*)&i1 = ~1UL;
	if(i == i1)
		runtime·throw("float32nan2");
	if(!(i != i1))
		runtime·throw("float32nan3");

	TestAtomic64();
}

void
runtime·Caller(intgo skip, uintptr retpc, String retfile, intgo retline, bool retbool)
{
	Func *f, *g;
	uintptr pc;
	uintptr rpc[2];

	/*
	 * Ask for two PCs: the one we were asked for
	 * and what it called, so that we can see if it
	 * "called" sigpanic.
	 */
	retpc = 0;
	if(runtime·callers(1+skip-1, rpc, 2) < 2) {
		retfile = runtime·emptystring;
		retline = 0;
		retbool = false;
	} else if((f = runtime·findfunc(rpc[1])) == nil) {
		retfile = runtime·emptystring;
		retline = 0;
		retbool = true;  // have retpc at least
	} else {
		retpc = rpc[1];
		retfile = f->src;
		pc = retpc;
		g = runtime·findfunc(rpc[0]);
		if(pc > f->entry && (g == nil || g->entry != (uintptr)runtime·sigpanic))
			pc--;
		retline = runtime·funcline(f, pc);
		retbool = true;
	}
	FLUSH(&retpc);
	FLUSH(&retfile);
	FLUSH(&retline);
	FLUSH(&retbool);
}

void
runtime·Callers(intgo skip, Slice pc, intgo retn)
{
	// runtime.callers uses pc.array==nil as a signal
	// to print a stack trace. Pick off 0-length pc here
	// so that we don't let a nil pc slice get to it.
	if(pc.len == 0)
		retn = 0;
	else
		retn = runtime·callers(skip, (uintptr*)pc.array, pc.len);
	FLUSH(&retn);
}

void
runtime·FuncForPC(uintptr pc, void *retf)
{
	retf = runtime·findfunc(pc);
	FLUSH(&retf);
}

uint32
runtime·fastrand1(void)
{
	uint32 x;

	x = m->fastrand;
	x += x;
	if(x & 0x80000000L)
		x ^= 0x88888eefUL;
	m->fastrand = x;
	return x;
}

static Lock ticksLock;
static int64 ticks;

int64
runtime·tickspersecond(void)
{
	int64 res, t0, t1, c0, c1;

	res = (int64)runtime·atomicload64((uint64*)&ticks);
	if(res != 0)
		return ticks;
	runtime·lock(&ticksLock);
	res = ticks;
	if(res == 0) {
		t0 = runtime·nanotime();
		c0 = runtime·cputicks();
		runtime·usleep(100*1000);
		t1 = runtime·nanotime();
		c1 = runtime·cputicks();
		if(t1 == t0)
			t1++;
		res = (c1-c0)*1000*1000*1000/(t1-t0);
		if(res == 0)
			res++;
		runtime·atomicstore64((uint64*)&ticks, res);
	}
	runtime·unlock(&ticksLock);
	return res;
}

void
runtimepprof·runtime_cyclesPerSecond(int64 res)
{
	res = runtime·tickspersecond();
	FLUSH(&res);
}