mirror of
https://github.com/golang/go
synced 2024-10-04 14:31:21 -06:00
89f185fe8a
The runtime has historically held two dedicated values g (current goroutine) and m (current thread) in 'extern register' slots (TLS on x86, real registers backed by TLS on ARM). This CL removes the extern register m; code now uses g->m. On ARM, this frees up the register that formerly held m (R9). This is important for NaCl, because NaCl ARM code cannot use R9 at all. The Go 1 macrobenchmarks (those with per-op times >= 10 µs) are unaffected: BenchmarkBinaryTree17 5491374955 5471024381 -0.37% BenchmarkFannkuch11 4357101311 4275174828 -1.88% BenchmarkGobDecode 11029957 11364184 +3.03% BenchmarkGobEncode 6852205 6784822 -0.98% BenchmarkGzip 650795967 650152275 -0.10% BenchmarkGunzip 140962363 141041670 +0.06% BenchmarkHTTPClientServer 71581 73081 +2.10% BenchmarkJSONEncode 31928079 31913356 -0.05% BenchmarkJSONDecode 117470065 113689916 -3.22% BenchmarkMandelbrot200 6008923 5998712 -0.17% BenchmarkGoParse 6310917 6327487 +0.26% BenchmarkRegexpMatchMedium_1K 114568 114763 +0.17% BenchmarkRegexpMatchHard_1K 168977 169244 +0.16% BenchmarkRevcomp 935294971 914060918 -2.27% BenchmarkTemplate 145917123 148186096 +1.55% Minux previously reported larger variations, but these were caused by run-to-run noise, not repeatable slowdowns. Actual code changes by Minux. I only did the docs and the benchmarking. LGTM=dvyukov, iant, minux R=minux, josharian, iant, dave, bradfitz, dvyukov CC=golang-codereviews https://golang.org/cl/109050043
129 lines
2.9 KiB
Plaintext
129 lines
2.9 KiB
Plaintext
// Copyright 2010 The Go Authors. All rights reserved.
|
||
// Use of this source code is governed by a BSD-style
|
||
// license that can be found in the LICENSE file.
|
||
|
||
package runtime
|
||
#include "runtime.h"
|
||
#include "arch_GOARCH.h"
|
||
#include "type.h"
|
||
|
||
// GOMAXPROCS forwards to runtime·gomaxprocsfunc and exposes its result
// to Go code. goc convention: the named result `ret` is assigned and
// implicitly returned when the body falls off the end (no `return`).
func GOMAXPROCS(n int) (ret int) {
	ret = runtime·gomaxprocsfunc(n);
}
|
||
|
||
// NumCPU reports the runtime's cached CPU count, runtime·ncpu
// (populated during startup elsewhere in the runtime).
func NumCPU() (ret int) {
	ret = runtime·ncpu;
}
|
||
|
||
// NumCgoCall sums the per-M ncgocall counters across all M's (OS threads).
// allm is read with an atomic load because new M's may be published
// concurrently; the alllink list is append-only, so a snapshot of the
// head is safe to traverse without a lock.
func NumCgoCall() (ret int64) {
	M *mp;

	ret = 0;
	for(mp=runtime·atomicloadp(&runtime·allm); mp; mp=mp->alllink)
		ret += mp->ncgocall;
}
|
||
|
||
// newParFor allocates a ParFor descriptor sized for at most nthrmax
// threads, delegating to runtime·parforalloc.
func newParFor(nthrmax uint32) (desc *ParFor) {
	desc = runtime·parforalloc(nthrmax);
}
|
||
|
||
// parForSetup configures a ParFor descriptor for a parallel loop.
// The Go side passes the loop body as an opaque *byte; the cast below
// reinterprets it as a pointer to a C function pointer of type
// void(*)(ParFor*, uint32) and dereferences it for runtime·parforsetup.
func parForSetup(desc *ParFor, nthr uint32, n uint32, ctx *byte, wait bool, body *byte) {
	runtime·parforsetup(desc, nthr, n, ctx, wait, *(void(**)(ParFor*, uint32))body);
}
|
||
|
||
// parForDo runs the parallel loop described by desc (thin wrapper over
// runtime·parfordo).
func parForDo(desc *ParFor) {
	runtime·parfordo(desc);
}
|
||
|
||
// parForIters returns the [start, end) iteration range assigned to
// thread tid by filling the named results through out-pointers.
func parForIters(desc *ParFor, tid uintptr) (start uintptr, end uintptr) {
	runtime·parforiters(desc, tid, &start, &end);
}
|
||
|
||
// gogoBytes exposes the RuntimeGogoBytes constant to Go code —
// presumably the code size of runtime·gogo; confirm against the
// constant's definition elsewhere in the runtime.
func gogoBytes() (x int32) {
	x = RuntimeGogoBytes;
}
|
||
|
||
// typestring returns the name of the dynamic type held in the empty
// interface e, read by dereferencing the type descriptor's string field.
func typestring(e Eface) (s String) {
	s = *e.type->string;
}
|
||
|
||
// golockedOSThread reports whether the calling goroutine is locked to
// its OS thread (thin wrapper over runtime·lockedOSThread).
func golockedOSThread() (ret bool) {
	ret = runtime·lockedOSThread();
}
|
||
|
||
// NumGoroutine reports the current goroutine count via runtime·gcount.
func NumGoroutine() (ret int) {
	ret = runtime·gcount();
}
|
||
|
||
// getgoroot returns the value of the GOROOT environment variable as a
// runtime String. gostringnocopy aliases the environment storage rather
// than copying it, which is safe because the runtime never mutates it.
func getgoroot() (out String) {
	byte *p;

	p = runtime·getenv("GOROOT");
	out = runtime·gostringnocopy(p);
}
|
||
|
||
/*
|
||
* We assume that all architectures turn faults and the like
|
||
* into apparent calls to runtime.sigpanic. If we see a "call"
|
||
* to runtime.sigpanic, we do not back up the PC to find the
|
||
* line number of the CALL instruction, because there is no CALL.
|
||
*/
|
||
void runtime·sigpanic(void);
|
||
|
||
// Caller reports the PC, file, and line of the function `skip` frames up
// the calling goroutine's stack. retbool distinguishes the three exit
// paths below: false when the stack walk itself failed, true otherwise
// (even when only the PC could be resolved).
func Caller(skip int) (retpc uintptr, retfile String, retline int, retbool bool) {
	Func *f, *g;
	uintptr pc;
	uintptr rpc[2];

	/*
	 * Ask for two PCs: the one we were asked for
	 * and what it called, so that we can see if it
	 * "called" sigpanic.
	 */
	retpc = 0;
	if(runtime·callers(1+skip-1, rpc, 2) < 2) {
		// Not enough frames: report nothing usable.
		retfile = runtime·emptystring;
		retline = 0;
		retbool = false;
	} else if((f = runtime·findfunc(rpc[1])) == nil) {
		// PC resolved but no function metadata (e.g. assembly glue).
		retfile = runtime·emptystring;
		retline = 0;
		retbool = true; // have retpc at least
	} else {
		retpc = rpc[1];
		pc = retpc;
		g = runtime·findfunc(rpc[0]);
		// Normally back up one byte so the line lookup lands on the
		// CALL instruction rather than the return address. Skip the
		// adjustment when the "callee" is sigpanic — a fault, not a
		// real CALL — or when pc is already at the function entry.
		if(pc > f->entry && (g == nil || g->entry != (uintptr)runtime·sigpanic))
			pc--;
		retline = runtime·funcline(f, pc, &retfile);
		retbool = true;
	}
}
|
||
|
||
// Callers fills the caller-supplied pc slice with return PCs from the
// calling goroutine's stack, skipping `skip` frames, and reports how
// many entries were written.
func Callers(skip int, pc Slice) (retn int) {
	// runtime.callers uses pc.array==nil as a signal
	// to print a stack trace. Pick off 0-length pc here
	// so that we don't let a nil pc slice get to it.
	if(pc.len == 0)
		retn = 0;
	else
		retn = runtime·callers(skip, (uintptr*)pc.array, pc.len);
}
|
||
|
||
// runtime_cyclesPerSecond exports the runtime's tick frequency to
// package runtime/pprof (the ∕ in the name encodes the package path).
func runtime∕pprof·runtime_cyclesPerSecond() (res int64) {
	res = runtime·tickspersecond();
}
|
||
|
||
// runtime_procPin, exported to package sync, pins the calling goroutine
// to its current P and returns that P's id. Incrementing m->locks
// disables preemption, so the goroutine cannot migrate to another P
// until the matching runtime_procUnpin.
func sync·runtime_procPin() (p int) {
	M *mp;

	mp = g->m;
	// Disable preemption.
	mp->locks++;
	p = mp->p->id;
}
|
||
|
||
// runtime_procUnpin undoes runtime_procPin: decrementing m->locks
// re-enables preemption for the calling goroutine.
func sync·runtime_procUnpin() {
	g->m->locks--;
}
|