2010-04-06 00:44:05 -06:00
|
|
|
// Copyright 2009 The Go Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style
|
|
|
|
// license that can be found in the LICENSE file.
|
|
|
|
|
|
|
|
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <process.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include "libcgo.h"
|
|
|
|
|
2012-11-17 11:06:08 -07:00
|
|
|
static void threadentry(void*);
|
2010-04-06 00:44:05 -06:00
|
|
|
|
2011-12-07 06:53:17 -07:00
|
|
|
/* 1MB is default stack size for 32-bit Windows.
|
|
|
|
Allocation granularity on Windows is typically 64 KB.
|
|
|
|
The constant is also hardcoded in cmd/ld/pe.c (keep synchronized). */
|
2010-04-06 00:44:05 -06:00
|
|
|
#define STACKSIZE (1*1024*1024)
|
|
|
|
|
2013-02-28 14:24:38 -07:00
|
|
|
void
|
|
|
|
x_cgo_init(G *g)
|
2010-04-06 00:44:05 -06:00
|
|
|
{
|
2011-11-09 13:11:48 -07:00
|
|
|
int tmp;
|
runtime: assume precisestack, copystack, StackCopyAlways, ScanStackByFrames
Commit to stack copying for stack growth.
We're carrying around a surprising amount of cruft from older schemes.
I am confident that precise stack scans and stack copying are here to stay.
Delete fallback code for when precise stack info is disabled.
Delete fallback code for when copying stacks is disabled.
Delete fallback code for when StackCopyAlways is disabled.
Delete Stktop chain - there is only one stack segment now.
Delete M.moreargp, M.moreargsize, M.moreframesize, M.cret.
Delete G.writenbuf (unrelated, just dead).
Delete runtime.lessstack, runtime.oldstack.
Delete many amd64 morestack variants.
Delete initialization of morestack frame/arg sizes (shortens split prologue!).
Replace G's stackguard/stackbase/stack0/stacksize/
syscallstack/syscallguard/forkstackguard with simple stack
bounds (lo, hi).
Update liblink, runtime/cgo for adjustments to G.
LGTM=khr
R=khr, bradfitz
CC=golang-codereviews, iant, r
https://golang.org/cl/137410043
2014-09-09 11:39:57 -06:00
|
|
|
g->stacklo = (uintptr)&tmp - STACKSIZE + 8*1024;
|
2010-04-06 00:44:05 -06:00
|
|
|
}
|
|
|
|
|
2010-12-08 14:35:05 -07:00
|
|
|
|
2010-04-06 00:44:05 -06:00
|
|
|
void
|
2013-02-28 14:24:38 -07:00
|
|
|
_cgo_sys_thread_start(ThreadStart *ts)
|
2010-04-06 00:44:05 -06:00
|
|
|
{
|
2014-01-21 23:30:10 -07:00
|
|
|
uintptr_t thandle;
|
|
|
|
|
|
|
|
thandle = _beginthread(threadentry, 0, ts);
|
|
|
|
if(thandle == -1) {
|
|
|
|
fprintf(stderr, "runtime: failed to create new OS thread (%d)\n", errno);
|
|
|
|
abort();
|
|
|
|
}
|
2010-04-06 00:44:05 -06:00
|
|
|
}
|
|
|
|
|
2012-11-17 11:06:08 -07:00
|
|
|
/*
 * Entry point for threads created by _cgo_sys_thread_start.
 * v is a heap-allocated ThreadStart (presumably allocated by the
 * caller of _cgo_sys_thread_start — ownership passes to us here).
 */
static void
threadentry(void *v)
{
	ThreadStart ts;

	// Copy the arguments onto this thread's stack, then release
	// the heap copy; everything below uses the local ts.
	ts = *(ThreadStart*)v;
	free(v);

	// Estimate this thread's stack bounds from the address of the
	// local ts: &ts is near the top; the low bound backs off by the
	// full stack size plus 8K of headroom.
	ts.g->stackhi = (uintptr)&ts;
	ts.g->stacklo = (uintptr)&ts - STACKSIZE + 8*1024;

	/*
	 * Set specific keys in thread local storage.
	 * On 32-bit Windows, FS points at the per-thread TEB; offset
	 * 0x14 is the slot the Go toolchain uses for TLS (the same
	 * offset the generated code reads — keep in sync).
	 */
	asm volatile (
		"movl %0, %%fs:0x14\n"	// MOVL tls0, 0x14(FS)
		"movl %%fs:0x14, %%eax\n"	// MOVL 0x14(FS), tmp
		"movl %1, 0(%%eax)\n"	// MOVL g, 0(FS)
		:: "r"(ts.tls), "r"(ts.g) : "%eax"
	);

	// Transfer control to the Go runtime; does not return normally.
	crosscall_386(ts.fn);
}
|