// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "zasm_GOOS_GOARCH.h"
#include "funcdata.h"
#include "../../cmd/ld/textflag.h"

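// rt0_go is the bootstrap sequence for a new Go program: it carves
// g0's stack out of the OS stack, records CPU features, runs
// _cgo_init when present, installs TLS, wires m0 and g0 together,
// initializes the scheduler, queues runtime·main as the first
// goroutine, and finally starts this M.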
TEXT runtime·rt0_go(SB),NOSPLIT,$0
	// copy arguments forward on an even stack
	MOVQ	DI, AX		// argc
	MOVQ	SI, BX		// argv
	SUBQ	$(4*8+7), SP	// 2args 2auto
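	// align SP down to a 16-byte boundary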
	ANDQ	$~15, SP
	MOVQ	AX, 16(SP)
	MOVQ	BX, 24(SP)

	// create istack out of the given (operating system) stack.
	// _cgo_init may update stackguard.
	MOVQ	$runtime·g0(SB), DI
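	// g0's stack is taken to be the 64 kB below the current SP;
	// the guard is placed 104 bytes above its low end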
	LEAQ	(-64*1024+104)(SP), BX
	MOVQ	BX, g_stackguard(DI)
	MOVQ	BX, g_stackguard0(DI)
	MOVQ	SP, g_stackbase(DI)

	// find out information about the processor we're on
	MOVQ	$0, AX
	CPUID
	CMPQ	AX, $0
	JE	nocpuinfo
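	// CPUID leaf 1: feature flags come back in CX and DX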
	MOVQ	$1, AX
	CPUID
	MOVL	CX, runtime·cpuid_ecx(SB)
	MOVL	DX, runtime·cpuid_edx(SB)
nocpuinfo:

	// if there is an _cgo_init, call it.
	MOVQ	_cgo_init(SB), AX
	TESTQ	AX, AX
	JZ	needtls
	// g0 already in DI
	MOVQ	DI, CX	// Win64 uses CX for first parameter
	MOVQ	$setg_gcc<>(SB), SI
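	// call _cgo_init(g0, setg_gcc)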
	CALL	AX
	// update stackguard after _cgo_init
	MOVQ	$runtime·g0(SB), CX
	MOVQ	g_stackguard0(CX), AX
	MOVQ	AX, g_stackguard(CX)
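	// only Windows still needs the TLS setup below when cgo is in use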
	CMPL	runtime·iswindows(SB), $0
	JEQ ok

needtls:
	// skip TLS setup on Plan 9
	CMPL	runtime·isplan9(SB), $1
	JEQ ok
	// skip TLS setup on Solaris
	CMPL	runtime·issolaris(SB), $1
	JEQ ok

	LEAQ	runtime·tls0(SB), DI
	CALL	runtime·settls(SB)

	// store through it, to make sure it works
	get_tls(BX)
	MOVQ	$0x123, g(BX)
	MOVQ	runtime·tls0(SB), AX
	CMPQ	AX, $0x123
	JEQ 2(PC)
	MOVL	AX, 0	// abort
ok:
	// set the per-goroutine and per-mach "registers"
	get_tls(BX)
	LEAQ	runtime·g0(SB), CX
	MOVQ	CX, g(BX)
	LEAQ	runtime·m0(SB), AX

	// save m->g0 = g0
	MOVQ	CX, m_g0(AX)
	// save m0 to g0->m
	MOVQ	AX, g_m(CX)

	CLD				// convention is D is always left cleared
	CALL	runtime·check(SB)

	MOVL	16(SP), AX		// copy argc
	MOVL	AX, 0(SP)
	MOVQ	24(SP), AX		// copy argv
	MOVQ	AX, 8(SP)
	CALL	runtime·args(SB)
	CALL	runtime·osinit(SB)
	CALL	runtime·schedinit(SB)

	// create a new goroutine to start program
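	// arguments for newproc are pushed by hand below:
	// fn (runtime·main·f) and an argument size of 0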
	MOVQ	$runtime·main·f(SB), BP	// entry
	PUSHQ	BP
	PUSHQ	$0			// arg size
	ARGSIZE(16)
	CALL	runtime·newproc(SB)
	ARGSIZE(-1)
	POPQ	AX
	POPQ	AX

	// start this M
	CALL	runtime·mstart(SB)

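	// mstart should never return; if it does, fault deliberately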
	MOVL	$0xf1, 0xf1	// crash
	RET

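// runtime·main·f is a function value referring to runtime·main;
// newproc above uses it as the entry of the first goroutine.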
DATA	runtime·main·f+0(SB)/8,$runtime·main(SB)
GLOBL	runtime·main·f(SB),RODATA,$8

TEXT runtime·breakpoint(SB),NOSPLIT,$0-0
	BYTE	$0xcc
	RET

TEXT runtime·asminit(SB),NOSPLIT,$0-0
	// No per-thread init.
	RET

/*
 * go-routine
 */

// void gosave(Gobuf*)
// save state in Gobuf; setjmp
TEXT runtime·gosave(SB), NOSPLIT, $0-8
	MOVQ	buf+0(FP), AX	// gobuf
	LEAQ	buf+0(FP), BX	// caller's SP
	MOVQ	BX, gobuf_sp(AX)
	MOVQ	0(SP), BX	// caller's PC
	MOVQ	BX, gobuf_pc(AX)
	MOVQ	$0, gobuf_ret(AX)
	MOVQ	$0, gobuf_ctxt(AX)
	get_tls(CX)
	MOVQ	g(CX), BX
	MOVQ	BX, gobuf_g(AX)
	RET

// void gogo(Gobuf*)
// restore state from Gobuf; longjmp
TEXT runtime·gogo(SB), NOSPLIT, $0-8
	MOVQ	buf+0(FP), BX	// gobuf
	MOVQ	gobuf_g(BX), DX
	MOVQ	0(DX), CX	// make sure g != nil
	get_tls(CX)
	MOVQ	DX, g(CX)
	MOVQ	gobuf_sp(BX), SP	// restore SP
	MOVQ	gobuf_ret(BX), AX
	MOVQ	gobuf_ctxt(BX), DX
	MOVQ	$0, gobuf_sp(BX)	// clear to help garbage collector
	MOVQ	$0, gobuf_ret(BX)
	MOVQ	$0, gobuf_ctxt(BX)
	MOVQ	gobuf_pc(BX), BX
	JMP	BX

// func mcall(fn func(*g))
// Switch to m->g0's stack, call fn(g).
// Fn must never return. It should gogo(&g->sched)
// to keep running g.
TEXT runtime·mcall(SB), NOSPLIT, $0-8
	MOVQ	fn+0(FP), DI

	get_tls(CX)
	MOVQ	g(CX), AX	// save state in g->sched
	MOVQ	0(SP), BX	// caller's PC
	MOVQ	BX, (g_sched+gobuf_pc)(AX)
	LEAQ	fn+0(FP), BX	// caller's SP
	MOVQ	BX, (g_sched+gobuf_sp)(AX)
	MOVQ	AX, (g_sched+gobuf_g)(AX)

	// switch to m->g0 & its stack, call fn
	MOVQ	g(CX), BX
	MOVQ	g_m(BX), BX
	MOVQ	m_g0(BX), SI
	CMPQ	SI, AX	// if g == m->g0 call badmcall
	JNE	3(PC)
	MOVQ	$runtime·badmcall(SB), AX
	JMP	AX
	MOVQ	SI, g(CX)	// g = m->g0
	MOVQ	(g_sched+gobuf_sp)(SI), SP	// sp = m->g0->sched.sp
	PUSHQ	AX
	ARGSIZE(8)
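	// fn is a func value: DX carries its context word and
	// 0(DI) holds the code pointer to call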
	MOVQ	DI, DX
	MOVQ	0(DI), DI
	CALL	DI
	POPQ	AX
	MOVQ	$runtime·badmcall2(SB), AX
	JMP	AX
	RET

// switchtoM is a dummy routine that onM leaves at the bottom
// of the G stack. We need to distinguish the routine that
// lives at the bottom of the G stack from the one that lives
// at the top of the M stack because the one at the top of
// the M stack terminates the stack walk (see topofstack()).
TEXT runtime·switchtoM(SB), NOSPLIT, $0-8
	RET

// func onM(fn func())
// calls fn() on the M stack.
// switches to the M stack if not already on it, and
// switches back when fn() returns.
TEXT runtime·onM(SB), NOSPLIT, $0-8
	MOVQ	fn+0(FP), DI	// DI = fn
	get_tls(CX)
	MOVQ	g(CX), AX	// AX = g
	MOVQ	g_m(AX), BX	// BX = m
	MOVQ	m_g0(BX), DX	// DX = g0
	CMPQ	AX, DX
	JEQ	onm

	// save our state in g->sched. Pretend to
	// be switchtoM if the G stack is scanned.
	MOVQ	$runtime·switchtoM(SB), BP
	MOVQ	BP, (g_sched+gobuf_pc)(AX)
	MOVQ	SP, (g_sched+gobuf_sp)(AX)
	MOVQ	AX, (g_sched+gobuf_g)(AX)

	// switch to g0
	MOVQ	DX, g(CX)
	MOVQ	(g_sched+gobuf_sp)(DX), SP

	// call target function
	ARGSIZE(0)
	MOVQ	DI, DX
	MOVQ	0(DI), DI
	CALL	DI

	// switch back to g
	get_tls(CX)
	MOVQ	g(CX), AX
	MOVQ	g_m(AX), BX
	MOVQ	m_curg(BX), AX
	MOVQ	AX, g(CX)
	MOVQ	(g_sched+gobuf_sp)(AX), SP
	MOVQ	$0, (g_sched+gobuf_sp)(AX)
	RET

onm:
	// already on m stack, just call directly
	MOVQ	DI, DX
	MOVQ	0(DI), DI
	CALL	DI
	RET

/*
 * support for morestack
 */

// Called during function prolog when more stack is needed.
// Caller has already done get_tls(CX); MOVQ m(CX), BX.
//
// The traceback routines see morestack on a g0 as being
// the top of a stack (for example, morestack calling newstack
// calling the scheduler calling newm calling gc), so we must
// record an argument size. For that purpose, it has no arguments.
TEXT runtime·morestack(SB),NOSPLIT,$0-0
	// Cannot grow scheduler stack (m->g0).
	MOVQ	m_g0(BX), SI
	CMPQ	g(CX), SI
	JNE	2(PC)
	INT	$3

	// Called from f.
	// Set m->morebuf to f's caller.
	MOVQ	8(SP), AX	// f's caller's PC
	MOVQ	AX, (m_morebuf+gobuf_pc)(BX)
	LEAQ	16(SP), AX	// f's caller's SP
	MOVQ	AX, (m_morebuf+gobuf_sp)(BX)
	MOVQ	AX, m_moreargp(BX)
	get_tls(CX)
	MOVQ	g(CX), SI
	MOVQ	SI, (m_morebuf+gobuf_g)(BX)

	// Set g->sched to context in f.
	MOVQ	0(SP), AX	// f's PC
	MOVQ	AX, (g_sched+gobuf_pc)(SI)
	MOVQ	SI, (g_sched+gobuf_g)(SI)
	LEAQ	8(SP), AX	// f's SP
	MOVQ	AX, (g_sched+gobuf_sp)(SI)
	MOVQ	DX, (g_sched+gobuf_ctxt)(SI)

	// Call newstack on m->g0's stack.
	MOVQ	m_g0(BX), BP
	MOVQ	BP, g(CX)
	MOVQ	(g_sched+gobuf_sp)(BP), SP
	CALL	runtime·newstack(SB)
	MOVQ	$0, 0x1003	// crash if newstack returns
	RET

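// Before handing control to newstack, morestack records two snapshots:
// m->morebuf describes f's caller (where execution resumes once the new
// segment exists) and g->sched describes f itself (so traceback and
// restart logic can find it). A small Go model of that bookkeeping follows;
// the struct and field names are simplified stand-ins for the real gobuf.
/*
package main

import "fmt"

type gobuf struct{ pc, sp uintptr }

type mRec struct{ morebuf gobuf } // stands in for the runtime M
type gRec struct{ sched gobuf }   // stands in for the runtime G

// morestackModel records the same two snapshots as the assembly above.
func morestackModel(mp *mRec, gp *gRec, fPC, fSP, callerPC, callerSP uintptr) {
	mp.morebuf = gobuf{pc: callerPC, sp: callerSP} // f's caller
	gp.sched = gobuf{pc: fPC, sp: fSP}             // f itself
}

func main() {
	var mp mRec
	var gp gRec
	morestackModel(&mp, &gp, 0x2000, 0x7f00, 0x2040, 0x7f10)
	fmt.Printf("morebuf=%+v sched=%+v\n", mp.morebuf, gp.sched)
}
*/
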
// Called from panic. Mimics morestack,
// reuses stack growth code to create a frame
// with the desired args running the desired function.
//
// func call(fn *byte, arg *byte, argsize uint32).
TEXT runtime·newstackcall(SB), NOSPLIT, $0-20
	get_tls(CX)
	MOVQ	g(CX), BX
	MOVQ	g_m(BX), BX

	// Save our caller's state as the PC and SP to
	// restore when returning from f.
	MOVQ	0(SP), AX	// our caller's PC
	MOVQ	AX, (m_morebuf+gobuf_pc)(BX)
	LEAQ	fv+0(FP), AX	// our caller's SP
	MOVQ	AX, (m_morebuf+gobuf_sp)(BX)
	MOVQ	g(CX), AX
	MOVQ	AX, (m_morebuf+gobuf_g)(BX)

	// Save our own state as the PC and SP to restore
	// if this goroutine needs to be restarted.
	MOVQ	$runtime·newstackcall(SB), BP
	MOVQ	BP, (g_sched+gobuf_pc)(AX)
	MOVQ	SP, (g_sched+gobuf_sp)(AX)

	// Set up morestack arguments to call f on a new stack.
	// We set f's frame size to 1, as a hint to newstack
	// that this is a call from runtime·newstackcall.
	// If it turns out that f needs a larger frame than
	// the default stack, f's usual stack growth prolog will
	// allocate a new segment (and recopy the arguments).
	MOVQ	fv+0(FP), AX	// fn
	MOVQ	addr+8(FP), DX	// arg frame
	MOVL	size+16(FP), CX	// arg size
	MOVQ	AX, m_cret(BX)	// f's PC
	MOVQ	DX, m_moreargp(BX)	// argument frame pointer
	MOVL	CX, m_moreargsize(BX)	// f's argument size
	MOVL	$1, m_moreframesize(BX)	// f's frame size

	// Call newstack on m->g0's stack.
	MOVQ	m_g0(BX), BP
	get_tls(CX)
	MOVQ	BP, g(CX)
	MOVQ	(g_sched+gobuf_sp)(BP), SP
	CALL	runtime·newstack(SB)
	MOVQ	$0, 0x1103	// crash if newstack returns
	RET

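// newstackcall stores a frame size of 1 purely as a flag, as the comment in
// its body notes. A hypothetical consumer on the newstack side could key off
// that sentinel as sketched below; the real newstack is not reproduced here,
// and the function name is invented for the illustration.
/*
package main

import "fmt"

func classifyStackRequest(framesize uint32) string {
	if framesize == 1 {
		return "frame requested by runtime·newstackcall"
	}
	return "ordinary stack split from a function prolog"
}

func main() {
	fmt.Println(classifyStackRequest(1))
	fmt.Println(classifyStackRequest(4096))
}
*/
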
// reflect·call: call a function with the given argument list
// func call(f *FuncVal, arg *byte, argsize uint32).
// we don't have variable-sized frames, so we use a small number
// of constant-sized-frame functions to encode a few bits of size in the pc.
// Caution: ugly multiline assembly macros in your future!

#define DISPATCH(NAME,MAXSIZE) \
	CMPQ	CX, $MAXSIZE; \
	JA	3(PC); \
	MOVQ	$NAME(SB), AX; \
	JMP	AX
// Note: can't just "JMP NAME(SB)" - bad inlining results.

TEXT reflect·call(SB), NOSPLIT, $0-24
	MOVLQZX argsize+16(FP), CX
	DISPATCH(runtime·call16, 16)
	DISPATCH(runtime·call32, 32)
	DISPATCH(runtime·call64, 64)
	DISPATCH(runtime·call128, 128)
	DISPATCH(runtime·call256, 256)
	DISPATCH(runtime·call512, 512)
	DISPATCH(runtime·call1024, 1024)
	DISPATCH(runtime·call2048, 2048)
	DISPATCH(runtime·call4096, 4096)
	DISPATCH(runtime·call8192, 8192)
	DISPATCH(runtime·call16384, 16384)
	DISPATCH(runtime·call32768, 32768)
	DISPATCH(runtime·call65536, 65536)
	DISPATCH(runtime·call131072, 131072)
	DISPATCH(runtime·call262144, 262144)
	DISPATCH(runtime·call524288, 524288)
	DISPATCH(runtime·call1048576, 1048576)
	DISPATCH(runtime·call2097152, 2097152)
	DISPATCH(runtime·call4194304, 4194304)
	DISPATCH(runtime·call8388608, 8388608)
	DISPATCH(runtime·call16777216, 16777216)
	DISPATCH(runtime·call33554432, 33554432)
	DISPATCH(runtime·call67108864, 67108864)
	DISPATCH(runtime·call134217728, 134217728)
	DISPATCH(runtime·call268435456, 268435456)
	DISPATCH(runtime·call536870912, 536870912)
	DISPATCH(runtime·call1073741824, 1073741824)
	MOVQ	$runtime·badreflectcall(SB), AX
	JMP	AX

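// The DISPATCH chain above simply selects the smallest fixed-size callNN
// routine whose frame can hold the argument block, falling through to
// badreflectcall when even the largest class is too small. The same
// selection, sketched in Go (the size classes mirror the table above):
/*
package main

import "fmt"

func pickCallSize(argsize uint32) (uint32, bool) {
	for size := uint32(16); size <= 1<<30; size <<= 1 {
		if argsize <= size {
			return size, true
		}
	}
	return 0, false // would reach runtime·badreflectcall
}

func main() {
	for _, n := range []uint32{8, 16, 100, 5000} {
		size, ok := pickCallSize(n)
		fmt.Println(n, "->", size, ok)
	}
}
*/
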
// Argument map for the callXX frames. Each has one
// stack map (for the single call) with 3 arguments.
DATA gcargs_reflectcall<>+0x00(SB)/4, $1  // 1 stackmap
DATA gcargs_reflectcall<>+0x04(SB)/4, $6  // 3 args
DATA gcargs_reflectcall<>+0x08(SB)/4, $(const_BitsPointer+(const_BitsPointer<<2)+(const_BitsScalar<<4))
GLOBL gcargs_reflectcall<>(SB),RODATA,$12

// callXX frames have no locals
DATA gclocals_reflectcall<>+0x00(SB)/4, $1  // 1 stackmap
DATA gclocals_reflectcall<>+0x04(SB)/4, $0  // 0 locals
GLOBL gclocals_reflectcall<>(SB),RODATA,$8

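// The gcargs_reflectcall data above gives the garbage collector one 2-bit
// code per argument word: pointer, pointer, scalar for (f, arg, argsize).
// A sketch of that packing, assuming the conventional bit values
// BitsScalar = 1 and BitsPointer = 2 from the runtime's GC headers of this
// era (redeclared here only for the illustration):
/*
package main

import "fmt"

const (
	BitsDead    = 0
	BitsScalar  = 1
	BitsPointer = 2
)

// packArgMap packs one 2-bit type code per argument word, low bits first.
func packArgMap(kinds ...uint32) uint32 {
	var w uint32
	for i, k := range kinds {
		w |= k << uint(2*i)
	}
	return w
}

func main() {
	w := packArgMap(BitsPointer, BitsPointer, BitsScalar)
	fmt.Printf("%#x\n", w) // 0x1a = BitsPointer + BitsPointer<<2 + BitsScalar<<4
}
*/
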
#define CALLFN(NAME,MAXSIZE) \
TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \
	FUNCDATA $FUNCDATA_ArgsPointerMaps,gcargs_reflectcall<>(SB); \
	FUNCDATA $FUNCDATA_LocalsPointerMaps,gclocals_reflectcall<>(SB); \
	/* copy arguments to stack */ \
	MOVQ	argptr+8(FP), SI; \
	MOVLQZX argsize+16(FP), CX; \
	MOVQ	SP, DI; \
	REP;MOVSB; \
	/* call function */ \
	MOVQ	f+0(FP), DX; \
	PCDATA	$PCDATA_StackMapIndex, $0; \
	CALL	(DX); \
	/* copy return values back */ \
	MOVQ	argptr+8(FP), DI; \
	MOVLQZX argsize+16(FP), CX; \
	MOVLQZX retoffset+20(FP), BX; \
	MOVQ	SP, SI; \
	ADDQ	BX, DI; \
	ADDQ	BX, SI; \
	SUBQ	BX, CX; \
	REP;MOVSB; \
	RET

CALLFN(runtime·call16, 16)
CALLFN(runtime·call32, 32)
CALLFN(runtime·call64, 64)
CALLFN(runtime·call128, 128)
CALLFN(runtime·call256, 256)
CALLFN(runtime·call512, 512)
CALLFN(runtime·call1024, 1024)
CALLFN(runtime·call2048, 2048)
CALLFN(runtime·call4096, 4096)
CALLFN(runtime·call8192, 8192)
CALLFN(runtime·call16384, 16384)
CALLFN(runtime·call32768, 32768)
CALLFN(runtime·call65536, 65536)
CALLFN(runtime·call131072, 131072)
CALLFN(runtime·call262144, 262144)
CALLFN(runtime·call524288, 524288)
CALLFN(runtime·call1048576, 1048576)
CALLFN(runtime·call2097152, 2097152)
CALLFN(runtime·call4194304, 4194304)
CALLFN(runtime·call8388608, 8388608)
CALLFN(runtime·call16777216, 16777216)
CALLFN(runtime·call33554432, 33554432)
CALLFN(runtime·call67108864, 67108864)
CALLFN(runtime·call134217728, 134217728)
CALLFN(runtime·call268435456, 268435456)
CALLFN(runtime·call536870912, 536870912)
CALLFN(runtime·call1073741824, 1073741824)

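// Each CALLFN body copies the caller-supplied argument block into its own
// frame, calls the function, and then copies back only the bytes at and
// beyond retoffset, so the results are preserved while input words that the
// callee may have rewritten are not copied back over the caller's block.
// A byte-slice model of that copy discipline, with the frame layout
// deliberately simplified:
/*
package main

import "fmt"

func callWithFrame(args []byte, retoffset int, fn func(frame []byte)) {
	frame := make([]byte, len(args))
	copy(frame, args) // copy arguments to stack
	fn(frame)         // call function
	copy(args[retoffset:], frame[retoffset:]) // copy return values back
}

func main() {
	args := []byte{1, 2, 3, 0, 0} // three input bytes, two result bytes
	callWithFrame(args, 3, func(frame []byte) {
		frame[0] = 9                      // callee may scribble on its inputs...
		frame[3], frame[4] = frame[1], 42 // ...and writes its results
	})
	fmt.Println(args) // [1 2 3 2 42]: inputs untouched, results copied back
}
*/
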
// Return point when leaving stack.
//
// Lessstack can appear in stack traces for the same reason
// as morestack; in that context, it has 0 arguments.
TEXT runtime·lessstack(SB), NOSPLIT, $0-0
	// Save return value in m->cret
	get_tls(CX)
	MOVQ	g(CX), BX
	MOVQ	g_m(BX), BX
	MOVQ	AX, m_cret(BX)

	// Call oldstack on m->g0's stack.
	MOVQ	m_g0(BX), BP
	MOVQ	BP, g(CX)
	MOVQ	(g_sched+gobuf_sp)(BP), SP
	CALL	runtime·oldstack(SB)
	MOVQ	$0, 0x1004	// crash if oldstack returns
	RET

// morestack trampolines
TEXT runtime·morestack00(SB),NOSPLIT,$0
	get_tls(CX)
	MOVQ	g(CX), BX
	MOVQ	g_m(BX), BX
	MOVQ	$0, AX
	MOVQ	AX, m_moreframesize(BX)
	MOVQ	$runtime·morestack(SB), AX
	JMP	AX

TEXT runtime·morestack01(SB),NOSPLIT,$0
	get_tls(CX)
	MOVQ	g(CX), BX
	MOVQ	g_m(BX), BX
	SHLQ	$32, AX
	MOVQ	AX, m_moreframesize(BX)
	MOVQ	$runtime·morestack(SB), AX
	JMP	AX

TEXT runtime·morestack10(SB),NOSPLIT,$0
	get_tls(CX)
	MOVQ	g(CX), BX
	MOVQ	g_m(BX), BX
	MOVLQZX	AX, AX
	MOVQ	AX, m_moreframesize(BX)
|
|
|
MOVQ $runtime·morestack(SB), AX
|
2009-05-01 19:07:33 -06:00
|
|
|
JMP AX

TEXT runtime·morestack11(SB),NOSPLIT,$0
	get_tls(CX)
	MOVQ	g(CX), BX
	MOVQ	g_m(BX), BX
	MOVQ	AX, m_moreframesize(BX)
	MOVQ	$runtime·morestack(SB), AX
	JMP	AX
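
// The morestack variants above appear to differ only in how the pending
// size in AX is packed into m->moreframesize before jumping to
// runtime·morestack: one shifts AX into the high 32 bits (SHLQ $32),
// morestack10 keeps only the zero-extended low 32 bits (MOVLQZX), and
// morestack11 stores AX unchanged.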

// subcases of morestack01
// with const of 8,16,...48
TEXT runtime·morestack8(SB),NOSPLIT,$0
	MOVQ	$1, R8
	MOVQ	$morestack<>(SB), AX
	JMP	AX

TEXT runtime·morestack16(SB),NOSPLIT,$0
	MOVQ	$2, R8
	MOVQ	$morestack<>(SB), AX
	JMP	AX

TEXT runtime·morestack24(SB),NOSPLIT,$0
	MOVQ	$3, R8
	MOVQ	$morestack<>(SB), AX
	JMP	AX

TEXT runtime·morestack32(SB),NOSPLIT,$0
	MOVQ	$4, R8
	MOVQ	$morestack<>(SB), AX
	JMP	AX

TEXT runtime·morestack40(SB),NOSPLIT,$0
	MOVQ	$5, R8
	MOVQ	$morestack<>(SB), AX
	JMP	AX

TEXT runtime·morestack48(SB),NOSPLIT,$0
	MOVQ	$6, R8
	MOVQ	$morestack<>(SB), AX
	JMP	AX

TEXT morestack<>(SB),NOSPLIT,$0
	get_tls(CX)
	MOVQ	g(CX), BX
	MOVQ	g_m(BX), BX
	SHLQ	$35, R8
	MOVQ	R8, m_moreframesize(BX)
	MOVQ	$runtime·morestack(SB), AX
	JMP	AX
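
// Sketch of the encoding implied by the constants above (an inference
// from this file, not a statement of the wider runtime contract): each
// morestackN stub loads R8 with N/8, and SHLQ $35 turns that into N<<32,
// because (N/8)<<35 == N<<32.  For example, morestack8 sets R8 = 1, so
// morestack<> stores 8<<32 into m->moreframesize, i.e. the size 8 lands
// in the high 32 bits.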

TEXT runtime·morestack00_noctxt(SB),NOSPLIT,$0
	MOVL	$0, DX
	JMP	runtime·morestack00(SB)

TEXT runtime·morestack01_noctxt(SB),NOSPLIT,$0
	MOVL	$0, DX
	JMP	runtime·morestack01(SB)

TEXT runtime·morestack10_noctxt(SB),NOSPLIT,$0
	MOVL	$0, DX
	JMP	runtime·morestack10(SB)

TEXT runtime·morestack11_noctxt(SB),NOSPLIT,$0
	MOVL	$0, DX
	JMP	runtime·morestack11(SB)

TEXT runtime·morestack8_noctxt(SB),NOSPLIT,$0
	MOVL	$0, DX
	JMP	runtime·morestack8(SB)

TEXT runtime·morestack16_noctxt(SB),NOSPLIT,$0
	MOVL	$0, DX
	JMP	runtime·morestack16(SB)

TEXT runtime·morestack24_noctxt(SB),NOSPLIT,$0
	MOVL	$0, DX
	JMP	runtime·morestack24(SB)

TEXT runtime·morestack32_noctxt(SB),NOSPLIT,$0
	MOVL	$0, DX
	JMP	runtime·morestack32(SB)

TEXT runtime·morestack40_noctxt(SB),NOSPLIT,$0
	MOVL	$0, DX
	JMP	runtime·morestack40(SB)

TEXT runtime·morestack48_noctxt(SB),NOSPLIT,$0
	MOVL	$0, DX
	JMP	runtime·morestack48(SB)
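
// The _noctxt variants just clear DX before tailing into the normal
// entry points.  DX is the closure context register on amd64, so the
// visible effect here is that these entries hand runtime·morestack a
// zero context instead of whatever DX happened to contain.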

// bool cas(int32 *val, int32 old, int32 new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	} else
//		return 0;
TEXT runtime·cas(SB), NOSPLIT, $0-17
	MOVQ	ptr+0(FP), BX
	MOVL	old+8(FP), AX
	MOVL	new+12(FP), CX
	LOCK
	CMPXCHGL	CX, 0(BX)
	JZ	4(PC)
	MOVL	$0, AX
	MOVB	AX, ret+16(FP)
	RET
	MOVL	$1, AX
	MOVB	AX, ret+16(FP)
	RET
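
// Frame layout, read off the FP offsets above rather than copied from a
// Go declaration: ptr at +0, old at +8, new at +12, and the result byte
// at +16 account for the $0-17 annotation, matching a Go prototype along
// the lines of
//	func cas(ptr *int32, old, new int32) bool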

// bool	runtime·cas64(uint64 *val, uint64 old, uint64 new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	} else {
//		return 0;
//	}
TEXT runtime·cas64(SB), NOSPLIT, $0-25
	MOVQ	ptr+0(FP), BX
	MOVQ	old+8(FP), AX
	MOVQ	new+16(FP), CX
	LOCK
	CMPXCHGQ	CX, 0(BX)
	JNZ	cas64_fail
	MOVL	$1, AX
	MOVB	AX, ret+24(FP)
	RET
cas64_fail:
	MOVL	$0, AX
	MOVB	AX, ret+24(FP)
	RET
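
// As with cas, the offsets imply (this is an inference, not a copied
// declaration) a prototype on the order of
//	func cas64(ptr *uint64, old, new uint64) bool
// with the one-byte result at +24 giving the $0-25 frame annotation.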

TEXT runtime·casuintptr(SB), NOSPLIT, $0-25
	JMP	runtime·cas64(SB)

TEXT runtime·atomicloaduintptr(SB), NOSPLIT, $0-16
	JMP	runtime·atomicload64(SB)

TEXT runtime·atomicloaduint(SB), NOSPLIT, $0-16
	JMP	runtime·atomicload64(SB)
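
// These are plain tail jumps because uintptr and uint are both 64 bits
// wide on amd64, so the uintptr/uint operations share their argument
// layout, and therefore their implementation, with the 64-bit versions.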

// bool casp(void **val, void *old, void *new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	} else
//		return 0;
TEXT runtime·casp(SB), NOSPLIT, $0-25
	MOVQ	ptr+0(FP), BX
	MOVQ	old+8(FP), AX
	MOVQ	new+16(FP), CX
	LOCK
	CMPXCHGQ	CX, 0(BX)
	JZ	4(PC)
	MOVL	$0, AX
	MOVB	AX, ret+24(FP)
	RET
	MOVL	$1, AX
	MOVB	AX, ret+24(FP)
	RET
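
// casp performs the same LOCK CMPXCHGQ compare-and-swap as cas64, only
// typed for pointers: a pointer is 8 bytes on amd64, so the frame layout
// ($0-25) is identical.  A Go-side view would be roughly
//	func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool
// though the exact prototype is not shown in this file.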

// uint32 xadd(uint32 volatile *val, int32 delta)
// Atomically:
//	*val += delta;
//	return *val;
TEXT runtime·xadd(SB), NOSPLIT, $0-20
	MOVQ	ptr+0(FP), BX
	MOVL	delta+8(FP), AX
	MOVL	AX, CX
	LOCK
	XADDL	AX, 0(BX)
	ADDL	CX, AX
	MOVL	AX, ret+16(FP)
	RET
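
// How the return value is produced: XADDL leaves the old *val in AX, so
// adding back the saved delta in CX reconstructs old+delta, the value the
// comment above promises to return.  For example, with *val == 5 and
// delta == 3, XADDL stores 8 in memory and leaves 5 in AX, and the ADDL
// makes AX 8.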

TEXT runtime·xadd64(SB), NOSPLIT, $0-24
	MOVQ	ptr+0(FP), BX
	MOVQ	delta+8(FP), AX
	MOVQ	AX, CX
	LOCK
	XADDQ	AX, 0(BX)
	ADDQ	CX, AX
	MOVQ	AX, ret+16(FP)
	RET
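
// xadd64 is the 64-bit analogue of xadd: XADDQ instead of XADDL, an
// 8-byte result at +16 (hence $0-24), and the same old-value-plus-delta
// trick to return the updated value.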

TEXT runtime·xchg(SB), NOSPLIT, $0-20
	MOVQ	ptr+0(FP), BX
	MOVL	new+8(FP), AX
	XCHGL	AX, 0(BX)
	MOVL	AX, ret+16(FP)
BenchmarkMutexSlack-8 101.00 158.00 +56.44%
BenchmarkMutexSlack-16 97.00 129.00 +32.99%
BenchmarkMutexWork 792.00 794.00 +0.25%
BenchmarkMutexWork-2 407.00 409.00 +0.49%
BenchmarkMutexWork-4 220.00 209.00 -5.00%
BenchmarkMutexWork-8 267.00 160.00 -40.07%
BenchmarkMutexWork-16 315.00 300.00 -4.76%
BenchmarkMutexWorkSlack 792.00 793.00 +0.13%
BenchmarkMutexWorkSlack-2 406.00 404.00 -0.49%
BenchmarkMutexWorkSlack-4 225.00 212.00 -5.78%
BenchmarkMutexWorkSlack-8 268.00 136.00 -49.25%
BenchmarkMutexWorkSlack-16 300.00 300.00 +0.00%
BenchmarkRWMutexWrite100 27.10 27.00 -0.37%
BenchmarkRWMutexWrite100-2 33.10 40.80 +23.26%
BenchmarkRWMutexWrite100-4 113.00 88.10 -22.04%
BenchmarkRWMutexWrite100-8 119.00 95.30 -19.92%
BenchmarkRWMutexWrite100-16 148.00 109.00 -26.35%
BenchmarkRWMutexWrite10 29.60 29.40 -0.68%
BenchmarkRWMutexWrite10-2 111.00 61.40 -44.68%
BenchmarkRWMutexWrite10-4 270.00 208.00 -22.96%
BenchmarkRWMutexWrite10-8 204.00 185.00 -9.31%
BenchmarkRWMutexWrite10-16 261.00 190.00 -27.20%
BenchmarkRWMutexWorkWrite100 1040.00 1036.00 -0.38%
BenchmarkRWMutexWorkWrite100-2 593.00 580.00 -2.19%
BenchmarkRWMutexWorkWrite100-4 470.00 365.00 -22.34%
BenchmarkRWMutexWorkWrite100-8 468.00 289.00 -38.25%
BenchmarkRWMutexWorkWrite100-16 604.00 374.00 -38.08%
BenchmarkRWMutexWorkWrite10 951.00 951.00 +0.00%
BenchmarkRWMutexWorkWrite10-2 1001.00 928.00 -7.29%
BenchmarkRWMutexWorkWrite10-4 1555.00 1006.00 -35.31%
BenchmarkRWMutexWorkWrite10-8 2085.00 1171.00 -43.84%
BenchmarkRWMutexWorkWrite10-16 2082.00 1614.00 -22.48%
R=rsc, iant, msolo, fw, iant
CC=golang-dev
https://golang.org/cl/4711045
2011-07-29 10:44:06 -06:00
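A minimal Go sketch of the hybrid scheme described above, assuming Linux/amd64 (the futex wrapper, constants, and spin counts are written out here for illustration; the runtime's real implementation is the C/assembly this commit touches):

package hybridlock

import (
	"runtime"
	"sync/atomic"
	"syscall"
	"unsafe"
)

const (
	unlocked = 0
	locked   = 1
	sleeping = 2 // a waiter may be blocked in the kernel

	spinTries = 5 // brief spinning; the runtime spins with PAUSE via procyield

	futexWait = 0 // FUTEX_WAIT
	futexWake = 1 // FUTEX_WAKE
)

type mutex struct{ key uint32 }

func futex(addr *uint32, op, val int) {
	syscall.Syscall6(syscall.SYS_FUTEX,
		uintptr(unsafe.Pointer(addr)), uintptr(op), uintptr(val), 0, 0, 0)
}

func (l *mutex) lock() {
	// Fast path: uncontended acquire.
	if atomic.CompareAndSwapUint32(&l.key, unlocked, locked) {
		return
	}
	wait := uint32(locked)
	for {
		// Active/passive spinning: critical sections are short, so the
		// holder is likely to release before we need to block.
		for i := 0; i < spinTries; i++ {
			if atomic.LoadUint32(&l.key) == unlocked &&
				atomic.CompareAndSwapUint32(&l.key, unlocked, wait) {
				return
			}
			runtime.Gosched() // stand-in for procyield/osyield
		}
		// Still contended: advertise a sleeper and block in the kernel.
		if atomic.SwapUint32(&l.key, sleeping) == unlocked {
			return
		}
		wait = sleeping
		futex(&l.key, futexWait, sleeping)
	}
}

func (l *mutex) unlock() {
	if atomic.SwapUint32(&l.key, unlocked) == sleeping {
		futex(&l.key, futexWake, 1) // wake one blocked waiter
	}
}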
|
|
|
RET
|
|
|
|
|
2014-08-27 09:32:17 -06:00
|
|
|
TEXT runtime·xchg64(SB), NOSPLIT, $0-24
|
|
|
|
MOVQ ptr+0(FP), BX
|
|
|
|
MOVQ new+8(FP), AX
|
2013-03-05 00:46:52 -07:00
|
|
|
XCHGQ AX, 0(BX)
|
2014-08-27 09:32:17 -06:00
|
|
|
MOVQ AX, ret+16(FP)
|
2013-03-05 00:46:52 -07:00
|
|
|
RET
|
|
|
|
|
2014-08-27 09:32:17 -06:00
|
|
|
TEXT runtime·xchgp(SB), NOSPLIT, $0-24
|
|
|
|
MOVQ ptr+0(FP), BX
|
|
|
|
MOVQ new+8(FP), AX
|
2014-01-22 00:27:16 -07:00
|
|
|
XCHGQ AX, 0(BX)
|
2014-08-27 09:32:17 -06:00
|
|
|
MOVQ AX, ret+16(FP)
|
2014-01-22 00:27:16 -07:00
|
|
|
RET
|
|
|
|
|
2013-08-07 11:23:24 -06:00
|
|
|
TEXT runtime·procyield(SB),NOSPLIT,$0-0
|
2014-08-27 09:32:17 -06:00
|
|
|
MOVL cycles+0(FP), AX
|
2011-07-29 10:44:06 -06:00
|
|
|
again:
|
|
|
|
PAUSE
|
|
|
|
SUBL $1, AX
|
|
|
|
JNZ again
|
|
|
|
RET
|
|
|
|
|
2013-08-07 11:23:24 -06:00
|
|
|
TEXT runtime·atomicstorep(SB), NOSPLIT, $0-16
|
2014-08-27 09:32:17 -06:00
|
|
|
MOVQ ptr+0(FP), BX
|
|
|
|
MOVQ val+8(FP), AX
|
2011-07-13 12:22:41 -06:00
|
|
|
XCHGQ AX, 0(BX)
|
|
|
|
RET
|
|
|
|
|
2013-08-07 11:23:24 -06:00
|
|
|
TEXT runtime·atomicstore(SB), NOSPLIT, $0-12
|
2014-08-27 09:32:17 -06:00
|
|
|
MOVQ ptr+0(FP), BX
|
|
|
|
MOVL val+8(FP), AX
|
2011-07-29 11:47:24 -06:00
|
|
|
XCHGL AX, 0(BX)
|
|
|
|
RET
|
|
|
|
|
2013-08-07 11:23:24 -06:00
|
|
|
TEXT runtime·atomicstore64(SB), NOSPLIT, $0-16
|
2014-08-27 09:32:17 -06:00
|
|
|
MOVQ ptr+0(FP), BX
|
|
|
|
MOVQ val+8(FP), AX
|
2012-04-05 08:47:43 -06:00
|
|
|
XCHGQ AX, 0(BX)
|
|
|
|
RET
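Note that all three store routines above use XCHG rather than a plain MOV: on x86 an XCHG with a memory operand is implicitly LOCKed, so the store doubles as a full memory barrier. At the Go level the portable equivalents are simply the sync/atomic stores (a sketch, not the runtime's code):

package storesketch

import (
	"sync/atomic"
	"unsafe"
)

func storeUint32(addr *uint32, val uint32)                  { atomic.StoreUint32(addr, val) }
func storeUint64(addr *uint64, val uint64)                  { atomic.StoreUint64(addr, val) }
func storePointer(addr *unsafe.Pointer, val unsafe.Pointer) { atomic.StorePointer(addr, val) }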
|
|
|
|
|
2014-08-19 07:38:00 -06:00
|
|
|
// void runtime·atomicor8(byte volatile*, byte);
|
2014-08-27 09:32:17 -06:00
|
|
|
TEXT runtime·atomicor8(SB), NOSPLIT, $0-9
|
2014-08-19 07:38:00 -06:00
|
|
|
MOVQ ptr+0(FP), AX
|
|
|
|
MOVB val+8(FP), BX
|
|
|
|
LOCK
|
|
|
|
ORB BX, (AX)
|
|
|
|
RET
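atomicor8 leans on the x86 LOCK ORB instruction. Where no byte-wide atomic is available, the same effect can be emulated with a compare-and-swap loop on the aligned 32-bit word containing the byte; a little-endian Go sketch (illustrative only, not how this file implements it):

package or8sketch

import (
	"sync/atomic"
	"unsafe"
)

// atomicOr8 atomically ORs val into *addr.
func atomicOr8(addr *uint8, val uint8) {
	p := uintptr(unsafe.Pointer(addr))
	word := (*uint32)(unsafe.Pointer(p &^ 3)) // aligned word containing the byte
	shift := (p & 3) * 8                      // byte's bit position (little-endian)
	mask := uint32(val) << shift
	for {
		old := atomic.LoadUint32(word)
		if atomic.CompareAndSwapUint32(word, old, old|mask) {
			return
		}
	}
}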
|
|
|
|
|
2009-06-03 00:02:12 -06:00
|
|
|
// void jmpdefer(fn, sp);
|
|
|
|
// called from deferreturn.
|
2009-01-27 13:03:53 -07:00
|
|
|
// 1. pop the caller
|
|
|
|
// 2. sub 5 bytes from the caller's return address (the size of the CALL), so deferreturn runs again
|
|
|
|
// 3. jmp to the argument
|
2013-08-07 15:03:50 -06:00
|
|
|
TEXT runtime·jmpdefer(SB), NOSPLIT, $0-16
|
2014-08-27 09:32:17 -06:00
|
|
|
MOVQ fv+0(FP), DX // fn
|
|
|
|
MOVQ argp+8(FP), BX // caller sp
|
2009-06-03 00:02:12 -06:00
|
|
|
LEAQ -8(BX), SP // caller sp after CALL
|
|
|
|
SUBQ $5, (SP) // return to CALL again
|
2013-02-22 08:47:54 -07:00
|
|
|
MOVQ 0(DX), BX
|
2013-02-21 15:01:13 -07:00
|
|
|
JMP BX // but first run the deferred function
|
2009-10-03 11:37:12 -06:00
|
|
|
|
2013-06-12 13:22:26 -06:00
|
|
|
// Save state of caller into g->sched. Smashes R8, R9.
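A rough Go-level picture of the g->sched buffer that gosave<> fills in (field names follow the gobuf_* symbols used below; the authoritative layout lives in the runtime headers, so treat this as a sketch):

// gobuf mirrors the scheduling snapshot written into g->sched.
type gobuf struct {
	sp   uintptr // caller's stack pointer just above the saved return PC
	pc   uintptr // return address of gosave<>'s caller
	g    uintptr // owning g (not written by gosave<>)
	ctxt uintptr // closure context; gosave<> zeroes it
	ret  uintptr // return value slot; gosave<> zeroes it
}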
|
2013-08-07 11:23:24 -06:00
|
|
|
TEXT gosave<>(SB),NOSPLIT,$0
|
2013-06-12 13:22:26 -06:00
|
|
|
get_tls(R8)
|
|
|
|
MOVQ g(R8), R8
|
|
|
|
MOVQ 0(SP), R9
|
|
|
|
MOVQ R9, (g_sched+gobuf_pc)(R8)
|
|
|
|
LEAQ 8(SP), R9
|
|
|
|
MOVQ R9, (g_sched+gobuf_sp)(R8)
|
|
|
|
MOVQ $0, (g_sched+gobuf_ret)(R8)
|
|
|
|
MOVQ $0, (g_sched+gobuf_ctxt)(R8)
|
runtime: scheduler, cgo reorganization
* Change use of m->g0 stack (aka scheduler stack).
* Provide runtime.mcall(f) to invoke f() on m->g0 stack.
* Replace scheduler loop entry with runtime.mcall(schedule).
Runtime.mcall eliminates the need for fake scheduler states that
exist just to run a bit of code on the m->g0 stack
(Grecovery, Gstackalloc).
The elimination of the scheduler as a loop that stops and
starts using gosave and gogo fixes a bad interaction with the
way cgo uses the m->g0 stack. Cgo runs external (gcc-compiled)
C functions on that stack, and then when calling back into Go,
it sets m->g0->sched.sp below the added call frames, so that
other uses of m->g0's stack will not interfere with those frames.
Unfortunately, gogo (longjmp) back to the scheduler loop at
this point would end up running scheduler with the lower
sp, which no longer points at a valid stack frame for
a call to scheduler. If scheduler then wrote any function call
arguments or local variables to where it expected the stack
frame to be, it would overwrite other data on the stack.
I realized this possibility while debugging a problem with
calling complex Go code in a Go -> C -> Go cgo callback.
This wasn't the bug I was looking for, it turns out, but I believe
it is a real bug nonetheless. Switching to runtime.mcall, which
only adds new frames to the stack and never jumps into
functions running in existing ones, fixes this bug.
* Move cgo-related code out of proc.c into cgocall.c.
* Add very large comment describing cgo call sequences.
* Simplify, regularize cgo function implementations and names.
* Add test suite as misc/cgo/test.
Now the Go -> C path calls cgocall, which calls asmcgocall,
and the C -> Go path calls cgocallback, which calls cgocallbackg.
The shuffling, which affects mainly the callback case, moves
most of the callback implementation to cgocallback running
on the m->curg stack (not the m->g0 scheduler stack) and
only while accounted for with $GOMAXPROCS (between calls
to exitsyscall and entersyscall).
The previous callback code did not block in startcgocallback's
approximation to exitsyscall, so if, say, the garbage collector
were running, it would still barge in and start doing things
like call malloc. Similarly endcgocallback's approximation of
entersyscall did not call matchmg to kick off new OS threads
when necessary, which caused the bug in issue 1560.
Fixes #1560.
R=iant
CC=golang-dev
https://golang.org/cl/4253054
2011-03-07 08:37:42 -07:00
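To make the call sequences concrete, here is a small, ordinary cgo program exercising the Go -> C path the message describes (the comment about which runtime routines run is taken from the description above):

package main

/*
#include <stdio.h>
static void hello(int x) { printf("hello from C: %d\n", x); }
*/
import "C"

func main() {
	// The C call enters runtime·cgocall, which uses asmcgocall to switch
	// to the m->g0 (scheduler) stack before running the gcc-compiled code.
	C.hello(C.int(42))
}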
|
|
|
RET
|
|
|
|
|
|
|
|
// asmcgocall(void(*fn)(void*), void *arg)
|
2009-10-12 11:26:38 -06:00
|
|
|
// Call fn(arg) on the scheduler stack,
|
|
|
|
// aligned appropriately for the gcc ABI.
|
2011-03-07 08:37:42 -07:00
|
|
|
// See cgocall.c for more details.
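A sketch of the Go-side declarations matching the two entry points below; the frame sizes $0-16 and $0-20 correspond to these signatures, and the bodies are the assembly that follows (names read from the assembly, not a stable API):

func asmcgocall(fn, arg unsafe.Pointer)
func asmcgocall_errno(fn, arg unsafe.Pointer) int32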
|
2014-09-03 22:01:55 -06:00
|
|
|
TEXT runtime·asmcgocall(SB),NOSPLIT,$0-16
|
2014-09-03 09:36:14 -06:00
|
|
|
MOVQ fn+0(FP), AX
|
|
|
|
MOVQ arg+8(FP), BX
|
2014-09-03 22:01:55 -06:00
|
|
|
CALL asmcgocall<>(SB)
|
2014-09-03 09:36:14 -06:00
|
|
|
RET
|
|
|
|
|
|
|
|
TEXT runtime·asmcgocall_errno(SB),NOSPLIT,$0-20
|
2011-03-07 08:37:42 -07:00
|
|
|
MOVQ fn+0(FP), AX
|
|
|
|
MOVQ arg+8(FP), BX
|
2014-09-03 22:01:55 -06:00
|
|
|
CALL asmcgocall<>(SB)
|
|
|
|
MOVL AX, ret+16(FP)
|
|
|
|
RET
|
|
|
|
|
|
|
|
// asmcgocall common code. fn in AX, arg in BX. returns errno in AX.
|
|
|
|
TEXT asmcgocall<>(SB),NOSPLIT,$0-0
|
2011-03-07 08:37:42 -07:00
|
|
|
MOVQ SP, DX
|
2009-10-12 11:26:38 -06:00
|
|
|
|
|
|
|
// Figure out if we need to switch to m->g0 stack.
|
2011-03-07 08:37:42 -07:00
|
|
|
// We get called to create new OS threads too, and those
|
|
|
|
// come in on the m->g0 stack already.
|
|
|
|
get_tls(CX)
|
2014-06-26 09:54:39 -06:00
|
|
|
MOVQ g(CX), BP
|
|
|
|
MOVQ g_m(BP), BP
|
2011-03-07 08:37:42 -07:00
|
|
|
MOVQ m_g0(BP), SI
|
|
|
|
MOVQ g(CX), DI
|
|
|
|
CMPQ SI, DI
|
2014-01-16 21:58:10 -07:00
|
|
|
JEQ nosave
|
|
|
|
MOVQ m_gsignal(BP), SI
|
|
|
|
CMPQ SI, DI
|
|
|
|
JEQ nosave
|
|
|
|
|
|
|
|
MOVQ m_g0(BP), SI
|
2013-06-12 13:22:26 -06:00
|
|
|
CALL gosave<>(SB)
|
2011-03-07 08:37:42 -07:00
|
|
|
MOVQ SI, g(CX)
|
|
|
|
MOVQ (g_sched+gobuf_sp)(SI), SP
|
2014-01-16 21:58:10 -07:00
|
|
|
nosave:
|
2009-10-12 11:26:38 -06:00
|
|
|
|
|
|
|
// Now on a scheduling stack (a pthread-created stack).
|
2012-09-02 20:12:51 -06:00
|
|
|
// Make sure we have enough room for 4 stack-backed fast-call
|
|
|
|
// registers as per windows amd64 calling convention.
|
|
|
|
SUBQ $64, SP
|
2009-10-03 11:37:12 -06:00
|
|
|
ANDQ $~15, SP // alignment for gcc ABI
|
2012-09-02 20:12:51 -06:00
|
|
|
MOVQ DI, 48(SP) // save g
|
|
|
|
MOVQ DX, 40(SP) // save SP
|
2011-03-07 08:37:42 -07:00
|
|
|
MOVQ BX, DI // DI = first argument in AMD64 ABI
|
2011-07-19 08:47:33 -06:00
|
|
|
MOVQ BX, CX // CX = first argument in Win64
|
2011-03-07 08:37:42 -07:00
|
|
|
CALL AX
|
2009-10-12 11:26:38 -06:00
|
|
|
|
2010-08-04 18:50:22 -06:00
|
|
|
// Restore registers, g, stack pointer.
|
2011-03-07 08:37:42 -07:00
|
|
|
get_tls(CX)
|
2012-09-02 20:12:51 -06:00
|
|
|
MOVQ 48(SP), DI
|
2011-03-07 08:37:42 -07:00
|
|
|
MOVQ DI, g(CX)
|
2012-09-02 20:12:51 -06:00
|
|
|
MOVQ 40(SP), SP
|
2009-10-03 11:37:12 -06:00
|
|
|
RET
|
|
|
|
|
2011-03-07 08:37:42 -07:00
|
|
|
// cgocallback(void (*fn)(void*), void *frame, uintptr framesize)
|
2013-02-22 14:08:56 -07:00
|
|
|
// Turn the fn into a Go func (by taking its address) and call
|
|
|
|
// cgocallback_gofunc.
|
2013-08-07 11:23:24 -06:00
|
|
|
TEXT runtime·cgocallback(SB),NOSPLIT,$24-24
|
2013-02-22 14:08:56 -07:00
|
|
|
LEAQ fn+0(FP), AX
|
|
|
|
MOVQ AX, 0(SP)
|
|
|
|
MOVQ frame+8(FP), AX
|
|
|
|
MOVQ AX, 8(SP)
|
|
|
|
MOVQ framesize+16(FP), AX
|
|
|
|
MOVQ AX, 16(SP)
|
|
|
|
MOVQ $runtime·cgocallback_gofunc(SB), AX
|
|
|
|
CALL AX
|
|
|
|
RET
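The "turn the fn into a Go func (by taking its address)" step above leans on the representation of Go func values. A minimal sketch of the layout involved (an assumption-level simplification, not the runtime's actual declaration):
	// A Go func value is represented as a pointer to a struct whose first
	// word is the code address.
	type funcval struct {
		fn uintptr // entry PC of the function
	}
	// cgocallback's fn argument slot holds a raw C function pointer, so
	// LEAQ fn+0(FP) yields a pointer to that slot; the slot has exactly this
	// one-word layout, and the pointer can be handed to cgocallback_gofunc
	// as a Go func value.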
|
|
|
|
|
|
|
|
// cgocallback_gofunc(FuncVal*, void *frame, uintptr framesize)
|
|
|
|
// See cgocall.c for more details.
|
2013-08-07 11:23:24 -06:00
|
|
|
TEXT runtime·cgocallback_gofunc(SB),NOSPLIT,$8-24
|
2014-06-26 09:54:39 -06:00
|
|
|
// If g is nil, Go did not create the current thread.
|
|
|
|
// Call needm to obtain one m for temporary use.
|
2013-02-20 15:48:23 -07:00
|
|
|
// In this case, we're running on the thread stack, so there's
|
|
|
|
// lots of space, but the linker doesn't know. Hide the call from
|
|
|
|
// the linker analysis by using an indirect call through AX.
|
2010-08-04 18:50:22 -06:00
|
|
|
get_tls(CX)
|
2013-02-20 15:48:23 -07:00
|
|
|
#ifdef GOOS_windows
|
2013-07-23 16:40:02 -06:00
|
|
|
MOVL $0, BP
|
2013-02-20 15:48:23 -07:00
|
|
|
CMPQ CX, $0
|
2013-07-23 20:59:32 -06:00
|
|
|
JEQ 2(PC)
|
2013-02-20 15:48:23 -07:00
|
|
|
#endif
|
2014-06-26 09:54:39 -06:00
|
|
|
MOVQ g(CX), BP
|
2012-03-08 10:12:40 -07:00
|
|
|
CMPQ BP, $0
|
2014-06-26 09:54:39 -06:00
|
|
|
JEQ needm
|
|
|
|
MOVQ g_m(BP), BP
|
|
|
|
MOVQ BP, R8 // holds oldm until end of function
|
|
|
|
JMP havem
|
2013-02-20 15:48:23 -07:00
|
|
|
needm:
|
2014-06-26 09:54:39 -06:00
|
|
|
MOVQ $0, 0(SP)
|
2013-02-20 15:48:23 -07:00
|
|
|
MOVQ $runtime·needm(SB), AX
|
|
|
|
CALL AX
|
2013-07-24 07:01:57 -06:00
|
|
|
MOVQ 0(SP), R8
|
2013-02-20 15:48:23 -07:00
|
|
|
get_tls(CX)
|
2014-06-26 09:54:39 -06:00
|
|
|
MOVQ g(CX), BP
|
|
|
|
MOVQ g_m(BP), BP
|
2012-03-08 10:12:40 -07:00
|
|
|
|
2013-02-20 15:48:23 -07:00
|
|
|
havem:
|
|
|
|
// Now there's a valid m, and we're running on its m->g0.
|
|
|
|
// Save current m->g0->sched.sp on stack and then set it to SP.
|
|
|
|
// Save current sp in m->g0->sched.sp in preparation for
|
|
|
|
// switch back to m->curg stack.
|
2013-07-23 16:40:02 -06:00
|
|
|
// NOTE: unwindm knows that the saved g->sched.sp is at 0(SP).
|
2011-03-07 08:37:42 -07:00
|
|
|
MOVQ m_g0(BP), SI
|
2013-07-23 16:40:02 -06:00
|
|
|
MOVQ (g_sched+gobuf_sp)(SI), AX
|
|
|
|
MOVQ AX, 0(SP)
|
2011-03-07 08:37:42 -07:00
|
|
|
MOVQ SP, (g_sched+gobuf_sp)(SI)
|
|
|
|
|
2013-07-23 16:40:02 -06:00
|
|
|
// Switch to m->curg stack and call runtime.cgocallbackg.
|
|
|
|
// Because we are taking over the execution of m->curg
|
|
|
|
// but *not* resuming what had been running, we need to
|
|
|
|
// save that information (m->curg->sched) so we can restore it.
|
2013-06-05 05:16:53 -06:00
|
|
|
// We can restore m->curg->sched.sp easily, because calling
|
2011-08-18 10:17:09 -06:00
|
|
|
// runtime.cgocallbackg leaves SP unchanged upon return.
|
2013-06-05 05:16:53 -06:00
|
|
|
// To save m->curg->sched.pc, we push it onto the stack.
|
2011-03-07 08:37:42 -07:00
|
|
|
// This has the added benefit that it looks to the traceback
|
2011-08-18 10:17:09 -06:00
|
|
|
// routine like cgocallbackg is going to return to that
|
2013-07-23 16:40:02 -06:00
|
|
|
// PC (because the frame we allocate below has the same
|
|
|
|
// size as cgocallback_gofunc's frame declared above)
|
2011-03-07 08:37:42 -07:00
|
|
|
// so that the traceback will seamlessly trace back into
|
|
|
|
// the earlier calls.
|
2013-07-23 16:40:02 -06:00
|
|
|
//
|
2013-07-24 07:01:57 -06:00
|
|
|
// In the new goroutine, 0(SP) holds the saved R8.
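The m->curg->sched state being juggled here is the goroutine's resume point. A hedged sketch of the two fields the code below touches (named after the g_sched+gobuf_sp / g_sched+gobuf_pc offsets it uses; the real gobuf has more fields):
	type gobuf struct {
		sp uintptr // stack pointer to resume at (gobuf_sp)
		pc uintptr // program counter to resume at (gobuf_pc)
	}
	// Running runtime.cgocallbackg on m->curg's stack overwrites curg's
	// sched.sp and sched.pc, so the old pc is first pushed onto that stack
	// (doubling as the return address the traceback routine sees) and both
	// values are written back after cgocallbackg returns.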
|
2011-03-07 08:37:42 -07:00
|
|
|
MOVQ m_curg(BP), SI
|
|
|
|
MOVQ SI, g(CX)
|
|
|
|
MOVQ (g_sched+gobuf_sp)(SI), DI // prepare stack as DI
|
|
|
|
MOVQ (g_sched+gobuf_pc)(SI), BP
|
2013-07-23 16:40:02 -06:00
|
|
|
MOVQ BP, -8(DI)
|
2013-07-24 07:01:57 -06:00
|
|
|
LEAQ -(8+8)(DI), SP
|
|
|
|
MOVQ R8, 0(SP)
|
2011-03-07 08:37:42 -07:00
|
|
|
CALL runtime·cgocallbackg(SB)
|
2013-07-24 07:01:57 -06:00
|
|
|
MOVQ 0(SP), R8
|
2011-03-07 08:37:42 -07:00
|
|
|
|
2013-06-05 05:16:53 -06:00
|
|
|
// Restore g->sched (== m->curg->sched) from saved values.
|
2011-03-07 08:37:42 -07:00
|
|
|
get_tls(CX)
|
|
|
|
MOVQ g(CX), SI
|
2013-07-24 07:01:57 -06:00
|
|
|
MOVQ 8(SP), BP
|
2011-03-07 08:37:42 -07:00
|
|
|
MOVQ BP, (g_sched+gobuf_pc)(SI)
|
2013-07-24 07:01:57 -06:00
|
|
|
LEAQ (8+8)(SP), DI
|
2011-03-07 08:37:42 -07:00
|
|
|
MOVQ DI, (g_sched+gobuf_sp)(SI)
|
|
|
|
|
|
|
|
// Switch back to m->g0's stack and restore m->g0->sched.sp.
|
|
|
|
// (Unlike m->curg, the g0 goroutine never uses sched.pc,
|
|
|
|
// so we do not have to restore it.)
|
2014-06-26 09:54:39 -06:00
|
|
|
MOVQ g(CX), BP
|
|
|
|
MOVQ g_m(BP), BP
|
2011-03-07 08:37:42 -07:00
|
|
|
MOVQ m_g0(BP), SI
|
|
|
|
MOVQ SI, g(CX)
|
|
|
|
MOVQ (g_sched+gobuf_sp)(SI), SP
|
2013-07-23 16:40:02 -06:00
|
|
|
MOVQ 0(SP), AX
|
|
|
|
MOVQ AX, (g_sched+gobuf_sp)(SI)
|
2013-02-20 15:48:23 -07:00
|
|
|
|
|
|
|
// If the m on entry was nil, we called needm above to borrow an m
|
|
|
|
// for the duration of the call. Since the call is over, return it with dropm.
|
2013-07-24 07:01:57 -06:00
|
|
|
CMPQ R8, $0
|
2013-02-20 15:48:23 -07:00
|
|
|
JNE 3(PC)
|
|
|
|
MOVQ $runtime·dropm(SB), AX
|
|
|
|
CALL AX
|
2011-03-07 08:37:42 -07:00
|
|
|
|
|
|
|
// Done!
|
2010-04-09 14:30:11 -06:00
|
|
|
RET
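The needm/dropm borrowing described in the comments above is exercised whenever the callback arrives on a thread the Go runtime did not create. A hedged sketch of such a foreign-thread callback (illustrative names; assumes the exported goCallback from the earlier sketch):
	// foreign.go (illustrative)
	package main

	/*
	#cgo LDFLAGS: -pthread
	#include <pthread.h>
	extern void goCallback(int);
	static void *worker(void *arg) { goCallback(7); return 0; }
	static void startThread(void) {
		pthread_t t;
		pthread_create(&t, 0, worker, 0);
		pthread_join(t, 0);
	}
	*/
	import "C"

	func main() {
		// The callback runs on the pthread, where g is nil on entry, so the
		// callback path borrows an m with needm and returns it with dropm.
		C.startThread()
	}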
|
|
|
|
|
2014-06-26 09:54:39 -06:00
|
|
|
// void setg(G*); set g. for use by needm.
|
cmd/cc, runtime: convert C compilers to use Go calling convention
To date, the C compilers and Go compilers differed only in how
values were returned from functions. This made it difficult to call
Go from C or C from Go if return values were involved. It also made
assembly called from Go and assembly called from C different.
This CL changes the C compiler to use the Go conventions, passing
results on the stack, after the arguments.
[Exception: this does not apply to C ... functions, because you can't
know where on the stack the arguments end.]
By doing this, the CL makes it possible to rewrite C functions into Go
one at a time, without worrying about which languages call that
function or which languages it calls.
This CL also updates all the assembly files in package runtime to use
the new conventions. Argument references of the form 40(SP) have
been rewritten to the form name+10(FP) instead, and there are now
Go func prototypes for every assembly function called from C or Go.
This means that 'go vet runtime' checks effectively every assembly
function, and go vet's output was used to automate the bulk of the
conversion.
Some functions, like seek and nsec on Plan 9, needed to be rewritten.
Many assembly routines called from C were reading arguments
incorrectly, using MOVL instead of MOVQ or vice versa, especially on
the less used systems like openbsd.
These were found by go vet and have been corrected too.
If we're lucky, this may reduce flakiness on those systems.
Tested on:
darwin/386
darwin/amd64
linux/arm
linux/386
linux/amd64
If this breaks another system, the bug is almost certainly in the
sys_$GOOS_$GOARCH.s file, since the rest of the CL is tested
by the combination of the above systems.
LGTM=dvyukov, iant
R=golang-codereviews, 0intro, dave, alex.brainman, dvyukov, iant
CC=golang-codereviews, josharian, r
https://golang.org/cl/135830043
2014-08-27 09:32:17 -06:00
|
|
|
TEXT runtime·setg(SB), NOSPLIT, $0-8
|
2014-06-26 09:54:39 -06:00
|
|
|
MOVQ gg+0(FP), BX
|
2013-02-20 15:48:23 -07:00
|
|
|
#ifdef GOOS_windows
|
2014-06-26 09:54:39 -06:00
|
|
|
CMPQ BX, $0
|
2013-02-20 15:48:23 -07:00
|
|
|
JNE settls
|
|
|
|
MOVQ $0, 0x28(GS)
|
|
|
|
RET
|
|
|
|
settls:
|
2014-06-26 09:54:39 -06:00
|
|
|
MOVQ g_m(BX), AX
|
2013-02-20 15:48:23 -07:00
|
|
|
LEAQ m_tls(AX), AX
|
|
|
|
MOVQ AX, 0x28(GS)
|
|
|
|
#endif
|
|
|
|
get_tls(CX)
|
|
|
|
MOVQ BX, g(CX)
|
|
|
|
RET
|
|
|
|
|
2014-06-26 09:54:39 -06:00
|
|
|
// void setg_gcc(G*); set g called from gcc.
|
|
|
|
TEXT setg_gcc<>(SB),NOSPLIT,$0
|
2013-03-25 16:14:02 -06:00
|
|
|
get_tls(AX)
|
2014-06-26 09:54:39 -06:00
|
|
|
MOVQ DI, g(AX)
|
2013-03-25 16:14:02 -06:00
|
|
|
RET
|
|
|
|
|
2009-12-08 19:19:30 -07:00
|
|
|
// check that SP is strictly between g->stackguard and g->stackbase (the stack grows down)
|
2013-08-07 11:23:24 -06:00
|
|
|
TEXT runtime·stackcheck(SB), NOSPLIT, $0-0
|
2010-08-04 18:50:22 -06:00
|
|
|
get_tls(CX)
|
|
|
|
MOVQ g(CX), AX
|
|
|
|
CMPQ g_stackbase(AX), SP
|
2010-03-30 11:53:16 -06:00
|
|
|
JHI 2(PC)
|
|
|
|
INT $3
|
2010-08-04 18:50:22 -06:00
|
|
|
CMPQ SP, g_stackguard(AX)
|
2010-03-30 11:53:16 -06:00
|
|
|
JHI 2(PC)
|
|
|
|
INT $3
|
|
|
|
RET
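The invariant stackcheck enforces can be written out in a couple of lines of Go. A minimal sketch, assuming the stackbase/stackguard fields used here; the function name is illustrative.
package sketch

// stackCheck restates the invariant the assembly enforces: SP must lie
// strictly between the guard and the base, otherwise the assembly traps
// with INT $3.
func stackCheck(sp, stackguard, stackbase uintptr) bool {
	return stackguard < sp && sp < stackbase
}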
|
|
|
|
|
2014-08-27 09:32:17 -06:00
|
|
|
TEXT runtime·getcallerpc(SB),NOSPLIT,$0-16
|
|
|
|
MOVQ argp+0(FP),AX // addr of first arg
|
2010-04-05 13:51:09 -06:00
|
|
|
MOVQ -8(AX),AX // get calling pc
|
2014-08-27 09:32:17 -06:00
|
|
|
MOVQ AX, ret+8(FP)
|
2010-04-05 13:51:09 -06:00
|
|
|
RET
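What getcallerpc relies on can be stated compactly in Go. A sketch under the amd64 assumption that the return address sits in the word just below the first argument; callerPC is an illustrative name, and portable code should use the runtime's own helpers.
package sketch

import "unsafe"

// callerPC mirrors the load the assembly performs: on amd64 the CALL
// instruction leaves the return address in the word just below the callee's
// first argument, so reading argp-8 recovers the calling PC.
func callerPC(argp unsafe.Pointer) uintptr {
	return *(*uintptr)(unsafe.Pointer(uintptr(argp) - 8))
}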
|
|
|
|
|
2014-06-17 22:59:50 -06:00
|
|
|
TEXT runtime·gogetcallerpc(SB),NOSPLIT,$0-16
|
|
|
|
MOVQ p+0(FP),AX // addr of first arg
|
2014-06-17 00:03:03 -06:00
|
|
|
MOVQ -8(AX),AX // get calling pc
|
2014-06-17 22:59:50 -06:00
|
|
|
MOVQ AX,ret+8(FP)
|
2014-06-17 00:03:03 -06:00
|
|
|
RET
|
|
|
|
|
2013-08-07 11:23:24 -06:00
|
|
|
TEXT runtime·setcallerpc(SB),NOSPLIT,$0-16
|
2014-08-27 09:32:17 -06:00
|
|
|
MOVQ argp+0(FP),AX // addr of first arg
|
|
|
|
MOVQ pc+8(FP), BX
|
2010-04-05 13:51:09 -06:00
|
|
|
MOVQ BX, -8(AX) // set calling pc
|
|
|
|
RET
|
|
|
|
|
2014-08-27 09:32:17 -06:00
|
|
|
TEXT runtime·getcallersp(SB),NOSPLIT,$0-16
|
|
|
|
MOVQ argp+0(FP), AX
|
|
|
|
MOVQ AX, ret+8(FP)
|
2010-04-05 13:51:09 -06:00
|
|
|
RET
|
|
|
|
|
2014-08-26 00:34:46 -06:00
|
|
|
// func gogetcallersp(p unsafe.Pointer) uintptr
|
|
|
|
TEXT runtime·gogetcallersp(SB),NOSPLIT,$0-16
|
|
|
|
MOVQ p+0(FP),AX // addr of first arg
|
|
|
|
MOVQ AX, ret+8(FP)
|
|
|
|
RET
|
|
|
|
|
2012-02-02 12:09:27 -07:00
|
|
|
// int64 runtime·cputicks(void)
|
2013-08-07 11:23:24 -06:00
|
|
|
TEXT runtime·cputicks(SB),NOSPLIT,$0-0
|
2012-02-02 12:09:27 -07:00
|
|
|
RDTSC
|
|
|
|
SHLQ $32, DX
|
|
|
|
ADDQ DX, AX
|
2014-08-27 09:32:17 -06:00
|
|
|
MOVQ AX, ret+0(FP)
|
2012-02-02 12:09:27 -07:00
|
|
|
RET
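The SHLQ/ADDQ pair above just reassembles the two 32-bit halves RDTSC returns. A minimal sketch of that arithmetic; combineTSC is an illustrative name.
package sketch

// combineTSC is the arithmetic performed after RDTSC: the counter comes back
// split across two 32-bit halves, high in DX and low in AX; SHLQ $32 on the
// high half plus ADDQ reassembles the 64-bit value.
func combineTSC(lo, hi uint32) int64 {
	return int64(uint64(hi)<<32 + uint64(lo))
}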
|
|
|
|
|
2014-08-21 10:41:09 -06:00
|
|
|
TEXT runtime·gocputicks(SB),NOSPLIT,$0-8
|
|
|
|
RDTSC
|
2014-08-22 11:27:25 -06:00
|
|
|
SHLQ $32, DX
|
|
|
|
ADDQ DX, AX
|
|
|
|
MOVQ AX, ret+0(FP)
|
2014-08-21 10:41:09 -06:00
|
|
|
RET
|
|
|
|
|
2013-08-07 11:23:24 -06:00
|
|
|
TEXT runtime·stackguard(SB),NOSPLIT,$0-16
|
2012-03-15 13:22:30 -06:00
|
|
|
MOVQ SP, DX
|
|
|
|
MOVQ DX, sp+0(FP)
|
|
|
|
get_tls(CX)
|
|
|
|
MOVQ g(CX), BX
|
|
|
|
MOVQ g_stackguard(BX), DX
|
2013-03-22 10:57:55 -06:00
|
|
|
MOVQ DX, limit+8(FP)
|
2012-03-15 13:22:30 -06:00
|
|
|
RET
|
|
|
|
|
runtime: ,s/[a-zA-Z0-9_]+/runtime·&/g, almost
Prefix all external symbols in runtime by runtime·,
to avoid conflicts with possible symbols of the same
name in linked-in C libraries. The obvious conflicts
are printf, malloc, and free, but hide everything to
avoid future pain.
The symbols left alone are:
** known to cgo **
_cgo_free
_cgo_malloc
libcgo_thread_start
initcgo
ncgocall
** known to linker **
_rt0_$GOARCH
_rt0_$GOARCH_$GOOS
text
etext
data
end
pclntab
epclntab
symtab
esymtab
** known to C compiler **
_divv
_modv
_div64by32
etc (arch specific)
Tested on darwin/386, darwin/amd64, linux/386, linux/amd64.
Built (but not tested) for freebsd/386, freebsd/amd64, linux/arm, windows/386.
R=r, PeterGo
CC=golang-dev
https://golang.org/cl/2899041
2010-11-04 12:00:19 -06:00
|
|
|
GLOBL runtime·tls0(SB), $64
|
2013-03-12 11:47:44 -06:00
|
|
|
|
|
|
|
// hash function using AES hardware instructions
|
2014-07-31 16:07:05 -06:00
|
|
|
TEXT runtime·aeshash(SB),NOSPLIT,$0-32
|
|
|
|
MOVQ p+0(FP), AX // ptr to data
|
|
|
|
MOVQ s+8(FP), CX // size
|
2013-03-12 11:47:44 -06:00
|
|
|
JMP runtime·aeshashbody(SB)
|
|
|
|
|
2014-07-31 16:07:05 -06:00
|
|
|
TEXT runtime·aeshashstr(SB),NOSPLIT,$0-32
|
|
|
|
MOVQ p+0(FP), AX // ptr to string struct
|
|
|
|
// s+8(FP) is ignored; it is always sizeof(String)
|
2013-03-12 11:47:44 -06:00
|
|
|
MOVQ 8(AX), CX // length of string
|
|
|
|
MOVQ (AX), AX // string data
|
|
|
|
JMP runtime·aeshashbody(SB)
|
|
|
|
|
|
|
|
// AX: data
|
|
|
|
// CX: length
|
2014-07-31 16:07:05 -06:00
|
|
|
TEXT runtime·aeshashbody(SB),NOSPLIT,$0-32
|
|
|
|
MOVQ h+16(FP), X0 // seed to low 64 bits of xmm0
|
2013-03-12 11:47:44 -06:00
|
|
|
PINSRQ $1, CX, X0 // size to high 64 bits of xmm0
|
2013-03-20 15:34:26 -06:00
|
|
|
MOVO runtime·aeskeysched+0(SB), X2
|
|
|
|
MOVO runtime·aeskeysched+16(SB), X3
|
2013-05-15 10:40:14 -06:00
|
|
|
CMPQ CX, $16
|
|
|
|
JB aessmall
|
2013-03-12 11:47:44 -06:00
|
|
|
aesloop:
|
|
|
|
CMPQ CX, $16
|
2013-05-15 10:40:14 -06:00
|
|
|
JBE aesloopend
|
2013-03-12 11:47:44 -06:00
|
|
|
MOVOU (AX), X1
|
|
|
|
AESENC X2, X0
|
|
|
|
AESENC X1, X0
|
|
|
|
SUBQ $16, CX
|
|
|
|
ADDQ $16, AX
|
|
|
|
JMP aesloop
|
2013-05-15 10:40:14 -06:00
|
|
|
// 1-16 bytes remaining
|
2013-03-12 11:47:44 -06:00
|
|
|
aesloopend:
|
2013-05-15 10:40:14 -06:00
|
|
|
// This load may overlap with the previous load above.
|
|
|
|
// We'll hash some bytes twice, but that's ok.
|
|
|
|
MOVOU -16(AX)(CX*1), X1
|
|
|
|
JMP partial
|
|
|
|
// 0-15 bytes
|
|
|
|
aessmall:
|
2013-03-12 11:47:44 -06:00
|
|
|
TESTQ CX, CX
|
2013-05-15 10:40:14 -06:00
|
|
|
JE finalize // 0 bytes
|
2013-03-12 11:47:44 -06:00
|
|
|
|
2013-05-15 10:40:14 -06:00
|
|
|
CMPB AX, $0xf0
|
|
|
|
JA highpartial
|
2013-03-12 11:47:44 -06:00
|
|
|
|
2013-05-15 10:40:14 -06:00
|
|
|
// 16 bytes loaded at this address won't cross
|
|
|
|
// a page boundary, so we can load it directly.
|
2013-03-12 11:47:44 -06:00
|
|
|
MOVOU (AX), X1
|
|
|
|
ADDQ CX, CX
|
2014-08-12 17:51:20 -06:00
|
|
|
MOVQ $masks<>(SB), BP
|
|
|
|
PAND (BP)(CX*8), X1
|
2013-03-12 11:47:44 -06:00
|
|
|
JMP partial
|
|
|
|
highpartial:
|
2013-05-15 10:40:14 -06:00
|
|
|
// address ends in 1111xxxx. Might be up against
|
2013-03-12 11:47:44 -06:00
|
|
|
// a page boundary, so load ending at last byte.
|
|
|
|
// Then shift bytes down using pshufb.
|
|
|
|
MOVOU -16(AX)(CX*1), X1
|
|
|
|
ADDQ CX, CX
|
2014-08-12 17:51:20 -06:00
|
|
|
MOVQ $shifts<>(SB), BP
|
|
|
|
PSHUFB (BP)(CX*8), X1
|
2013-03-12 11:47:44 -06:00
|
|
|
partial:
|
|
|
|
// incorporate partial block into hash
|
|
|
|
AESENC X3, X0
|
|
|
|
AESENC X1, X0
|
|
|
|
finalize:
|
|
|
|
// finalize hash
|
|
|
|
AESENC X2, X0
|
|
|
|
AESENC X3, X0
|
|
|
|
AESENC X2, X0
|
2014-07-31 16:07:05 -06:00
|
|
|
MOVQ X0, res+24(FP)
|
2013-03-12 11:47:44 -06:00
|
|
|
RET
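Stripped of the AES instructions, aeshashbody is a simple block loop: seed and length go into the state, full 16-byte blocks are mixed in, the 1-16 byte tail is mixed as one block, and a few extra rounds finalize. A hedged structural sketch only; mix stands in for the AESENC rounds, and every name is illustrative.
package sketch

import "encoding/binary"

// aesHashShape sketches the control flow of aeshashbody, not its hash values.
func aesHashShape(p []byte, seed uint64, mix func(state *[16]byte, block []byte)) [16]byte {
	var state [16]byte
	binary.LittleEndian.PutUint64(state[0:8], seed)            // seed into the low half (PINSRQ)
	binary.LittleEndian.PutUint64(state[8:16], uint64(len(p))) // size into the high half
	for len(p) > 16 {
		mix(&state, p[:16]) // one full 16-byte block per iteration (aesloop)
		p = p[16:]
	}
	if len(p) > 0 {
		var last [16]byte
		copy(last[:], p) // the assembly loads 16 bytes and masks or shifts instead
		mix(&state, last[:])
	}
	// The assembly then runs a few more AESENC rounds to finalize before
	// storing X0 as the result.
	return state
}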
|
|
|
|
|
2014-07-31 16:07:05 -06:00
|
|
|
TEXT runtime·aeshash32(SB),NOSPLIT,$0-32
|
|
|
|
MOVQ p+0(FP), AX // ptr to data
|
|
|
|
// s+8(FP) is ignored; it is always sizeof(int32)
|
|
|
|
MOVQ h+16(FP), X0 // seed
|
2013-03-12 11:47:44 -06:00
|
|
|
PINSRD $2, (AX), X0 // data
|
2013-03-20 15:34:26 -06:00
|
|
|
AESENC runtime·aeskeysched+0(SB), X0
|
|
|
|
AESENC runtime·aeskeysched+16(SB), X0
|
|
|
|
AESENC runtime·aeskeysched+0(SB), X0
|
2014-08-27 09:32:17 -06:00
|
|
|
MOVQ X0, ret+24(FP)
|
2013-03-12 11:47:44 -06:00
|
|
|
RET
|
|
|
|
|
2014-07-31 16:07:05 -06:00
|
|
|
TEXT runtime·aeshash64(SB),NOSPLIT,$0-32
|
|
|
|
MOVQ p+0(FP), AX // ptr to data
|
|
|
|
// s+8(FP) is ignored; it is always sizeof(int64)
|
|
|
|
MOVQ h+16(FP), X0 // seed
|
2013-03-12 11:47:44 -06:00
|
|
|
PINSRQ $1, (AX), X0 // data
|
2013-03-20 15:34:26 -06:00
|
|
|
AESENC runtime·aeskeysched+0(SB), X0
|
|
|
|
AESENC runtime·aeskeysched+16(SB), X0
|
|
|
|
AESENC runtime·aeskeysched+0(SB), X0
|
2014-08-27 09:32:17 -06:00
|
|
|
MOVQ X0, ret+24(FP)
|
2013-03-12 11:47:44 -06:00
|
|
|
RET
|
|
|
|
|
|
|
|
// simple mask to get rid of data in the high part of the register.
|
2013-07-16 14:24:09 -06:00
|
|
|
DATA masks<>+0x00(SB)/8, $0x0000000000000000
|
|
|
|
DATA masks<>+0x08(SB)/8, $0x0000000000000000
|
|
|
|
DATA masks<>+0x10(SB)/8, $0x00000000000000ff
|
|
|
|
DATA masks<>+0x18(SB)/8, $0x0000000000000000
|
|
|
|
DATA masks<>+0x20(SB)/8, $0x000000000000ffff
|
|
|
|
DATA masks<>+0x28(SB)/8, $0x0000000000000000
|
|
|
|
DATA masks<>+0x30(SB)/8, $0x0000000000ffffff
|
|
|
|
DATA masks<>+0x38(SB)/8, $0x0000000000000000
|
|
|
|
DATA masks<>+0x40(SB)/8, $0x00000000ffffffff
|
|
|
|
DATA masks<>+0x48(SB)/8, $0x0000000000000000
|
|
|
|
DATA masks<>+0x50(SB)/8, $0x000000ffffffffff
|
|
|
|
DATA masks<>+0x58(SB)/8, $0x0000000000000000
|
|
|
|
DATA masks<>+0x60(SB)/8, $0x0000ffffffffffff
|
|
|
|
DATA masks<>+0x68(SB)/8, $0x0000000000000000
|
|
|
|
DATA masks<>+0x70(SB)/8, $0x00ffffffffffffff
|
|
|
|
DATA masks<>+0x78(SB)/8, $0x0000000000000000
|
|
|
|
DATA masks<>+0x80(SB)/8, $0xffffffffffffffff
|
|
|
|
DATA masks<>+0x88(SB)/8, $0x0000000000000000
|
|
|
|
DATA masks<>+0x90(SB)/8, $0xffffffffffffffff
|
|
|
|
DATA masks<>+0x98(SB)/8, $0x00000000000000ff
|
|
|
|
DATA masks<>+0xa0(SB)/8, $0xffffffffffffffff
|
|
|
|
DATA masks<>+0xa8(SB)/8, $0x000000000000ffff
|
|
|
|
DATA masks<>+0xb0(SB)/8, $0xffffffffffffffff
|
|
|
|
DATA masks<>+0xb8(SB)/8, $0x0000000000ffffff
|
|
|
|
DATA masks<>+0xc0(SB)/8, $0xffffffffffffffff
|
|
|
|
DATA masks<>+0xc8(SB)/8, $0x00000000ffffffff
|
|
|
|
DATA masks<>+0xd0(SB)/8, $0xffffffffffffffff
|
|
|
|
DATA masks<>+0xd8(SB)/8, $0x000000ffffffffff
|
|
|
|
DATA masks<>+0xe0(SB)/8, $0xffffffffffffffff
|
|
|
|
DATA masks<>+0xe8(SB)/8, $0x0000ffffffffffff
|
|
|
|
DATA masks<>+0xf0(SB)/8, $0xffffffffffffffff
|
|
|
|
DATA masks<>+0xf8(SB)/8, $0x00ffffffffffffff
|
2013-08-07 11:23:24 -06:00
|
|
|
GLOBL masks<>(SB),RODATA,$256
|
2013-07-16 14:24:09 -06:00
|
|
|
|
|
|
|
// these are arguments to pshufb. They move data down from
|
|
|
|
// the high bytes of the register to the low bytes of the register.
|
|
|
|
// index is how many bytes to move.
|
|
|
|
DATA shifts<>+0x00(SB)/8, $0x0000000000000000
|
|
|
|
DATA shifts<>+0x08(SB)/8, $0x0000000000000000
|
|
|
|
DATA shifts<>+0x10(SB)/8, $0xffffffffffffff0f
|
|
|
|
DATA shifts<>+0x18(SB)/8, $0xffffffffffffffff
|
|
|
|
DATA shifts<>+0x20(SB)/8, $0xffffffffffff0f0e
|
|
|
|
DATA shifts<>+0x28(SB)/8, $0xffffffffffffffff
|
|
|
|
DATA shifts<>+0x30(SB)/8, $0xffffffffff0f0e0d
|
|
|
|
DATA shifts<>+0x38(SB)/8, $0xffffffffffffffff
|
|
|
|
DATA shifts<>+0x40(SB)/8, $0xffffffff0f0e0d0c
|
|
|
|
DATA shifts<>+0x48(SB)/8, $0xffffffffffffffff
|
|
|
|
DATA shifts<>+0x50(SB)/8, $0xffffff0f0e0d0c0b
|
|
|
|
DATA shifts<>+0x58(SB)/8, $0xffffffffffffffff
|
|
|
|
DATA shifts<>+0x60(SB)/8, $0xffff0f0e0d0c0b0a
|
|
|
|
DATA shifts<>+0x68(SB)/8, $0xffffffffffffffff
|
|
|
|
DATA shifts<>+0x70(SB)/8, $0xff0f0e0d0c0b0a09
|
|
|
|
DATA shifts<>+0x78(SB)/8, $0xffffffffffffffff
|
|
|
|
DATA shifts<>+0x80(SB)/8, $0x0f0e0d0c0b0a0908
|
|
|
|
DATA shifts<>+0x88(SB)/8, $0xffffffffffffffff
|
|
|
|
DATA shifts<>+0x90(SB)/8, $0x0e0d0c0b0a090807
|
|
|
|
DATA shifts<>+0x98(SB)/8, $0xffffffffffffff0f
|
|
|
|
DATA shifts<>+0xa0(SB)/8, $0x0d0c0b0a09080706
|
|
|
|
DATA shifts<>+0xa8(SB)/8, $0xffffffffffff0f0e
|
|
|
|
DATA shifts<>+0xb0(SB)/8, $0x0c0b0a0908070605
|
|
|
|
DATA shifts<>+0xb8(SB)/8, $0xffffffffff0f0e0d
|
|
|
|
DATA shifts<>+0xc0(SB)/8, $0x0b0a090807060504
|
|
|
|
DATA shifts<>+0xc8(SB)/8, $0xffffffff0f0e0d0c
|
|
|
|
DATA shifts<>+0xd0(SB)/8, $0x0a09080706050403
|
|
|
|
DATA shifts<>+0xd8(SB)/8, $0xffffff0f0e0d0c0b
|
|
|
|
DATA shifts<>+0xe0(SB)/8, $0x0908070605040302
|
|
|
|
DATA shifts<>+0xe8(SB)/8, $0xffff0f0e0d0c0b0a
|
|
|
|
DATA shifts<>+0xf0(SB)/8, $0x0807060504030201
|
|
|
|
DATA shifts<>+0xf8(SB)/8, $0xff0f0e0d0c0b0a09
|
2013-08-07 11:23:24 -06:00
|
|
|
GLOBL shifts<>(SB),RODATA,$256
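The shifts<> table above is a set of PSHUFB control vectors and masks<> the matching PAND masks; together they let a 1-15 byte tail be loaded as one 16-byte block. A small Go model of the PSHUFB semantics the table drives; pshufbModel is an illustrative name.
package sketch

// pshufbModel: each control byte selects a source byte by its low four bits,
// and a control byte with the high bit set (the 0xff entries in the table)
// writes zero, so a table row slides the wanted bytes down to the bottom of
// the register.
func pshufbModel(src, ctrl [16]byte) [16]byte {
	var dst [16]byte
	for i, c := range ctrl {
		if c&0x80 != 0 {
			dst[i] = 0 // high bit set: zero this lane
		} else {
			dst[i] = src[c&0x0f]
		}
	}
	return dst
}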
|
2013-07-16 14:24:09 -06:00
|
|
|
|
2014-08-07 15:52:55 -06:00
|
|
|
TEXT runtime·memeq(SB),NOSPLIT,$0-25
|
2014-07-16 15:16:19 -06:00
|
|
|
MOVQ a+0(FP), SI
|
|
|
|
MOVQ b+8(FP), DI
|
|
|
|
MOVQ size+16(FP), BX
|
|
|
|
CALL runtime·memeqbody(SB)
|
|
|
|
MOVB AX, ret+24(FP)
|
|
|
|
RET
|
|
|
|
|
2014-06-16 22:00:37 -06:00
|
|
|
// eqstring tests whether two strings are equal.
|
|
|
|
// See runtime_test.go:eqstring_generic for
|
2014-08-19 09:50:35 -06:00
|
|
|
// equivalent Go code.
|
2014-06-16 22:00:37 -06:00
|
|
|
TEXT runtime·eqstring(SB),NOSPLIT,$0-33
|
|
|
|
MOVQ s1len+8(FP), AX
|
|
|
|
MOVQ s2len+24(FP), BX
|
|
|
|
CMPQ AX, BX
|
|
|
|
JNE different
|
|
|
|
MOVQ s1str+0(FP), SI
|
|
|
|
MOVQ s2str+16(FP), DI
|
|
|
|
CMPQ SI, DI
|
|
|
|
JEQ same
|
|
|
|
CALL runtime·memeqbody(SB)
|
|
|
|
MOVB AX, v+32(FP)
|
|
|
|
RET
|
|
|
|
same:
|
|
|
|
MOVB $1, v+32(FP)
|
|
|
|
RET
|
|
|
|
different:
|
|
|
|
MOVB $0, v+32(FP)
|
|
|
|
RET
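The comment above points at eqstring_generic for equivalent Go code. As a hedged sketch of the same contract (not a copy of that helper): unequal lengths compare unequal, otherwise the bytes decide; the assembly adds a pointer-equality short-circuit and lets memeqbody compare in wide chunks.
package sketch

// eqstringShape is an illustrative restatement of eqstring's contract.
func eqstringShape(s1, s2 string) bool {
	if len(s1) != len(s2) {
		return false
	}
	for i := 0; i < len(s1); i++ {
		if s1[i] != s2[i] {
			return false
		}
	}
	return true
}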
|
|
|
|
|
2013-04-02 17:26:15 -06:00
|
|
|
// a in SI
|
|
|
|
// b in DI
|
|
|
|
// count in BX
|
2013-08-07 11:23:24 -06:00
|
|
|
TEXT runtime·memeqbody(SB),NOSPLIT,$0-0
|
2013-04-02 17:26:15 -06:00
|
|
|
XORQ AX, AX
|
|
|
|
|
|
|
|
CMPQ BX, $8
|
|
|
|
JB small
|
|
|
|
|
|
|
|
// 64 bytes at a time using xmm registers
|
|
|
|
hugeloop:
|
|
|
|
CMPQ BX, $64
|
|
|
|
JB bigloop
|
|
|
|
MOVOU (SI), X0
|
|
|
|
MOVOU (DI), X1
|
|
|
|
MOVOU 16(SI), X2
|
|
|
|
MOVOU 16(DI), X3
|
|
|
|
MOVOU 32(SI), X4
|
|
|
|
MOVOU 32(DI), X5
|
|
|
|
MOVOU 48(SI), X6
|
|
|
|
MOVOU 48(DI), X7
|
|
|
|
PCMPEQB X1, X0
|
|
|
|
PCMPEQB X3, X2
|
|
|
|
PCMPEQB X5, X4
|
|
|
|
PCMPEQB X7, X6
|
|
|
|
PAND X2, X0
|
|
|
|
PAND X6, X4
|
|
|
|
PAND X4, X0
|
|
|
|
PMOVMSKB X0, DX
|
|
|
|
ADDQ $64, SI
|
|
|
|
ADDQ $64, DI
|
|
|
|
SUBQ $64, BX
|
|
|
|
CMPL DX, $0xffff
|
|
|
|
JEQ hugeloop
|
|
|
|
RET
|
|
|
|
|
|
|
|
// 8 bytes at a time using 64-bit register
|
|
|
|
bigloop:
|
|
|
|
CMPQ BX, $8
|
|
|
|
JBE leftover
|
|
|
|
MOVQ (SI), CX
|
|
|
|
MOVQ (DI), DX
|
|
|
|
ADDQ $8, SI
|
|
|
|
ADDQ $8, DI
|
|
|
|
SUBQ $8, BX
|
|
|
|
CMPQ CX, DX
|
|
|
|
JEQ bigloop
|
|
|
|
RET
|
|
|
|
|
|
|
|
// remaining 0-8 bytes
|
|
|
|
leftover:
|
|
|
|
MOVQ -8(SI)(BX*1), CX
|
|
|
|
MOVQ -8(DI)(BX*1), DX
|
|
|
|
CMPQ CX, DX
|
|
|
|
SETEQ AX
|
|
|
|
RET
|
|
|
|
|
|
|
|
small:
|
|
|
|
CMPQ BX, $0
|
|
|
|
JEQ equal
|
|
|
|
|
|
|
|
LEAQ 0(BX*8), CX
|
|
|
|
NEGQ CX
|
|
|
|
|
|
|
|
CMPB SI, $0xf8
|
|
|
|
JA si_high
|
|
|
|
|
|
|
|
// load at SI won't cross a page boundary.
|
|
|
|
MOVQ (SI), SI
|
|
|
|
JMP si_finish
|
|
|
|
si_high:
|
|
|
|
// address ends in 11111xxx. Load the 8 bytes ending at the last byte we want, then shift into position.
|
|
|
|
MOVQ -8(SI)(BX*1), SI
|
|
|
|
SHRQ CX, SI
|
|
|
|
si_finish:
|
|
|
|
|
|
|
|
// same for DI.
|
|
|
|
CMPB DI, $0xf8
|
|
|
|
JA di_high
|
|
|
|
MOVQ (DI), DI
|
|
|
|
JMP di_finish
|
|
|
|
di_high:
|
|
|
|
MOVQ -8(DI)(BX*1), DI
|
|
|
|
SHRQ CX, DI
|
|
|
|
di_finish:
|
|
|
|
|
|
|
|
SUBQ SI, DI
|
|
|
|
SHLQ CX, DI
|
|
|
|
equal:
|
|
|
|
SETEQ AX
|
|
|
|
RET
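memeqbody works in tiers: 64-byte SSE chunks, 8-byte words, then a final possibly-overlapping 8-byte load with page-boundary care for short inputs. A Go sketch that reproduces the result the memeq wrappers get from it, using only straightforward loads; all names are illustrative.
package sketch

import "encoding/binary"

// memEqualShape compares word-sized chunks first, then the remaining bytes
// one at a time.
func memEqualShape(a, b []byte) bool {
	if len(a) != len(b) {
		return false
	}
	i := 0
	for ; i+8 <= len(a); i += 8 {
		if binary.LittleEndian.Uint64(a[i:]) != binary.LittleEndian.Uint64(b[i:]) {
			return false
		}
	}
	for ; i < len(a); i++ {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}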
|
2013-05-14 17:05:51 -06:00
|
|
|
|
2013-08-07 11:23:24 -06:00
|
|
|
TEXT runtime·cmpstring(SB),NOSPLIT,$0-40
|
2014-08-27 09:32:17 -06:00
|
|
|
MOVQ s1_base+0(FP), SI
|
|
|
|
MOVQ s1_len+8(FP), BX
|
|
|
|
MOVQ s2_base+16(FP), DI
|
|
|
|
MOVQ s2_len+24(FP), DX
|
2013-05-14 17:05:51 -06:00
|
|
|
CALL runtime·cmpbody(SB)
|
2014-08-27 09:32:17 -06:00
|
|
|
MOVQ AX, ret+32(FP)
|
2013-05-14 17:05:51 -06:00
|
|
|
RET
|
|
|
|
|
2014-08-28 08:46:59 -06:00
|
|
|
TEXT runtime·cmpbytes(SB),NOSPLIT,$0-56
|
2013-05-14 17:05:51 -06:00
|
|
|
MOVQ s1+0(FP), SI
|
|
|
|
MOVQ s1+8(FP), BX
|
|
|
|
MOVQ s2+24(FP), DI
|
|
|
|
MOVQ s2+32(FP), DX
|
|
|
|
CALL runtime·cmpbody(SB)
|
|
|
|
MOVQ AX, res+48(FP)
|
|
|
|
RET
|
|
|
|
|
|
|
|
// input:
|
|
|
|
// SI = a
|
|
|
|
// DI = b
|
|
|
|
// BX = alen
|
|
|
|
// DX = blen
|
|
|
|
// output:
|
|
|
|
// AX = 1/0/-1
|
2013-08-07 11:23:24 -06:00
|
|
|
TEXT runtime·cmpbody(SB),NOSPLIT,$0-0
|
2013-05-14 17:05:51 -06:00
|
|
|
CMPQ SI, DI
|
|
|
|
JEQ cmp_allsame
|
|
|
|
CMPQ BX, DX
|
|
|
|
MOVQ DX, BP
|
|
|
|
CMOVQLT BX, BP // BP = min(alen, blen) = # of bytes to compare
|
|
|
|
CMPQ BP, $8
|
|
|
|
JB cmp_small
|
|
|
|
|
|
|
|
cmp_loop:
|
|
|
|
CMPQ BP, $16
|
|
|
|
JBE cmp_0through16
|
|
|
|
MOVOU (SI), X0
|
|
|
|
MOVOU (DI), X1
|
|
|
|
PCMPEQB X0, X1
|
|
|
|
PMOVMSKB X1, AX
|
|
|
|
XORQ $0xffff, AX // convert EQ to NE
|
|
|
|
JNE cmp_diff16 // branch if at least one byte is not equal
|
|
|
|
ADDQ $16, SI
|
|
|
|
ADDQ $16, DI
|
|
|
|
SUBQ $16, BP
|
|
|
|
JMP cmp_loop
|
|
|
|
|
|
|
|
// AX = bit mask of differences
|
|
|
|
cmp_diff16:
|
|
|
|
BSFQ AX, BX // index of first byte that differs
|
|
|
|
XORQ AX, AX
|
|
|
|
MOVB (SI)(BX*1), CX
|
|
|
|
CMPB CX, (DI)(BX*1)
|
|
|
|
SETHI AX
|
|
|
|
LEAQ -1(AX*2), AX // convert 1/0 to +1/-1
|
|
|
|
RET
|
|
|
|
|
|
|
|
// 0 through 16 bytes left, alen>=8, blen>=8
|
|
|
|
cmp_0through16:
|
|
|
|
CMPQ BP, $8
|
|
|
|
JBE cmp_0through8
|
|
|
|
MOVQ (SI), AX
|
|
|
|
MOVQ (DI), CX
|
|
|
|
CMPQ AX, CX
|
|
|
|
JNE cmp_diff8
|
|
|
|
cmp_0through8:
|
|
|
|
MOVQ -8(SI)(BP*1), AX
|
|
|
|
MOVQ -8(DI)(BP*1), CX
|
|
|
|
CMPQ AX, CX
|
|
|
|
JEQ cmp_allsame
|
|
|
|
|
|
|
|
// AX and CX contain parts of a and b that differ.
|
|
|
|
cmp_diff8:
|
|
|
|
BSWAPQ AX // reverse order of bytes
|
|
|
|
BSWAPQ CX
|
|
|
|
XORQ AX, CX
|
|
|
|
BSRQ CX, CX // index of highest bit difference
|
|
|
|
SHRQ CX, AX // move a's bit to bottom
|
|
|
|
ANDQ $1, AX // mask bit
|
|
|
|
LEAQ -1(AX*2), AX // 1/0 => +1/-1
|
|
|
|
RET
|
|
|
|
|
|
|
|
// 0-7 bytes in common
|
|
|
|
cmp_small:
|
|
|
|
LEAQ (BP*8), CX // bytes left -> bits left
|
|
|
|
NEGQ CX // - bits left (== 64 - bits left mod 64)
|
|
|
|
JEQ cmp_allsame
|
|
|
|
|
|
|
|
// load bytes of a into high bytes of SI
|
|
|
|
CMPB SI, $0xf8
|
|
|
|
JA cmp_si_high
|
|
|
|
MOVQ (SI), SI
|
|
|
|
JMP cmp_si_finish
|
|
|
|
cmp_si_high:
|
|
|
|
MOVQ -8(SI)(BP*1), SI
|
|
|
|
SHRQ CX, SI
|
|
|
|
cmp_si_finish:
|
|
|
|
SHLQ CX, SI
|
|
|
|
|
|
|
|
// load bytes of b into high bytes of DI
|
|
|
|
CMPB DI, $0xf8
|
|
|
|
JA cmp_di_high
|
|
|
|
MOVQ (DI), DI
|
|
|
|
JMP cmp_di_finish
|
|
|
|
cmp_di_high:
|
|
|
|
MOVQ -8(DI)(BP*1), DI
|
|
|
|
SHRQ CX, DI
|
|
|
|
cmp_di_finish:
|
|
|
|
SHLQ CX, DI
|
|
|
|
|
|
|
|
BSWAPQ SI // reverse order of bytes
|
|
|
|
BSWAPQ DI
|
|
|
|
XORQ SI, DI // find bit differences
|
|
|
|
JEQ cmp_allsame
|
|
|
|
BSRQ DI, CX // index of highest bit difference
|
|
|
|
SHRQ CX, SI // move a's bit to bottom
|
|
|
|
ANDQ $1, SI // mask bit
|
|
|
|
LEAQ -1(SI*2), AX // 1/0 => +1/-1
|
|
|
|
RET
|
|
|
|
|
|
|
|
cmp_allsame:
|
|
|
|
XORQ AX, AX
|
|
|
|
XORQ CX, CX
|
|
|
|
CMPQ BX, DX
|
|
|
|
SETGT AX // 1 if alen > blen
|
|
|
|
SETEQ CX // 1 if alen == blen
|
|
|
|
LEAQ -1(CX)(AX*2), AX // 1,0,-1 result
|
|
|
|
RET
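The contract cmpbody implements, written out plainly in Go; a sketch, not the runtime's code. The assembly reaches the same answer 16 bytes at a time with PCMPEQB and the BSWAPQ/BSRQ trick for the differing word.
package sketch

// cmpShape: lexicographic byte comparison returning +1, 0, or -1, with a
// prefix-equal shorter operand ordered first.
func cmpShape(a, b []byte) int {
	n := len(a)
	if len(b) < n {
		n = len(b)
	}
	for i := 0; i < n; i++ {
		switch {
		case a[i] < b[i]:
			return -1
		case a[i] > b[i]:
			return +1
		}
	}
	switch {
	case len(a) < len(b):
		return -1
	case len(a) > len(b):
		return +1
	}
	return 0
}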
|
2013-08-01 17:11:19 -06:00
|
|
|
|
2013-08-07 11:23:24 -06:00
|
|
|
TEXT bytes·IndexByte(SB),NOSPLIT,$0
|
2013-08-01 17:11:19 -06:00
|
|
|
MOVQ s+0(FP), SI
|
|
|
|
MOVQ s_len+8(FP), BX
|
|
|
|
MOVB c+24(FP), AL
|
2013-08-05 16:04:05 -06:00
|
|
|
CALL runtime·indexbytebody(SB)
|
|
|
|
MOVQ AX, ret+32(FP)
|
|
|
|
RET
|
|
|
|
|
2013-08-07 11:23:24 -06:00
|
|
|
TEXT strings·IndexByte(SB),NOSPLIT,$0
|
2013-08-05 16:04:05 -06:00
|
|
|
MOVQ s+0(FP), SI
|
|
|
|
MOVQ s_len+8(FP), BX
|
|
|
|
MOVB c+16(FP), AL
|
|
|
|
CALL runtime·indexbytebody(SB)
|
|
|
|
MOVQ AX, ret+24(FP)
|
|
|
|
RET
|
|
|
|
|
|
|
|
// input:
|
|
|
|
// SI: data
|
|
|
|
// BX: data len
|
|
|
|
// AL: byte sought
|
|
|
|
// output:
|
|
|
|
// AX: index of first matching byte, or -1 if not found
|
2013-08-07 11:23:24 -06:00
|
|
|
TEXT runtime·indexbytebody(SB),NOSPLIT,$0
|
2013-08-01 17:11:19 -06:00
|
|
|
MOVQ SI, DI
|
|
|
|
|
|
|
|
CMPQ BX, $16
|
|
|
|
JLT indexbyte_small
|
|
|
|
|
|
|
|
// round up to first 16-byte boundary
|
|
|
|
TESTQ $15, SI
|
|
|
|
JZ aligned
|
|
|
|
MOVQ SI, CX
|
|
|
|
ANDQ $~15, CX
|
|
|
|
ADDQ $16, CX
|
|
|
|
|
|
|
|
// search the beginning
|
|
|
|
SUBQ SI, CX
|
|
|
|
REPN; SCASB
|
|
|
|
JZ success
|
|
|
|
|
|
|
|
// DI is 16-byte aligned; get ready to search using SSE instructions
|
|
|
|
aligned:
|
|
|
|
// round down to last 16-byte boundary
|
|
|
|
MOVQ BX, R11
|
|
|
|
ADDQ SI, R11
|
|
|
|
ANDQ $~15, R11
|
|
|
|
|
|
|
|
// shuffle X0 around so that each byte contains c
|
|
|
|
MOVD AX, X0
|
|
|
|
PUNPCKLBW X0, X0
|
|
|
|
PUNPCKLBW X0, X0
|
|
|
|
PSHUFL $0, X0, X0
|
|
|
|
JMP condition
|
|
|
|
|
|
|
|
sse:
|
|
|
|
// move the next 16-byte chunk of the buffer into X1
|
|
|
|
MOVO (DI), X1
|
|
|
|
// compare bytes in X0 to X1
|
|
|
|
PCMPEQB X0, X1
|
|
|
|
// take the top bit of each byte in X1 and put the result in DX
|
|
|
|
PMOVMSKB X1, DX
|
|
|
|
TESTL DX, DX
|
|
|
|
JNZ ssesuccess
|
|
|
|
ADDQ $16, DI
|
|
|
|
|
|
|
|
condition:
|
|
|
|
CMPQ DI, R11
|
|
|
|
JLT sse
|
|
|
|
|
|
|
|
// search the end
|
|
|
|
MOVQ SI, CX
|
|
|
|
ADDQ BX, CX
|
|
|
|
SUBQ R11, CX
|
|
|
|
// if CX == 0, the zero flag will be set and we'll end up
|
|
|
|
// returning a false success
|
|
|
|
JZ failure
|
|
|
|
REPN; SCASB
|
|
|
|
JZ success
|
|
|
|
|
|
|
|
failure:
|
2013-08-05 16:04:05 -06:00
|
|
|
MOVQ $-1, AX
|
2013-08-01 17:11:19 -06:00
|
|
|
RET
|
|
|
|
|
|
|
|
// handle lengths < 16 (too short for the SSE path)
|
|
|
|
indexbyte_small:
|
|
|
|
MOVQ BX, CX
|
|
|
|
REPN; SCASB
|
|
|
|
JZ success
|
2013-08-05 16:04:05 -06:00
|
|
|
MOVQ $-1, AX
|
2013-08-01 17:11:19 -06:00
|
|
|
RET
|
|
|
|
|
|
|
|
// we've found the chunk containing the byte
|
|
|
|
// now just figure out which specific byte it is
|
|
|
|
ssesuccess:
|
|
|
|
// get the index of the least significant set bit
|
|
|
|
BSFW DX, DX
|
|
|
|
SUBQ SI, DI
|
|
|
|
ADDQ DI, DX
|
2013-08-05 16:04:05 -06:00
|
|
|
MOVQ DX, AX
|
2013-08-01 17:11:19 -06:00
|
|
|
RET
|
|
|
|
|
|
|
|
success:
|
|
|
|
SUBQ SI, DI
|
|
|
|
SUBL $1, DI
|
2013-08-05 16:04:05 -06:00
|
|
|
MOVQ DI, AX
|
2013-08-01 17:11:19 -06:00
|
|
|
RET
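For reference, the result indexbytebody produces is just a first-occurrence search; a plain Go sketch with an illustrative name. The REPN SCASB head/tail scans and the PCMPEQB/PMOVMSKB middle loop above are an optimization of exactly this.
package sketch

// indexByteShape returns the index of the first occurrence of c, or -1.
func indexByteShape(s []byte, c byte) int {
	for i, b := range s {
		if b == c {
			return i
		}
	}
	return -1
}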
|
|
|
|
|
2013-08-07 11:23:24 -06:00
|
|
|
TEXT bytes·Equal(SB),NOSPLIT,$0-49
|
2013-08-01 17:11:19 -06:00
|
|
|
MOVQ a_len+8(FP), BX
|
|
|
|
MOVQ b_len+32(FP), CX
|
|
|
|
XORQ AX, AX
|
|
|
|
CMPQ BX, CX
|
|
|
|
JNE eqret
|
|
|
|
MOVQ a+0(FP), SI
|
|
|
|
MOVQ b+24(FP), DI
|
|
|
|
CALL runtime·memeqbody(SB)
|
|
|
|
eqret:
|
|
|
|
MOVB AX, ret+48(FP)
|
|
|
|
RET
|
2014-04-01 13:51:02 -06:00
|
|
|
|
|
|
|
// A Duff's device for zeroing memory.
|
|
|
|
// The compiler jumps to computed addresses within
|
|
|
|
// this routine to zero chunks of memory. Do not
|
|
|
|
// change this code without also changing the code
|
|
|
|
// in ../../cmd/6g/ggen.c:clearfat.
|
|
|
|
// AX: zero
|
|
|
|
// DI: ptr to memory to be zeroed
|
|
|
|
// DI is updated as a side effect.
|
|
|
|
TEXT runtime·duffzero(SB), NOSPLIT, $0-0
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
STOSQ
|
|
|
|
RET
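The computed-jump arithmetic behind duffzero, as a hedged sketch: the routine above is 128 STOSQ instructions back to back, and the compiler enters it part-way through so that exactly n/8 of them execute before the RET. stosqSize, the encoded length of one STOSQ, is an assumption made for illustration; the real offsets come from the compiler and linker.
package sketch

// duffzeroEntry (illustrative name): for n bytes (a multiple of 8, at most
// 1024) return the address of the STOSQ that leaves n/8 stores to run.
func duffzeroEntry(routineStart, n, stosqSize uintptr) uintptr {
	const totalSTOSQ = 128
	remaining := n / 8
	return routineStart + (totalSTOSQ-remaining)*stosqSize
}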
|
|
|
|
|
|
|
|
// A Duff's device for copying memory.
|
|
|
|
// The compiler jumps to computed addresses within
|
|
|
|
// this routine to copy chunks of memory. Source
|
|
|
|
// and destination must not overlap. Do not
|
|
|
|
// change this code without also changing the code
|
|
|
|
// in ../../cmd/6g/cgen.c:sgen.
|
|
|
|
// SI: ptr to source memory
|
|
|
|
// DI: ptr to destination memory
|
|
|
|
// SI and DI are updated as a side effect.
|
|
|
|
|
|
|
|
// NOTE: this is equivalent to a sequence of MOVSQ but
|
|
|
|
// for some reason that is 3.5x slower than this code.
|
|
|
|
// The STOSQ sequence above seems fine, though.
|
|
|
|
TEXT runtime·duffcopy(SB), NOSPLIT, $0-0
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
MOVQ (SI),CX
|
|
|
|
ADDQ $8,SI
|
|
|
|
MOVQ CX,(DI)
|
|
|
|
ADDQ $8,DI
|
|
|
|
|
|
|
|
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
	MOVQ	(SI),CX
	ADDQ	$8,SI
	MOVQ	CX,(DI)
	ADDQ	$8,DI
RET
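// NOTE: the long unrolled sequence ending here appears to be the body of
// the runtime's unrolled word-copy helper (duffcopy-style): each group of
// four instructions loads one 8-byte word from (SI), stores it to (DI),
// and advances both pointers, so callers can enter at an offset that copies
// exactly as many words as they need. An illustrative Go sketch of what a
// full pass does (names here are assumptions, not the runtime's API):
//
//	func copyWords(dst, src []uint64) {
//		for i := range src {
//			dst[i] = src[i] // one MOVQ (SI),CX / MOVQ CX,(DI) pair
//		}
//	}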

// runtime·timenow is an alias for time·now: it tail-jumps to the time
// package's implementation rather than duplicating it.
TEXT runtime·timenow(SB), NOSPLIT, $0-0
	JMP	time·now(SB)
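// The Go-level prototype this is expected to match (assumed from the time
// package of the same era, not stated in this file):
//
//	func now() (sec int64, nsec int32)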

// fastrand1 advances the per-m pseudo-random state: the 32-bit value is
// doubled and, when the doubled value has its high bit set, XORed with
// 0x88888eef (CMOVLMI restores the un-XORed value when the high bit was
// clear). Fast and deterministic; not suitable for cryptographic use.
TEXT runtime·fastrand1(SB), NOSPLIT, $0-4
	get_tls(CX)
	MOVQ	g(CX), AX
	MOVQ	g_m(AX), AX
	MOVL	m_fastrand(AX), DX
	ADDL	DX, DX
	MOVL	DX, BX
	XORL	$0x88888eef, DX
	CMOVLMI	BX, DX
	MOVL	DX, m_fastrand(AX)
	MOVL	DX, ret+0(FP)
	RET
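// Equivalent Go sketch of the state update above (illustrative only; the
// function name is an assumption, not this file's API):
//
//	func fastrandStep(state uint32) uint32 {
//		state += state // shift left by one, wrapping at 32 bits
//		if state&0x80000000 != 0 {
//			state ^= 0x88888eef
//		}
//		return state
//	}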

// return0 loads 0 into AX and returns, giving callers a zero result.
TEXT runtime·return0(SB), NOSPLIT, $0
	MOVL	$0, AX
	RET