// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "go_asm.h"
#include "go_tls.h"
#include "textflag.h"

// maxargs should be divisible by 2, as Windows stack
// must be kept 16-byte aligned on syscall entry.
#define maxargs 16

// void runtime·asmstdcall(void *c);
TEXT runtime·asmstdcall(SB),NOSPLIT|NOFRAME,$0
	// asmcgocall will put first argument into CX.
	PUSHQ	CX		// save for later
	MOVQ	libcall_fn(CX), AX
	MOVQ	libcall_args(CX), SI
	MOVQ	libcall_n(CX), CX
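	// libcall_fn, libcall_args and libcall_n (and libcall_r1/libcall_err
	// below) are field offsets of the runtime's Go libcall struct,
	// generated into go_asm.h, so this code stays in sync with the Go side.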

	// SetLastError(0).
	MOVQ	0x30(GS), DI
	MOVL	$0, 0x68(DI)
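	// 0x30(GS) is the TEB self pointer and 0x68 is the offset of its
	// LastErrorValue field, so this clears the last error without a call.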

	SUBQ	$(maxargs*8), SP	// room for args

	// Fast version, do not store args on the stack.
	CMPL	CX, $4
	JLE	loadregs

	// Check we have enough room for args.
	CMPL	CX, $maxargs
	JLE	2(PC)
	INT	$3		// not enough room -> crash

	// Copy args to the stack.
	MOVQ	SP, DI
	CLD
	REP; MOVSQ
	MOVQ	SP, SI

loadregs:
	// Load first 4 args into the corresponding registers.
	MOVQ	0(SI), CX
	MOVQ	8(SI), DX
	MOVQ	16(SI), R8
	MOVQ	24(SI), R9
	// Floating point arguments are passed in the XMM
	// registers. Set them here in case any of the arguments
	// are floating point values. For details see
	//	https://msdn.microsoft.com/en-us/library/zthk2dkh.aspx
	MOVQ	CX, X0
	MOVQ	DX, X1
	MOVQ	R8, X2
	MOVQ	R9, X3

	// Call stdcall function.
	CALL	AX

	ADDQ	$(maxargs*8), SP

	// Return result.
	POPQ	CX
	MOVQ	AX, libcall_r1(CX)

	// GetLastError().
	MOVQ	0x30(GS), DI
	MOVL	0x68(DI), AX
	MOVQ	AX, libcall_err(CX)

	RET

TEXT runtime·badsignal2(SB),NOSPLIT|NOFRAME,$48
	// stderr
	MOVQ	$-12, CX	// stderr
	MOVQ	CX, 0(SP)
	MOVQ	runtime·_GetStdHandle(SB), AX
	CALL	AX

	MOVQ	AX, CX	// handle
	MOVQ	CX, 0(SP)
	MOVQ	$runtime·badsignalmsg(SB), DX	// pointer
	MOVQ	DX, 8(SP)
	MOVL	$runtime·badsignallen(SB), R8	// count
	MOVQ	R8, 16(SP)
	LEAQ	40(SP), R9	// written count
	MOVQ	$0, 0(R9)
	MOVQ	R9, 24(SP)
	MOVQ	$0, 32(SP)	// overlapped
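	// The first four WriteFile arguments travel in CX, DX, R8, R9;
	// 0(SP)-24(SP) is their required shadow space and 32(SP) carries
	// the fifth argument (lpOverlapped) on the stack.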
	MOVQ	runtime·_WriteFile(SB), AX
	CALL	AX

	RET

// faster get/set last error
TEXT runtime·getlasterror(SB),NOSPLIT,$0
	MOVQ	0x30(GS), AX
	MOVL	0x68(AX), AX
	MOVL	AX, ret+0(FP)
	RET

TEXT runtime·setlasterror(SB),NOSPLIT,$0
	MOVL	err+0(FP), AX
	MOVQ	0x30(GS), CX
	MOVL	AX, 0x68(CX)
	RET

// Called by Windows as a Vectored Exception Handler (VEH).
// First argument is pointer to struct containing
// exception record and context pointers.
// Handler function is stored in AX.
// Return 0 for 'not handled', -1 for handled.
TEXT runtime·sigtramp(SB),NOSPLIT|NOFRAME,$0-0
	// CX: PEXCEPTION_POINTERS ExceptionInfo

	// DI SI BP BX R12 R13 R14 R15 registers and DF flag are preserved
	// as required by windows callback convention.
	PUSHFQ
	SUBQ	$112, SP
	MOVQ	DI, 80(SP)
	MOVQ	SI, 72(SP)
	MOVQ	BP, 64(SP)
	MOVQ	BX, 56(SP)
	MOVQ	R12, 48(SP)
	MOVQ	R13, 40(SP)
	MOVQ	R14, 32(SP)
	MOVQ	R15, 88(SP)
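	// Frame layout: 0-16(SP) outgoing handler arguments, 24(SP) handler
	// result, 32-88(SP) saved registers, 96(SP) saved g, 104(SP) saved SP.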

	MOVQ	AX, R15	// save handler address

	// find g
	get_tls(DX)
	CMPQ	DX, $0
	JNE	3(PC)
	MOVQ	$0, AX	// continue
	JMP	done

	MOVQ	g(DX), DX
	CMPQ	DX, $0
	JNE	2(PC)
	CALL	runtime·badsignal2(SB)

	// save g and SP in case of stack switch
	MOVQ	DX, 96(SP)	// g
	MOVQ	SP, 104(SP)

	// do we need to switch to the g0 stack?
	MOVQ	g_m(DX), BX
	MOVQ	m_g0(BX), BX
	CMPQ	DX, BX
	JEQ	g0

	// switch to g0 stack
	get_tls(BP)
	MOVQ	BX, g(BP)
	MOVQ	(g_sched+gobuf_sp)(BX), DI
	// make it look like mstart called us on g0, to stop traceback
	SUBQ	$8, DI
	MOVQ	$runtime·mstart(SB), SI
	MOVQ	SI, 0(DI)
	// traceback will think that we've done PUSHFQ and SUBQ
	// on this stack, so subtract them here to match.
	// (we need room for sighandler arguments anyway).
	// and re-save old SP for restoring later.
	SUBQ	$(112+8), DI
	// save g, save old stack pointer.
	MOVQ	SP, 104(DI)
	MOVQ	DI, SP

g0:
	MOVQ	0(CX), BX	// ExceptionRecord*
	MOVQ	8(CX), CX	// Context*
	MOVQ	BX, 0(SP)
	MOVQ	CX, 8(SP)
	MOVQ	DX, 16(SP)
	CALL	R15	// call handler
	// AX is set to report result back to Windows
	MOVL	24(SP), AX

	// switch back to original stack and g
	// no-op if we never left.
	MOVQ	104(SP), SP
	MOVQ	96(SP), DX
	get_tls(BP)
	MOVQ	DX, g(BP)

done:
	// restore registers as required for windows callback
	MOVQ	88(SP), R15
	MOVQ	32(SP), R14
	MOVQ	40(SP), R13
	MOVQ	48(SP), R12
	MOVQ	56(SP), BX
	MOVQ	64(SP), BP
	MOVQ	72(SP), SI
	MOVQ	80(SP), DI
	ADDQ	$112, SP
	POPFQ

	RET
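
// The three trampolines below load the address of their Go handler into AX
// and tail-jump to sigtramp, which expects the handler there (see the
// "Handler function is stored in AX" comment above).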
TEXT runtime·exceptiontramp(SB),NOSPLIT|NOFRAME,$0
	MOVQ	$runtime·exceptionhandler(SB), AX
	JMP	runtime·sigtramp(SB)

TEXT runtime·firstcontinuetramp(SB),NOSPLIT|NOFRAME,$0-0
	MOVQ	$runtime·firstcontinuehandler(SB), AX
	JMP	runtime·sigtramp(SB)

TEXT runtime·lastcontinuetramp(SB),NOSPLIT|NOFRAME,$0-0
	MOVQ	$runtime·lastcontinuehandler(SB), AX
	JMP	runtime·sigtramp(SB)

TEXT runtime·ctrlhandler(SB),NOSPLIT|NOFRAME,$8
	MOVQ	CX, 16(SP)	// spill
	MOVQ	$runtime·ctrlhandler1(SB), CX
	MOVQ	CX, 0(SP)
	CALL	runtime·externalthreadhandler(SB)
	RET

TEXT runtime·profileloop(SB),NOSPLIT|NOFRAME,$8
	MOVQ	$runtime·profileloop1(SB), CX
	MOVQ	CX, 0(SP)
	CALL	runtime·externalthreadhandler(SB)
	RET

TEXT runtime·externalthreadhandler(SB),NOSPLIT|NOFRAME,$0
	PUSHQ	BP
	MOVQ	SP, BP
	PUSHQ	BX
	PUSHQ	SI
	PUSHQ	DI
	PUSHQ	0x28(GS)
	MOVQ	SP, DX

	// setup dummy m, g
	SUBQ	$m__size, SP	// space for M
	MOVQ	SP, 0(SP)
	MOVQ	$m__size, 8(SP)
	CALL	runtime·memclrNoHeapPointers(SB)	// smashes AX,BX,CX, maybe BP

	LEAQ	m_tls(SP), CX
	MOVQ	CX, 0x28(GS)
	MOVQ	SP, BX
	SUBQ	$g__size, SP	// space for G
	MOVQ	SP, g(CX)
	MOVQ	SP, m_g0(BX)

	MOVQ	SP, 0(SP)
	MOVQ	$g__size, 8(SP)
	CALL	runtime·memclrNoHeapPointers(SB)	// smashes AX,BX,CX, maybe BP
	LEAQ	g__size(SP), BX
	MOVQ	BX, g_m(SP)
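	// SP now points at the zeroed dummy g and BX at the dummy m; the
	// stores above wire them together so the Go handler called below
	// runs with a usable g on this non-Go-created thread.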

	LEAQ	-32768(SP), CX	// must be less than SizeOfStackReserve set by linker
	MOVQ	CX, (g_stack+stack_lo)(SP)
	ADDQ	$const__StackGuard, CX
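	// const__StackGuard is the go_asm.h spelling of the Go constant
	// _StackGuard, so the guard here tracks the value used by the runtime.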
	MOVQ	CX, g_stackguard0(SP)
	MOVQ	CX, g_stackguard1(SP)
	MOVQ	DX, (g_stack+stack_hi)(SP)

	PUSHQ	AX	// room for return value
	PUSHQ	32(BP)	// arg for handler
	CALL	16(BP)
	POPQ	CX
	POPQ	AX	// pass return value to Windows in AX

	get_tls(CX)
	MOVQ	g(CX), CX
	MOVQ	(g_stack+stack_hi)(CX), SP
	POPQ	0x28(GS)
	POPQ	DI
	POPQ	SI
	POPQ	BX
	POPQ	BP
	RET

GLOBL runtime·cbctxts(SB), NOPTR, $8

TEXT runtime·callbackasm1(SB),NOSPLIT,$0
	// Construct args vector for cgocallback().
	// By windows/amd64 calling convention first 4 args are in CX, DX, R8, R9
	// args from the 5th on are on the stack.
	// In any case, even if function has 0,1,2,3,4 args, there is reserved
	// but uninitialized "shadow space" for the first 4 args.
	// The values are in registers.
	MOVQ	CX, (16+0)(SP)
	MOVQ	DX, (16+8)(SP)
	MOVQ	R8, (16+16)(SP)
	MOVQ	R9, (16+24)(SP)

	// remove return address from stack, we are not returning there
	MOVQ	0(SP), AX
	ADDQ	$8, SP

	// determine index into runtime·cbctxts table
	MOVQ	$runtime·callbackasm(SB), DX
	SUBQ	DX, AX
	MOVQ	$0, DX
	MOVQ	$5, CX	// divide by 5 because each call instruction in runtime·callbacks is 5 bytes long
	DIVL	CX

	// find the corresponding runtime·cbctxts table entry
	MOVQ	runtime·cbctxts(SB), CX
	MOVQ	-8(CX)(AX*8), AX
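	// AX now holds the 1-based callback index; entry AX-1 of the
	// cbctxts pointer array is the wincallbackcontext for this callback.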

	// extract callback context
	MOVQ	wincallbackcontext_argsize(AX), DX
	MOVQ	wincallbackcontext_gobody(AX), AX

	// preserve whatever's at the memory location that
	// the callback will use to store the return value
	LEAQ	8(SP), CX	// args vector, skip return address
	PUSHQ	0(CX)(DX*1)	// store 8 bytes from just after the args array
	ADDQ	$8, DX	// extend argsize by size of return value

	// DI SI BP BX R12 R13 R14 R15 registers and DF flag are preserved
	// as required by windows callback convention.
	PUSHFQ
	SUBQ	$64, SP
	MOVQ	DI, 56(SP)
	MOVQ	SI, 48(SP)
	MOVQ	BP, 40(SP)
	MOVQ	BX, 32(SP)
	MOVQ	R12, 24(SP)
	MOVQ	R13, 16(SP)
	MOVQ	R14, 8(SP)
	MOVQ	R15, 0(SP)

	// prepare call stack. use SUBQ to hide from stack frame checks
	// cgocallback(Go func, void *frame, uintptr framesize)
	SUBQ	$24, SP
	MOVQ	DX, 16(SP)	// argsize (including return value)
	MOVQ	CX, 8(SP)	// callback parameters
	MOVQ	AX, 0(SP)	// address of target Go function
	CLD
	CALL	runtime·cgocallback_gofunc(SB)
	MOVQ	0(SP), AX
	MOVQ	8(SP), CX
	MOVQ	16(SP), DX
	ADDQ	$24, SP

	// restore registers as required for windows callback
	MOVQ	0(SP), R15
	MOVQ	8(SP), R14
	MOVQ	16(SP), R13
	MOVQ	24(SP), R12
	MOVQ	32(SP), BX
	MOVQ	40(SP), BP
	MOVQ	48(SP), SI
	MOVQ	56(SP), DI
	ADDQ	$64, SP
	POPFQ

	MOVL	-8(CX)(DX*1), AX	// return value
	POPQ	-8(CX)(DX*1)	// restore bytes just after the args
	RET

// uint32 tstart_stdcall(M *newm);
TEXT runtime·tstart_stdcall(SB),NOSPLIT,$0
	// CX contains first arg newm
	MOVQ	m_g0(CX), DX	// g

	// Layout new m scheduler stack on os stack.
	MOVQ	SP, AX
	MOVQ	AX, (g_stack+stack_hi)(DX)
	SUBQ	$(64*1024), AX	// stack size
	MOVQ	AX, (g_stack+stack_lo)(DX)
	ADDQ	$const__StackGuard, AX
	MOVQ	AX, g_stackguard0(DX)
	MOVQ	AX, g_stackguard1(DX)

	// Set up tls.
	LEAQ	m_tls(CX), SI
	MOVQ	SI, 0x28(GS)
	MOVQ	CX, g_m(DX)
	MOVQ	DX, g(SI)

	// Someday the convention will be D is always cleared.
	CLD

	CALL	runtime·stackcheck(SB)	// clobbers AX,CX
	CALL	runtime·mstart(SB)

	XORL	AX, AX	// return 0 == success
	RET

// set tls base to DI
TEXT runtime·settls(SB),NOSPLIT,$0
	MOVQ	DI, 0x28(GS)
	RET

// func onosstack(fn unsafe.Pointer, arg uint32)
TEXT runtime·onosstack(SB),NOSPLIT,$0
	MOVQ	fn+0(FP), AX	// to hide from 6l
	MOVL	arg+8(FP), BX
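	// fn runs on the OS stack; its argument, if any, is passed in BX
	// (see usleep2 below).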

	// Execute call on m->g0 stack, in case we are not actually
	// calling a system call wrapper, like when running under WINE.
	get_tls(R15)
	CMPQ	R15, $0
	JNE	3(PC)
	// Not a Go-managed thread. Do not switch stack.
	CALL	AX
	RET

	MOVQ	g(R15), R13
	MOVQ	g_m(R13), R13

	// leave pc/sp for cpu profiler
	MOVQ	(SP), R12
	MOVQ	R12, m_libcallpc(R13)
	MOVQ	g(R15), R12
	MOVQ	R12, m_libcallg(R13)
	// sp must be the last, because once async cpu profiler finds
	// all three values to be non-zero, it will use them
	LEAQ	usec+0(FP), R12
	MOVQ	R12, m_libcallsp(R13)

	MOVQ	m_g0(R13), R14
	CMPQ	g(R15), R14
	JNE	switch
	// executing on m->g0 already
	CALL	AX
	JMP	ret

switch:
	// Switch to m->g0 stack and back.
	MOVQ	(g_sched+gobuf_sp)(R14), R14
	MOVQ	SP, -8(R14)
	LEAQ	-8(R14), SP
	CALL	AX
	MOVQ	0(SP), SP

ret:
	MOVQ	$0, m_libcallsp(R13)
	RET

// Runs on OS stack. duration (in 100ns units) is in BX.
// The function leaves room for 4 syscall parameters
// (as per windows amd64 calling convention).
TEXT runtime·usleep2(SB),NOSPLIT|NOFRAME,$48
	MOVQ	SP, AX
	ANDQ	$~15, SP	// alignment as per Windows requirement
	MOVQ	AX, 40(SP)
	// Want negative 100ns units.
	NEGQ	BX
	LEAQ	32(SP), R8	// ptime
	MOVQ	BX, (R8)
	MOVQ	$-1, CX	// handle
	MOVQ	$0, DX	// alertable
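	// Handle -1 is the current-process pseudo-handle, which is not
	// signaled while the process runs, and a negative time is a relative
	// interval in 100ns units, so NtWaitForSingleObject simply times out
	// after the requested delay.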
	MOVQ	runtime·_NtWaitForSingleObject(SB), AX
	CALL	AX
	MOVQ	40(SP), SP
	RET

// Runs on OS stack.
TEXT runtime·switchtothread(SB),NOSPLIT|NOFRAME,$0
	MOVQ	SP, AX
	ANDQ	$~15, SP	// alignment as per Windows requirement
	SUBQ	$(48), SP	// room for SP and 4 args as per Windows requirement
				// plus one extra word to keep stack 16 bytes aligned
	MOVQ	AX, 32(SP)
	MOVQ	runtime·_SwitchToThread(SB), AX
	CALL	AX
	MOVQ	32(SP), SP
	RET

// See http://www.dcl.hpi.uni-potsdam.de/research/WRK/2007/08/getting-os-information-the-kuser_shared_data-structure/
// Must read hi1, then lo, then hi2. The snapshot is valid if hi1 == hi2.
#define _INTERRUPT_TIME 0x7ffe0008
#define _SYSTEM_TIME 0x7ffe0014
#define time_lo 0
#define time_hi1 4
#define time_hi2 8
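// These are the fixed user-mode addresses of the InterruptTime and
// SystemTime fields of KUSER_SHARED_DATA (mapped at 0x7ffe0000); each is a
// KSYSTEM_TIME triple of {lo, hi1, hi2} 32-bit words counting 100ns ticks.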

TEXT runtime·nanotime(SB),NOSPLIT,$0-8
	CMPB	runtime·useQPCTime(SB), $0
	JNE	useQPC
	MOVQ	$_INTERRUPT_TIME, DI
loop:
	MOVL	time_hi1(DI), AX
	MOVL	time_lo(DI), BX
	MOVL	time_hi2(DI), CX
	CMPL	AX, CX
	JNE	loop
	SHLQ	$32, CX
	ORQ	BX, CX
	IMULQ	$100, CX
	SUBQ	runtime·startNano(SB), CX
	MOVQ	CX, ret+0(FP)
	RET
useQPC:
	JMP	runtime·nanotimeQPC(SB)
	RET

TEXT time·now(SB),NOSPLIT,$0-24
	CMPB	runtime·useQPCTime(SB), $0
	JNE	useQPC
	MOVQ	$_INTERRUPT_TIME, DI
loop:
	MOVL	time_hi1(DI), AX
	MOVL	time_lo(DI), BX
	MOVL	time_hi2(DI), CX
	CMPL	AX, CX
	JNE	loop
	SHLQ	$32, AX
	ORQ	BX, AX
	IMULQ	$100, AX
	SUBQ	runtime·startNano(SB), AX
	MOVQ	AX, mono+16(FP)

	MOVQ	$_SYSTEM_TIME, DI
wall:
	MOVL	time_hi1(DI), AX
	MOVL	time_lo(DI), BX
	MOVL	time_hi2(DI), CX
	CMPL	AX, CX
	JNE	wall
	SHLQ	$32, AX
	ORQ	BX, AX
	MOVQ	$116444736000000000, DI
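	// 116444736000000000 is the number of 100ns intervals between the
	// Windows epoch (1601-01-01) and the Unix epoch (1970-01-01); the
	// subtraction rebases the wall clock before converting to ns below.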
	SUBQ	DI, AX
	IMULQ	$100, AX

	// generated code for
	//	func f(x uint64) (uint64, uint64) { return x/1000000000, x%1000000000 }
	// adapted to reduce duplication
	MOVQ	AX, CX
	MOVQ	$1360296554856532783, AX
	MULQ	CX
	ADDQ	CX, DX
	RCRQ	$1, DX
	SHRQ	$29, DX
	MOVQ	DX, sec+0(FP)
	IMULQ	$1000000000, DX
	SUBQ	DX, CX
	MOVL	CX, nsec+8(FP)
	RET
useQPC:
	JMP	runtime·nowQPC(SB)
	RET