go/src/runtime/memmove_plan9_amd64.s


// Derived from Inferno's libkern/memmove-386.s (adapted for amd64)
// http://code.google.com/p/inferno-os/source/browse/libkern/memmove-386.s
//
// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
// Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved.
// Portions Copyright 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#include "textflag.h"
// void runtime·memmove(void*, void*, uintptr)
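// The arguments are addressed off the pseudo-register FP; $0-24 declares a
// frame with no locals and 24 bytes of arguments (three 8-byte words: to,
// from, and n).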
TEXT runtime·memmove(SB), NOSPLIT, $0-24
	MOVQ	to+0(FP), DI
	MOVQ	from+8(FP), SI
	MOVQ	n+16(FP), BX
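	// From here on DI is the destination pointer, SI the source pointer, and
	// BX the number of bytes left to copy; REP MOVSQ below implicitly uses
	// DI, SI, and CX.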
	// REP instructions have a high startup cost, so we handle small sizes
	// with some straightline code. The REP MOVSQ instruction is really fast
	// for large sizes. The cutover is approximately 1K.
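	// Dispatch: n == 0 returns immediately, 1 <= n <= 16 is handled by the
	// straight-line move_* blocks at the end of the file, and larger sizes
	// fall through to the REP MOVSQ paths.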
tail:
	TESTQ	BX, BX
	JEQ	move_0
	CMPQ	BX, $2
	JBE	move_1or2
	CMPQ	BX, $4
	JBE	move_3or4
	CMPQ	BX, $8
	JB	move_5through7
	JE	move_8
	CMPQ	BX, $16
	JBE	move_9through16
/*
 * check and set for backwards
 */
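// A forward copy can only be unsafe when the source starts at or below the
// destination (JLS is the unsigned "lower or same" jump), so only then do we
// branch to "back" to check for real overlap.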
	CMPQ	SI, DI
	JLS	back
/*
 * forward copy loop
 */
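// Copy BX>>3 quadwords with REP MOVSQ, leave the remaining BX&7 bytes in BX,
// and jump back to "tail" so the small-size cases finish them.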
forward:
	MOVQ	BX, CX
	SHRQ	$3, CX
	ANDQ	$7, BX
	REP;	MOVSQ
	JMP	tail
back:
/*
 * check overlap
 */
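// CX = SI + BX, one past the end of the source; if that is at or below the
// destination the regions do not overlap and the forward copy is safe.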
	MOVQ	SI, CX
	ADDQ	BX, CX
	CMPQ	CX, DI
	JLS	forward
/*
 * whole thing backwards has
 * adjusted addresses
 */
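// The destination overlaps the source and sits above it, so copy from the
// top down: point SI and DI at the last quadword, set the direction flag
// (STD) so REP MOVSQ walks backwards, copy BX>>3 quadwords, then clear the
// flag (CLD), reset SI and DI to the start of the buffers, and let "tail"
// copy the remaining low-order BX&7 bytes.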
	ADDQ	BX, DI
	ADDQ	BX, SI
	STD

/*
 * copy
 */
	MOVQ	BX, CX
	SHRQ	$3, CX
	ANDQ	$7, BX
	SUBQ	$8, DI
	SUBQ	$8, SI
	REP;	MOVSQ
	CLD
	ADDQ	$8, DI
	ADDQ	$8, SI
	SUBQ	BX, DI
	SUBQ	BX, SI
	JMP	tail
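// The small-size cases below load the head and tail of the source into
// registers before storing anything, so they are correct even when the source
// and destination overlap.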
move_1or2:
	MOVB	(SI), AX
	MOVB	-1(SI)(BX*1), CX
	MOVB	AX, (DI)
	MOVB	CX, -1(DI)(BX*1)
	RET
move_0:
	RET
move_3or4:
	MOVW	(SI), AX
	MOVW	-2(SI)(BX*1), CX
	MOVW	AX, (DI)
	MOVW	CX, -2(DI)(BX*1)
	RET
move_5through7:
	MOVL	(SI), AX
	MOVL	-4(SI)(BX*1), CX
	MOVL	AX, (DI)
	MOVL	CX, -4(DI)(BX*1)
	RET
move_8:
	// We need a separate case for 8 to make sure we write pointers atomically.
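	// A single MOVQ ensures that a concurrent observer, such as the garbage
	// collector, can never see a half-written 8-byte (pointer-sized) value,
	// which two 4-byte moves could not guarantee.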
	MOVQ	(SI), AX
	MOVQ	AX, (DI)
	RET
move_9through16:
	MOVQ	(SI), AX
	MOVQ	-8(SI)(BX*1), CX
	MOVQ	AX, (DI)
	MOVQ	CX, -8(DI)(BX*1)
	RET