mirror of https://github.com/golang/go
synced 2024-11-06 05:26:11 -07:00
f864d89ef7
For memmove/memclr, using jump tables only reduces overall function
performance for both amd64 and 386.

Benchmarks for 32-bit memclr:

name            old time/op  new time/op  delta
Memclr/5-8      8.01ns ± 0%  8.94ns ± 2%  +11.59%  (p=0.000 n=9+9)
Memclr/16-8     9.05ns ± 0%  9.49ns ± 0%   +4.81%  (p=0.000 n=8+8)
Memclr/64-8     9.15ns ± 0%  9.49ns ± 0%   +3.76%  (p=0.000 n=9+10)
Memclr/256-8    16.6ns ± 0%  16.6ns ± 0%     ~     (p=1.140 n=10+9)
Memclr/4096-8    179ns ± 0%   166ns ± 0%   -7.26%  (p=0.000 n=9+8)
Memclr/65536-8  3.36µs ± 1%  3.31µs ± 1%   -1.48%  (p=0.000 n=10+9)
Memclr/1M-8     59.5µs ± 3%  60.5µs ± 2%   +1.67%  (p=0.009 n=10+10)
Memclr/4M-8      239µs ± 3%   245µs ± 0%   +2.49%  (p=0.004 n=10+8)
Memclr/8M-8      618µs ± 2%   614µs ± 1%     ~     (p=0.315 n=10+8)
Memclr/16M-8    1.49ms ± 2%  1.47ms ± 1%   -1.11%  (p=0.029 n=10+10)
Memclr/64M-8    7.06ms ± 1%  7.05ms ± 0%     ~     (p=0.573 n=10+8)
[Geo mean]      3.36µs       3.39µs        +1.14%

For less predictable data, like loop-iteration-dependent sizes, the
branch table still shows 2-5% worse results. It also makes the code
slightly more complicated.

This CL removes the TODO note that directly suggests trying this
optimization out, as it encourages people to spend their time on a
quite hopeless endeavour.

The branch-table implementation used a 32/64-entry table with pointers
to TEXT blocks that implemented the work associated with each label.
Most of the trailing entries point to "loop" code that is a fallthrough
for all other sizes that do not map to specialized routines. The only
inefficiency is the extra MOVL/MOVQ required to fetch the table pointer
itself, as MOVL $sym<>(SB)(AX*4) is not valid in Go asm (it works in
other assemblers):

TEXT ·memclrNew(SB), NOSPLIT, $0-8
        MOVL    ptr+0(FP), DI
        MOVL    n+4(FP), BX
        // Handle 0 separately.
        TESTL   BX, BX
        JEQ     _0
        LEAL    -1(BX), CX // n-1
        BSRL    CX, CX
        // AX or X0 zeroed inside every text block.
        MOVL    $memclrTable<>(SB), AX
        JMP     (AX)(CX*4)
_0:
        RET

Change-Id: I4f706931b8127f85a8439b95834d5c2485a5d1bf
Reviewed-on: https://go-review.googlesource.com/115678
Run-TryBot: Iskander Sharipov <iskander.sharipov@intel.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
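The figures above are benchstat output comparing the old and new code.
As a rough illustration only (a hypothetical sketch, not the harness
that produced these numbers), a size-sweep benchmark of this shape can
be written with the standard testing package; BenchmarkMemclr and the
size list here are assumptions for the example:

package memclr_test

import (
        "fmt"
        "testing"
)

// Times clearing of fixed-size buffers, one sub-benchmark per size,
// producing Memclr/<size> rows like those shown above.
func BenchmarkMemclr(b *testing.B) {
        for _, n := range []int{5, 16, 64, 256, 4096, 65536} {
                buf := make([]byte, n)
                b.Run(fmt.Sprint(n), func(b *testing.B) {
                        b.SetBytes(int64(n))
                        for i := 0; i < b.N; i++ {
                                // The compiler recognizes this loop form and
                                // lowers it to a runtime memclr call.
                                for j := range buf {
                                        buf[j] = 0
                                }
                        }
                })
        }
}

Running it repeatedly (for example with go test -bench=Memclr -count=10)
and feeding the results to benchstat yields tables like the one above.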
201 lines
4.3 KiB
ArmAsm
// Inferno's libkern/memmove-386.s
// https://bitbucket.org/inferno-os/inferno-os/src/default/libkern/memmove-386.s
//
// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
// Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved.
// Portions Copyright 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

// +build !plan9

#include "textflag.h"
// func memmove(to, from unsafe.Pointer, n uintptr)
TEXT runtime·memmove(SB), NOSPLIT, $0-12
	MOVL	to+0(FP), DI
	MOVL	from+4(FP), SI
	MOVL	n+8(FP), BX

	// REP instructions have a high startup cost, so we handle small sizes
	// with some straightline code. The REP MOVSL instruction is really fast
	// for large sizes. The cutover is approximately 1K. We implement up to
	// 128 because that is the maximum SSE register load (loading all data
	// into registers lets us ignore copy direction).
tail:
	// BSR+branch table make almost all memmove/memclr benchmarks worse. Not worth doing.
	TESTL	BX, BX
	JEQ	move_0
	CMPL	BX, $2
	JBE	move_1or2
	CMPL	BX, $4
	JB	move_3
	JE	move_4
	CMPL	BX, $8
	JBE	move_5through8
	CMPL	BX, $16
	JBE	move_9through16
	CMPB	runtime·support_sse2(SB), $1
	JNE	nosse2
	CMPL	BX, $32
	JBE	move_17through32
	CMPL	BX, $64
	JBE	move_33through64
	CMPL	BX, $128
	JBE	move_65through128

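	// Sizes above 128 bytes (or above 16 bytes without SSE2) fall
	// through to the REP-based string copy below.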
nosse2:
	/*
	 * check and set for backwards
	 */
	CMPL	SI, DI
	JLS	back

	/*
	 * forward copy loop
	 */
forward:
	// If REP MOVSB isn't fast, don't use it
	CMPB	runtime·support_erms(SB), $1 // enhanced REP MOVSB/STOSB
	JNE	fwdBy4

	// Check alignment
	MOVL	SI, AX
	ORL	DI, AX
	TESTL	$3, AX
	JEQ	fwdBy4

	// Do 1 byte at a time
	MOVL	BX, CX
	REP;	MOVSB
	RET

fwdBy4:
	// Do 4 bytes at a time
	MOVL	BX, CX
	SHRL	$2, CX
	ANDL	$3, BX
	REP;	MOVSL
	JMP	tail	// copy the remaining 0-3 bytes via the small-size cases

	/*
	 * check overlap
	 */
back:
	MOVL	SI, CX
	ADDL	BX, CX
	CMPL	CX, DI
	JLS	forward

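	// The buffers overlap and the source begins at or below the
	// destination, so a forward copy would clobber unread source
	// bytes; copy backwards instead.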
	/*
	 * whole thing backwards has
	 * adjusted addresses
	 */
	ADDL	BX, DI
	ADDL	BX, SI
	STD	// set the direction flag: string ops now decrement SI/DI

	/*
	 * copy
	 */
	MOVL	BX, CX
	SHRL	$2, CX
	ANDL	$3, BX

	SUBL	$4, DI
	SUBL	$4, SI
	REP;	MOVSL

	CLD	// clear the direction flag before continuing
	ADDL	$4, DI
	ADDL	$4, SI
	SUBL	BX, DI
	SUBL	BX, SI
	JMP	tail

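	// Each small-size case below reads the head and the tail of the
	// buffer into registers before storing anything, so overlapping
	// source and destination are handled correctly.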
move_1or2:
	MOVB	(SI), AX
	MOVB	-1(SI)(BX*1), CX
	MOVB	AX, (DI)
	MOVB	CX, -1(DI)(BX*1)
	RET
move_0:
	RET
move_3:
	MOVW	(SI), AX
	MOVB	2(SI), CX
	MOVW	AX, (DI)
	MOVB	CX, 2(DI)
	RET
move_4:
	// We need a separate case for 4 to make sure we write pointers atomically.
	MOVL	(SI), AX
	MOVL	AX, (DI)
	RET
move_5through8:
	MOVL	(SI), AX
	MOVL	-4(SI)(BX*1), CX
	MOVL	AX, (DI)
	MOVL	CX, -4(DI)(BX*1)
	RET
move_9through16:
	MOVL	(SI), AX
	MOVL	4(SI), CX
	MOVL	-8(SI)(BX*1), DX
	MOVL	-4(SI)(BX*1), BP
	MOVL	AX, (DI)
	MOVL	CX, 4(DI)
	MOVL	DX, -8(DI)(BX*1)
	MOVL	BP, -4(DI)(BX*1)
	RET
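	// MOVOU performs unaligned 16-byte loads and stores, so the SSE
	// cases need no alignment checks.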
move_17through32:
	MOVOU	(SI), X0
	MOVOU	-16(SI)(BX*1), X1
	MOVOU	X0, (DI)
	MOVOU	X1, -16(DI)(BX*1)
	RET
move_33through64:
	MOVOU	(SI), X0
	MOVOU	16(SI), X1
	MOVOU	-32(SI)(BX*1), X2
	MOVOU	-16(SI)(BX*1), X3
	MOVOU	X0, (DI)
	MOVOU	X1, 16(DI)
	MOVOU	X2, -32(DI)(BX*1)
	MOVOU	X3, -16(DI)(BX*1)
	RET
move_65through128:
	MOVOU	(SI), X0
	MOVOU	16(SI), X1
	MOVOU	32(SI), X2
	MOVOU	48(SI), X3
	MOVOU	-64(SI)(BX*1), X4
	MOVOU	-48(SI)(BX*1), X5
	MOVOU	-32(SI)(BX*1), X6
	MOVOU	-16(SI)(BX*1), X7
	MOVOU	X0, (DI)
	MOVOU	X1, 16(DI)
	MOVOU	X2, 32(DI)
	MOVOU	X3, 48(DI)
	MOVOU	X4, -64(DI)(BX*1)
	MOVOU	X5, -48(DI)(BX*1)
	MOVOU	X6, -32(DI)(BX*1)
	MOVOU	X7, -16(DI)(BX*1)
	RET