
sync/atomic: new package

Fixes #170.

R=gri, iant, r, r2
CC=golang-dev
https://golang.org/cl/4241041
Russ Cox 2011-02-25 14:29:36 -05:00
parent 64f9e5a120
commit 22eab1f5c7
7 changed files with 807 additions and 0 deletions

src/pkg/Makefile

@@ -131,6 +131,7 @@ DIRS=\
strconv\
strings\
sync\
sync/atomic\
syscall\
syslog\
tabwriter\

src/pkg/sync/atomic/Makefile

@@ -0,0 +1,18 @@
# Copyright 2011 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.

include ../../../Make.inc

TARG=sync/atomic
GOFILES=\
	doc.go\

OFILES=\
	asm_$(GOARCH).$O\

ifeq ($(GOARCH),arm)
OFILES+=asm_$(GOOS)_$(GOARCH).$O
endif

include ../../../Make.pkg

src/pkg/sync/atomic/asm_386.s

@@ -0,0 +1,87 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

TEXT ·CompareAndSwapInt32(SB),7,$0
	JMP ·CompareAndSwapUint32(SB)

TEXT ·CompareAndSwapUint32(SB),7,$0
	MOVL valptr+0(FP), BP
	MOVL old+4(FP), AX
	MOVL new+8(FP), CX
	// CMPXCHGL was introduced on the 486.
	LOCK
	CMPXCHGL CX, 0(BP)
	SETEQ ret+12(FP)
	RET

TEXT ·CompareAndSwapUintptr(SB),7,$0
	JMP ·CompareAndSwapUint32(SB)

TEXT ·CompareAndSwapInt64(SB),7,$0
	JMP ·CompareAndSwapUint64(SB)

TEXT ·CompareAndSwapUint64(SB),7,$0
	MOVL valptr+0(FP), BP
	MOVL oldlo+4(FP), AX
	MOVL oldhi+8(FP), DX
	MOVL newlo+12(FP), BX
	MOVL newhi+16(FP), CX
	// CMPXCHG8B was introduced on the Pentium.
	LOCK
	CMPXCHG8B 0(BP)
	SETEQ ret+20(FP)
	RET

TEXT ·AddInt32(SB),7,$0
	JMP ·AddUint32(SB)

TEXT ·AddUint32(SB),7,$0
	MOVL valptr+0(FP), BP
	MOVL delta+4(FP), AX
	MOVL AX, CX
	// XADD was introduced on the 486.
	LOCK
	XADDL AX, 0(BP)
	ADDL AX, CX
	MOVL CX, ret+8(FP)
	RET

TEXT ·AddUintptr(SB),7,$0
	JMP ·AddUint32(SB)

TEXT ·AddInt64(SB),7,$0
	JMP ·AddUint64(SB)

TEXT ·AddUint64(SB),7,$0
	// no XADDQ so use CMPXCHG8B loop
	MOVL valptr+0(FP), BP
	// DI:SI = delta
	MOVL deltalo+4(FP), SI
	MOVL deltahi+8(FP), DI
	// DX:AX = *valptr
	MOVL 0(BP), AX
	MOVL 4(BP), DX
addloop:
	// CX:BX = DX:AX (*valptr) + DI:SI (delta)
	MOVL AX, BX
	MOVL DX, CX
	ADDL SI, BX
	ADCL DI, CX
	// if *valptr == DX:AX {
	//	*valptr = CX:BX
	// } else {
	//	DX:AX = *valptr
	// }
	// all in one instruction
	LOCK
	CMPXCHG8B 0(BP)
	JNZ addloop
	// success
	// return CX:BX
	MOVL BX, retlo+12(FP)
	MOVL CX, rethi+16(FP)
	RET
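
The CMPXCHG8B loop above is the classic compare-and-swap retry pattern: compute the new value from a snapshot, then let the hardware either commit it or report that someone else got there first. A minimal Go sketch of that technique, written against the package's public CompareAndSwapUint64; the package name, the function addUint64ViaCAS, and the plain initial read are illustrative only, not part of this commit.

package sketch

import "sync/atomic"

// addUint64ViaCAS models the 386 AddUint64 path: with no 64-bit XADD
// available, the add retries a compare-and-swap until no other writer
// has intervened between the snapshot and the swap.
func addUint64ViaCAS(val *uint64, delta uint64) (new uint64) {
	for {
		old := *val // like the MOVL pair in the assembly; the CAS below validates it
		new = old + delta
		if atomic.CompareAndSwapUint64(val, old, new) {
			return new // the swap saw exactly our snapshot, so new is the stored value
		}
		// *val changed underneath us; take a fresh snapshot and retry.
	}
}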

src/pkg/sync/atomic/asm_amd64.s

@@ -0,0 +1,59 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

TEXT ·CompareAndSwapInt32(SB),7,$0
	JMP ·CompareAndSwapUint32(SB)

TEXT ·CompareAndSwapUint32(SB),7,$0
	MOVQ valptr+0(FP), BP
	MOVL old+8(FP), AX
	MOVL new+12(FP), CX
	LOCK
	CMPXCHGL CX, 0(BP)
	SETEQ ret+16(FP)
	RET

TEXT ·CompareAndSwapUintptr(SB),7,$0
	JMP ·CompareAndSwapUint64(SB)

TEXT ·CompareAndSwapInt64(SB),7,$0
	JMP ·CompareAndSwapUint64(SB)

TEXT ·CompareAndSwapUint64(SB),7,$0
	MOVQ valptr+0(FP), BP
	MOVQ old+8(FP), AX
	MOVQ new+16(FP), CX
	LOCK
	CMPXCHGQ CX, 0(BP)
	SETEQ ret+24(FP)
	RET

TEXT ·AddInt32(SB),7,$0
	JMP ·AddUint32(SB)

TEXT ·AddUint32(SB),7,$0
	MOVQ valptr+0(FP), BP
	MOVL delta+8(FP), AX
	MOVL AX, CX
	LOCK
	XADDL AX, 0(BP)
	ADDL AX, CX
	MOVL CX, ret+16(FP)
	RET

TEXT ·AddUintptr(SB),7,$0
	JMP ·AddUint64(SB)

TEXT ·AddInt64(SB),7,$0
	JMP ·AddUint64(SB)

TEXT ·AddUint64(SB),7,$0
	MOVQ valptr+0(FP), BP
	MOVQ delta+8(FP), AX
	MOVQ AX, CX
	LOCK
	XADDQ AX, 0(BP)
	ADDQ AX, CX
	MOVQ CX, ret+16(FP)
	RET
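
The MOVQ AX, CX before the LOCK XADDQ is what lets Add return the new value: XADD leaves the previous contents of *valptr in AX, and adding the saved delta in CX reconstructs old+delta. A small Go model of that arithmetic; fetchAdd and the mutex are hypothetical stand-ins for the LOCK XADDQ instruction, not real sync/atomic API.

package sketch

import "sync"

var mu sync.Mutex // serializes the model; the real XADDQ needs no lock

// fetchAdd is a hypothetical stand-in for LOCK XADDQ: it adds delta to
// *val and returns the value *val held before the add.
func fetchAdd(val *uint64, delta uint64) (old uint64) {
	mu.Lock()
	defer mu.Unlock()
	old = *val
	*val += delta
	return old
}

// addUint64Model mirrors the amd64 AddUint64 body: save delta (MOVQ AX, CX),
// fetch-and-add (XADDQ), then add the saved delta back (ADDQ) so the caller
// receives the new value.
func addUint64Model(val *uint64, delta uint64) (new uint64) {
	old := fetchAdd(val, delta)
	return old + delta
}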

src/pkg/sync/atomic/asm_arm.s

@@ -0,0 +1,78 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// ARM atomic operations, for use by asm_$(GOOS)_arm.s.

TEXT ·armCompareAndSwapUint32(SB),7,$0
	MOVW valptr+0(FP), R1
	MOVW old+4(FP), R2
	MOVW new+8(FP), R3
casloop:
	// LDREX and STREX were introduced in ARM 6.
	LDREX (R1), R0
	CMP R0, R2
	BNE casfail
	STREX R3, (R1), R0
	CMP $0, R0
	BNE casloop
	MOVW $1, R0
	MOVBU R0, ret+12(FP)
	RET
casfail:
	MOVW $0, R0
	MOVBU R0, ret+12(FP)
	RET

TEXT ·armCompareAndSwapUint64(SB),7,$0
	MOVW valptr+0(FP), R1
	MOVW oldlo+4(FP), R2
	MOVW oldhi+8(FP), R3
	MOVW newlo+12(FP), R4
	MOVW newhi+16(FP), R5
cas64loop:
	// LDREXD and STREXD were introduced in ARM 11.
	LDREXD (R1), R6 // loads R6 and R7
	CMP R2, R6
	BNE cas64fail
	CMP R3, R7
	BNE cas64fail
	STREXD R4, (R1), R0 // stores R4 and R5
	CMP $0, R0
	BNE cas64loop
	MOVW $1, R0
	MOVBU R0, ret+20(FP)
	RET
cas64fail:
	MOVW $0, R0
	MOVBU R0, ret+20(FP)
	RET

TEXT ·armAddUint32(SB),7,$0
	MOVW valptr+0(FP), R1
	MOVW delta+4(FP), R2
addloop:
	// LDREX and STREX were introduced in ARM 6.
	LDREX (R1), R3
	ADD R2, R3
	STREX R3, (R1), R0
	CMP $0, R0
	BNE addloop
	MOVW R3, ret+8(FP)
	RET

TEXT ·armAddUint64(SB),7,$0
	MOVW valptr+0(FP), R1
	MOVW deltalo+4(FP), R2
	MOVW deltahi+8(FP), R3
add64loop:
	// LDREXD and STREXD were introduced in ARM 11.
	LDREXD (R1), R4 // loads R4 and R5
	ADD.S R2, R4
	ADC R3, R5
	STREXD R4, (R1), R0 // stores R4 and R5
	CMP $0, R0
	BNE add64loop
	MOVW R4, retlo+12(FP)
	MOVW R5, rethi+16(FP)
	RET
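
LDREX and STREX form a load-linked/store-conditional pair: the store only succeeds if nothing else has touched the word since the exclusive load, which is why every routine above loops on the STREX result. A rough Go model of armAddUint32 under that assumption; loadExclusive and storeExclusive are hypothetical, non-atomic stand-ins for the instructions, not real API.

package sketch

// loadExclusive and storeExclusive are hypothetical stand-ins for
// LDREX/STREX. The real store-conditional fails (and armAddUint32
// retries) when another processor wrote the word after the load;
// this single-threaded model always succeeds.
func loadExclusive(val *uint32) uint32          { return *val }
func storeExclusive(val *uint32, v uint32) bool { *val = v; return true }

// armAddUint32Model sketches the retry loop in armAddUint32 above.
func armAddUint32Model(val *uint32, delta uint32) (new uint32) {
	for {
		old := loadExclusive(val)     // LDREX (R1), R3
		new = old + delta             // ADD R2, R3
		if storeExclusive(val, new) { // STREX R3, (R1), R0; CMP $0, R0; BNE addloop
			return new // MOVW R3, ret+8(FP)
		}
	}
}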

src/pkg/sync/atomic/atomic_test.go

@@ -0,0 +1,506 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package atomic

import (
	"runtime"
	"testing"
	"unsafe"
)

// Tests of correct behavior, without contention.
// (Does the function work as advertised?)
//
// Test that the Add functions add correctly.
// Test that the CompareAndSwap functions actually
// do the comparison and the swap correctly.
//
// The loop over power-of-two values is meant to
// ensure that the operations apply to the full word size.
// The struct fields x.before and x.after check that the
// operations do not extend past the full word size.

const (
	magic32 = 0xdedbeef
	magic64 = 0xdeddeadbeefbeef
)

func TestAddInt32(t *testing.T) {
	var x struct {
		before int32
		i      int32
		after  int32
	}
	x.before = magic32
	x.after = magic32
	var j int32
	for delta := int32(1); delta+delta > delta; delta += delta {
		k := AddInt32(&x.i, delta)
		j += delta
		if x.i != j || k != j {
			t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
		}
	}
	if x.before != magic32 || x.after != magic32 {
		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
	}
}

func TestAddUint32(t *testing.T) {
	var x struct {
		before uint32
		i      uint32
		after  uint32
	}
	x.before = magic32
	x.after = magic32
	var j uint32
	for delta := uint32(1); delta+delta > delta; delta += delta {
		k := AddUint32(&x.i, delta)
		j += delta
		if x.i != j || k != j {
			t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
		}
	}
	if x.before != magic32 || x.after != magic32 {
		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
	}
}

func TestAddInt64(t *testing.T) {
	var x struct {
		before int64
		i      int64
		after  int64
	}
	x.before = magic64
	x.after = magic64
	var j int64
	for delta := int64(1); delta+delta > delta; delta += delta {
		k := AddInt64(&x.i, delta)
		j += delta
		if x.i != j || k != j {
			t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
		}
	}
	if x.before != magic64 || x.after != magic64 {
		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, int64(magic64), int64(magic64))
	}
}

func TestAddUint64(t *testing.T) {
	var x struct {
		before uint64
		i      uint64
		after  uint64
	}
	x.before = magic64
	x.after = magic64
	var j uint64
	for delta := uint64(1); delta+delta > delta; delta += delta {
		k := AddUint64(&x.i, delta)
		j += delta
		if x.i != j || k != j {
			t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
		}
	}
	if x.before != magic64 || x.after != magic64 {
		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64))
	}
}

func TestAddUintptr(t *testing.T) {
	var x struct {
		before uintptr
		i      uintptr
		after  uintptr
	}
	var m uint64 = magic64
	magicptr := uintptr(m)
	x.before = magicptr
	x.after = magicptr
	var j uintptr
	for delta := uintptr(1); delta+delta > delta; delta += delta {
		k := AddUintptr(&x.i, delta)
		j += delta
		if x.i != j || k != j {
			t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
		}
	}
	if x.before != magicptr || x.after != magicptr {
		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
	}
}

func TestCompareAndSwapInt32(t *testing.T) {
	var x struct {
		before int32
		i      int32
		after  int32
	}
	x.before = magic32
	x.after = magic32
	for val := int32(1); val+val > val; val += val {
		x.i = val
		if !CompareAndSwapInt32(&x.i, val, val+1) {
			t.Errorf("should have swapped %#x %#x", val, val+1)
		}
		if x.i != val+1 {
			t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
		}
		x.i = val + 1
		if CompareAndSwapInt32(&x.i, val, val+2) {
			t.Errorf("should not have swapped %#x %#x", val, val+2)
		}
		if x.i != val+1 {
			t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
		}
	}
	if x.before != magic32 || x.after != magic32 {
		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
	}
}

func TestCompareAndSwapUint32(t *testing.T) {
	var x struct {
		before uint32
		i      uint32
		after  uint32
	}
	x.before = magic32
	x.after = magic32
	for val := uint32(1); val+val > val; val += val {
		x.i = val
		if !CompareAndSwapUint32(&x.i, val, val+1) {
			t.Errorf("should have swapped %#x %#x", val, val+1)
		}
		if x.i != val+1 {
			t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
		}
		x.i = val + 1
		if CompareAndSwapUint32(&x.i, val, val+2) {
			t.Errorf("should not have swapped %#x %#x", val, val+2)
		}
		if x.i != val+1 {
			t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
		}
	}
	if x.before != magic32 || x.after != magic32 {
		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
	}
}

func TestCompareAndSwapInt64(t *testing.T) {
	var x struct {
		before int64
		i      int64
		after  int64
	}
	x.before = magic64
	x.after = magic64
	for val := int64(1); val+val > val; val += val {
		x.i = val
		if !CompareAndSwapInt64(&x.i, val, val+1) {
			t.Errorf("should have swapped %#x %#x", val, val+1)
		}
		if x.i != val+1 {
			t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
		}
		x.i = val + 1
		if CompareAndSwapInt64(&x.i, val, val+2) {
			t.Errorf("should not have swapped %#x %#x", val, val+2)
		}
		if x.i != val+1 {
			t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
		}
	}
	if x.before != magic64 || x.after != magic64 {
		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64))
	}
}

func TestCompareAndSwapUint64(t *testing.T) {
	var x struct {
		before uint64
		i      uint64
		after  uint64
	}
	x.before = magic64
	x.after = magic64
	for val := uint64(1); val+val > val; val += val {
		x.i = val
		if !CompareAndSwapUint64(&x.i, val, val+1) {
			t.Errorf("should have swapped %#x %#x", val, val+1)
		}
		if x.i != val+1 {
			t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
		}
		x.i = val + 1
		if CompareAndSwapUint64(&x.i, val, val+2) {
			t.Errorf("should not have swapped %#x %#x", val, val+2)
		}
		if x.i != val+1 {
			t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
		}
	}
	if x.before != magic64 || x.after != magic64 {
		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64))
	}
}

func TestCompareAndSwapUintptr(t *testing.T) {
	var x struct {
		before uintptr
		i      uintptr
		after  uintptr
	}
	var m uint64 = magic64
	magicptr := uintptr(m)
	x.before = magicptr
	x.after = magicptr
	for val := uintptr(1); val+val > val; val += val {
		x.i = val
		if !CompareAndSwapUintptr(&x.i, val, val+1) {
			t.Errorf("should have swapped %#x %#x", val, val+1)
		}
		if x.i != val+1 {
			t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
		}
		x.i = val + 1
		if CompareAndSwapUintptr(&x.i, val, val+2) {
			t.Errorf("should not have swapped %#x %#x", val, val+2)
		}
		if x.i != val+1 {
			t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
		}
	}
	if x.before != magicptr || x.after != magicptr {
		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
	}
}

// Tests of correct behavior, with contention.
// (Is the function atomic?)
//
// For each function, we write a "hammer" function that repeatedly
// uses the atomic operation to add 1 to a value. After running
// multiple hammers in parallel, check that we end with the correct
// total.

var hammer32 = []struct {
	name string
	f    func(*uint32, int)
}{
	{"AddInt32", hammerAddInt32},
	{"AddUint32", hammerAddUint32},
	{"AddUintptr", hammerAddUintptr32},
	{"CompareAndSwapInt32", hammerCompareAndSwapInt32},
	{"CompareAndSwapUint32", hammerCompareAndSwapUint32},
	{"CompareAndSwapUintptr", hammerCompareAndSwapUintptr32},
}

func init() {
	var v uint64 = 1 << 50
	if uintptr(v) != 0 {
		// 64-bit system; clear uintptr tests
		hammer32[2].f = nil
		hammer32[5].f = nil
	}
}

func hammerAddInt32(uval *uint32, count int) {
	val := (*int32)(unsafe.Pointer(uval))
	for i := 0; i < count; i++ {
		AddInt32(val, 1)
	}
}

func hammerAddUint32(val *uint32, count int) {
	for i := 0; i < count; i++ {
		AddUint32(val, 1)
	}
}

func hammerAddUintptr32(uval *uint32, count int) {
	// only safe when uintptr is 32-bit.
	// not called on 64-bit systems.
	val := (*uintptr)(unsafe.Pointer(uval))
	for i := 0; i < count; i++ {
		AddUintptr(val, 1)
	}
}

func hammerCompareAndSwapInt32(uval *uint32, count int) {
	val := (*int32)(unsafe.Pointer(uval))
	for i := 0; i < count; i++ {
		for {
			v := *val
			if CompareAndSwapInt32(val, v, v+1) {
				break
			}
		}
	}
}

func hammerCompareAndSwapUint32(val *uint32, count int) {
	for i := 0; i < count; i++ {
		for {
			v := *val
			if CompareAndSwapUint32(val, v, v+1) {
				break
			}
		}
	}
}

func hammerCompareAndSwapUintptr32(uval *uint32, count int) {
	// only safe when uintptr is 32-bit.
	// not called on 64-bit systems.
	val := (*uintptr)(unsafe.Pointer(uval))
	for i := 0; i < count; i++ {
		for {
			v := *val
			if CompareAndSwapUintptr(val, v, v+1) {
				break
			}
		}
	}
}

func TestHammer32(t *testing.T) {
	const (
		n = 100000
		p = 4
	)
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(p))

	for _, tt := range hammer32 {
		if tt.f == nil {
			continue
		}
		c := make(chan int)
		var val uint32
		for i := 0; i < p; i++ {
			go func() {
				tt.f(&val, n)
				c <- 1
			}()
		}
		for i := 0; i < p; i++ {
			<-c
		}
		if val != n*p {
			t.Errorf("%s: val=%d want %d", tt.name, val, n*p)
		}
	}
}

var hammer64 = []struct {
	name string
	f    func(*uint64, int)
}{
	{"AddInt64", hammerAddInt64},
	{"AddUint64", hammerAddUint64},
	{"AddUintptr", hammerAddUintptr64},
	{"CompareAndSwapInt64", hammerCompareAndSwapInt64},
	{"CompareAndSwapUint64", hammerCompareAndSwapUint64},
	{"CompareAndSwapUintptr", hammerCompareAndSwapUintptr64},
}

func init() {
	var v uint64 = 1 << 50
	if uintptr(v) == 0 {
		// 32-bit system; clear uintptr tests
		hammer64[2].f = nil
		hammer64[5].f = nil
	}
}

func hammerAddInt64(uval *uint64, count int) {
	val := (*int64)(unsafe.Pointer(uval))
	for i := 0; i < count; i++ {
		AddInt64(val, 1)
	}
}

func hammerAddUint64(val *uint64, count int) {
	for i := 0; i < count; i++ {
		AddUint64(val, 1)
	}
}

func hammerAddUintptr64(uval *uint64, count int) {
	// only safe when uintptr is 64-bit.
	// not called on 32-bit systems.
	val := (*uintptr)(unsafe.Pointer(uval))
	for i := 0; i < count; i++ {
		AddUintptr(val, 1)
	}
}

func hammerCompareAndSwapInt64(uval *uint64, count int) {
	val := (*int64)(unsafe.Pointer(uval))
	for i := 0; i < count; i++ {
		for {
			v := *val
			if CompareAndSwapInt64(val, v, v+1) {
				break
			}
		}
	}
}

func hammerCompareAndSwapUint64(val *uint64, count int) {
	for i := 0; i < count; i++ {
		for {
			v := *val
			if CompareAndSwapUint64(val, v, v+1) {
				break
			}
		}
	}
}

func hammerCompareAndSwapUintptr64(uval *uint64, count int) {
	// only safe when uintptr is 64-bit.
	// not called on 32-bit systems.
	val := (*uintptr)(unsafe.Pointer(uval))
	for i := 0; i < count; i++ {
		for {
			v := *val
			if CompareAndSwapUintptr(val, v, v+1) {
				break
			}
		}
	}
}

func TestHammer64(t *testing.T) {
	const (
		n = 100000
		p = 4
	)
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(p))

	for _, tt := range hammer64 {
		if tt.f == nil {
			continue
		}
		c := make(chan int)
		var val uint64
		for i := 0; i < p; i++ {
			go func() {
				tt.f(&val, n)
				c <- 1
			}()
		}
		for i := 0; i < p; i++ {
			<-c
		}
		if val != n*p {
			t.Errorf("%s: val=%d want %d", tt.name, val, n*p)
		}
	}
}

src/pkg/sync/atomic/doc.go

@@ -0,0 +1,58 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package atomic provides low-level atomic memory primitives
// useful for implementing synchronization algorithms.
//
// These functions require great care to be used correctly.
// Except for special, low-level applications, synchronization is better
// done with channels or the facilities of the sync package.
// Share memory by communicating;
// don't communicate by sharing memory.
//
// The compare-and-swap operation, implemented by the CompareAndSwapT
// functions, is the atomic equivalent of:
//
//	if *val == old {
//		*val = new
//		return true
//	}
//	return false
//
package atomic

// BUG(rsc):
// On ARM, the 64-bit functions use instructions unavailable before ARM 11.
//
// On x86-32, the 64-bit functions use instructions unavailable before the Pentium.

// CompareAndSwapInt32 executes the compare-and-swap operation for an int32 value.
func CompareAndSwapInt32(val *int32, old, new int32) (swapped bool)

// CompareAndSwapInt64 executes the compare-and-swap operation for an int64 value.
func CompareAndSwapInt64(val *int64, old, new int64) (swapped bool)

// CompareAndSwapUint32 executes the compare-and-swap operation for a uint32 value.
func CompareAndSwapUint32(val *uint32, old, new uint32) (swapped bool)

// CompareAndSwapUint64 executes the compare-and-swap operation for a uint64 value.
func CompareAndSwapUint64(val *uint64, old, new uint64) (swapped bool)

// CompareAndSwapUintptr executes the compare-and-swap operation for a uintptr value.
func CompareAndSwapUintptr(val *uintptr, old, new uintptr) (swapped bool)

// AddInt32 atomically adds delta to *val and returns the new value.
func AddInt32(val *int32, delta int32) (new int32)

// AddUint32 atomically adds delta to *val and returns the new value.
func AddUint32(val *uint32, delta uint32) (new uint32)

// AddInt64 atomically adds delta to *val and returns the new value.
func AddInt64(val *int64, delta int64) (new int64)

// AddUint64 atomically adds delta to *val and returns the new value.
func AddUint64(val *uint64, delta uint64) (new uint64)

// AddUintptr atomically adds delta to *val and returns the new value.
func AddUintptr(val *uintptr, delta uintptr) (new uintptr)
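
A hedged usage sketch of the API above (not part of this change): several goroutines bump a shared counter with AddInt32, then a compare-and-swap loop performs an optimistic read-modify-write once they finish. Only functions declared in this package are used; the program and its constants are illustrative.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var counter int32
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 1000; j++ {
				atomic.AddInt32(&counter, 1) // atomic increment, safe under contention
			}
		}()
	}
	wg.Wait()
	fmt.Println(counter) // 4000

	// Optimistic update: recompute from a snapshot and retry until the
	// swap succeeds. With the writers above finished, a plain read of
	// counter is a safe snapshot here.
	for {
		old := counter
		if atomic.CompareAndSwapInt32(&counter, old, old*2) {
			break
		}
	}
	fmt.Println(counter) // 8000
}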