From 0932dc21180642ce1ff095b9b3e68b06c6f440b3 Mon Sep 17 00:00:00 2001
From: Cherry Zhang
Date: Fri, 13 Nov 2020 21:08:26 -0500
Subject: [PATCH] runtime: declare arg size/map for race version of
 sync/atomic functions

The argument size and map are used in stack scanning if those
functions are deferred. Declare the right argument size and map
so they can be scanned correctly.

Fixes #42599.

Change-Id: I74f9409d574cf7c383f4d8f83e38521026b48861
Reviewed-on: https://go-review.googlesource.com/c/go/+/270079
Trust: Cherry Zhang
Run-TryBot: Cherry Zhang
Reviewed-by: Keith Randall
---
 src/runtime/race/testdata/atomic_test.go | 24 ++++++++
 src/runtime/race_amd64.s                 | 78 ++++++++++++++++--------
 src/runtime/race_arm64.s                 | 52 ++++++++--------
 src/runtime/race_ppc64le.s               | 78 ++++++++++++++++--------
 4 files changed, 154 insertions(+), 78 deletions(-)

diff --git a/src/runtime/race/testdata/atomic_test.go b/src/runtime/race/testdata/atomic_test.go
index 769c8d7398d..4ce72604a4c 100644
--- a/src/runtime/race/testdata/atomic_test.go
+++ b/src/runtime/race/testdata/atomic_test.go
@@ -299,3 +299,27 @@ func TestNoRaceAtomicCrash(t *testing.T) {
 	}()
 	atomic.AddInt32(nilptr, 1)
 }
+
+func TestNoRaceDeferAtomicStore(t *testing.T) {
+	// Test that when an atomic function is deferred directly, the
+	// GC scans it correctly. See issue 42599.
+	type foo struct {
+		bar int64
+	}
+
+	var doFork func(f *foo, depth int)
+	doFork = func(f *foo, depth int) {
+		atomic.StoreInt64(&f.bar, 1)
+		defer atomic.StoreInt64(&f.bar, 0)
+		if depth > 0 {
+			for i := 0; i < 2; i++ {
+				f2 := &foo{}
+				go doFork(f2, depth-1)
+			}
+		}
+		runtime.GC()
+	}
+
+	f := &foo{}
+	doFork(f, 11)
+}
diff --git a/src/runtime/race_amd64.s b/src/runtime/race_amd64.s
index 4a86b3371a5..9818bc6ddff 100644
--- a/src/runtime/race_amd64.s
+++ b/src/runtime/race_amd64.s
@@ -207,110 +207,136 @@ TEXT	runtime·racefuncexit(SB), NOSPLIT, $0-0
 
 // Atomic operations for sync/atomic package.
 
 // Load
-TEXT	sync∕atomic·LoadInt32(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·LoadInt32(SB), NOSPLIT, $0-12
+	GO_ARGS
 	MOVQ	$__tsan_go_atomic32_load(SB), AX
 	CALL	racecallatomic<>(SB)
 	RET
 
-TEXT	sync∕atomic·LoadInt64(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·LoadInt64(SB), NOSPLIT, $0-16
+	GO_ARGS
 	MOVQ	$__tsan_go_atomic64_load(SB), AX
 	CALL	racecallatomic<>(SB)
 	RET
 
-TEXT	sync∕atomic·LoadUint32(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·LoadUint32(SB), NOSPLIT, $0-12
+	GO_ARGS
 	JMP	sync∕atomic·LoadInt32(SB)
 
-TEXT	sync∕atomic·LoadUint64(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·LoadUint64(SB), NOSPLIT, $0-16
+	GO_ARGS
 	JMP	sync∕atomic·LoadInt64(SB)
 
-TEXT	sync∕atomic·LoadUintptr(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·LoadUintptr(SB), NOSPLIT, $0-16
+	GO_ARGS
 	JMP	sync∕atomic·LoadInt64(SB)
 
-TEXT	sync∕atomic·LoadPointer(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·LoadPointer(SB), NOSPLIT, $0-16
+	GO_ARGS
 	JMP	sync∕atomic·LoadInt64(SB)
 
 // Store
-TEXT	sync∕atomic·StoreInt32(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·StoreInt32(SB), NOSPLIT, $0-12
+	GO_ARGS
 	MOVQ	$__tsan_go_atomic32_store(SB), AX
 	CALL	racecallatomic<>(SB)
 	RET
 
-TEXT	sync∕atomic·StoreInt64(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·StoreInt64(SB), NOSPLIT, $0-16
+	GO_ARGS
 	MOVQ	$__tsan_go_atomic64_store(SB), AX
 	CALL	racecallatomic<>(SB)
 	RET
 
-TEXT	sync∕atomic·StoreUint32(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·StoreUint32(SB), NOSPLIT, $0-12
+	GO_ARGS
 	JMP	sync∕atomic·StoreInt32(SB)
 
-TEXT	sync∕atomic·StoreUint64(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·StoreUint64(SB), NOSPLIT, $0-16
+	GO_ARGS
 	JMP	sync∕atomic·StoreInt64(SB)
 
-TEXT	sync∕atomic·StoreUintptr(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·StoreUintptr(SB), NOSPLIT, $0-16
+	GO_ARGS
 	JMP	sync∕atomic·StoreInt64(SB)
 
 // Swap
-TEXT	sync∕atomic·SwapInt32(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·SwapInt32(SB), NOSPLIT, $0-20
+	GO_ARGS
 	MOVQ	$__tsan_go_atomic32_exchange(SB), AX
 	CALL	racecallatomic<>(SB)
 	RET
 
-TEXT	sync∕atomic·SwapInt64(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·SwapInt64(SB), NOSPLIT, $0-24
+	GO_ARGS
 	MOVQ	$__tsan_go_atomic64_exchange(SB), AX
 	CALL	racecallatomic<>(SB)
 	RET
 
-TEXT	sync∕atomic·SwapUint32(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·SwapUint32(SB), NOSPLIT, $0-20
+	GO_ARGS
 	JMP	sync∕atomic·SwapInt32(SB)
 
-TEXT	sync∕atomic·SwapUint64(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·SwapUint64(SB), NOSPLIT, $0-24
+	GO_ARGS
 	JMP	sync∕atomic·SwapInt64(SB)
 
-TEXT	sync∕atomic·SwapUintptr(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·SwapUintptr(SB), NOSPLIT, $0-24
+	GO_ARGS
 	JMP	sync∕atomic·SwapInt64(SB)
 
 // Add
-TEXT	sync∕atomic·AddInt32(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·AddInt32(SB), NOSPLIT, $0-20
+	GO_ARGS
 	MOVQ	$__tsan_go_atomic32_fetch_add(SB), AX
 	CALL	racecallatomic<>(SB)
 	MOVL	add+8(FP), AX	// convert fetch_add to add_fetch
 	ADDL	AX, ret+16(FP)
 	RET
 
-TEXT	sync∕atomic·AddInt64(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·AddInt64(SB), NOSPLIT, $0-24
+	GO_ARGS
 	MOVQ	$__tsan_go_atomic64_fetch_add(SB), AX
 	CALL	racecallatomic<>(SB)
 	MOVQ	add+8(FP), AX	// convert fetch_add to add_fetch
 	ADDQ	AX, ret+16(FP)
 	RET
 
-TEXT	sync∕atomic·AddUint32(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·AddUint32(SB), NOSPLIT, $0-20
+	GO_ARGS
 	JMP	sync∕atomic·AddInt32(SB)
 
-TEXT	sync∕atomic·AddUint64(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·AddUint64(SB), NOSPLIT, $0-24
+	GO_ARGS
 	JMP	sync∕atomic·AddInt64(SB)
 
-TEXT	sync∕atomic·AddUintptr(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·AddUintptr(SB), NOSPLIT, $0-24
+	GO_ARGS
 	JMP	sync∕atomic·AddInt64(SB)
 
 // CompareAndSwap
-TEXT	sync∕atomic·CompareAndSwapInt32(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·CompareAndSwapInt32(SB), NOSPLIT, $0-17
+	GO_ARGS
 	MOVQ	$__tsan_go_atomic32_compare_exchange(SB), AX
 	CALL	racecallatomic<>(SB)
 	RET
 
-TEXT	sync∕atomic·CompareAndSwapInt64(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·CompareAndSwapInt64(SB), NOSPLIT, $0-25
+	GO_ARGS
 	MOVQ	$__tsan_go_atomic64_compare_exchange(SB), AX
 	CALL	racecallatomic<>(SB)
 	RET
 
-TEXT	sync∕atomic·CompareAndSwapUint32(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·CompareAndSwapUint32(SB), NOSPLIT, $0-17
+	GO_ARGS
 	JMP	sync∕atomic·CompareAndSwapInt32(SB)
 
-TEXT	sync∕atomic·CompareAndSwapUint64(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·CompareAndSwapUint64(SB), NOSPLIT, $0-25
+	GO_ARGS
 	JMP	sync∕atomic·CompareAndSwapInt64(SB)
 
-TEXT	sync∕atomic·CompareAndSwapUintptr(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·CompareAndSwapUintptr(SB), NOSPLIT, $0-25
+	GO_ARGS
 	JMP	sync∕atomic·CompareAndSwapInt64(SB)
 
 // Generic atomic operation implementation.
diff --git a/src/runtime/race_arm64.s b/src/runtime/race_arm64.s
index 6bc389f69f4..8aa17742b80 100644
--- a/src/runtime/race_arm64.s
+++ b/src/runtime/race_arm64.s
@@ -200,86 +200,86 @@ TEXT	runtime·racefuncexit(SB), NOSPLIT, $0-0
 // R0, R1, R2 set in racecallatomic
 
 // Load
-TEXT	sync∕atomic·LoadInt32(SB), NOSPLIT, $0
+TEXT	sync∕atomic·LoadInt32(SB), NOSPLIT, $0-12
 	GO_ARGS
 	MOVD	$__tsan_go_atomic32_load(SB), R9
 	BL	racecallatomic<>(SB)
 	RET
 
-TEXT	sync∕atomic·LoadInt64(SB), NOSPLIT, $0
+TEXT	sync∕atomic·LoadInt64(SB), NOSPLIT, $0-16
 	GO_ARGS
 	MOVD	$__tsan_go_atomic64_load(SB), R9
 	BL	racecallatomic<>(SB)
 	RET
 
-TEXT	sync∕atomic·LoadUint32(SB), NOSPLIT, $0
+TEXT	sync∕atomic·LoadUint32(SB), NOSPLIT, $0-12
 	GO_ARGS
 	JMP	sync∕atomic·LoadInt32(SB)
 
-TEXT	sync∕atomic·LoadUint64(SB), NOSPLIT, $0
+TEXT	sync∕atomic·LoadUint64(SB), NOSPLIT, $0-16
 	GO_ARGS
 	JMP	sync∕atomic·LoadInt64(SB)
 
-TEXT	sync∕atomic·LoadUintptr(SB), NOSPLIT, $0
+TEXT	sync∕atomic·LoadUintptr(SB), NOSPLIT, $0-16
 	GO_ARGS
 	JMP	sync∕atomic·LoadInt64(SB)
 
-TEXT	sync∕atomic·LoadPointer(SB), NOSPLIT, $0
+TEXT	sync∕atomic·LoadPointer(SB), NOSPLIT, $0-16
 	GO_ARGS
 	JMP	sync∕atomic·LoadInt64(SB)
 
 // Store
-TEXT	sync∕atomic·StoreInt32(SB), NOSPLIT, $0
+TEXT	sync∕atomic·StoreInt32(SB), NOSPLIT, $0-12
 	GO_ARGS
 	MOVD	$__tsan_go_atomic32_store(SB), R9
 	BL	racecallatomic<>(SB)
 	RET
 
-TEXT	sync∕atomic·StoreInt64(SB), NOSPLIT, $0
+TEXT	sync∕atomic·StoreInt64(SB), NOSPLIT, $0-16
 	GO_ARGS
 	MOVD	$__tsan_go_atomic64_store(SB), R9
 	BL	racecallatomic<>(SB)
 	RET
 
-TEXT	sync∕atomic·StoreUint32(SB), NOSPLIT, $0
+TEXT	sync∕atomic·StoreUint32(SB), NOSPLIT, $0-12
 	GO_ARGS
 	JMP	sync∕atomic·StoreInt32(SB)
 
-TEXT	sync∕atomic·StoreUint64(SB), NOSPLIT, $0
+TEXT	sync∕atomic·StoreUint64(SB), NOSPLIT, $0-16
 	GO_ARGS
 	JMP	sync∕atomic·StoreInt64(SB)
 
-TEXT	sync∕atomic·StoreUintptr(SB), NOSPLIT, $0
+TEXT	sync∕atomic·StoreUintptr(SB), NOSPLIT, $0-16
 	GO_ARGS
 	JMP	sync∕atomic·StoreInt64(SB)
 
 // Swap
-TEXT	sync∕atomic·SwapInt32(SB), NOSPLIT, $0
+TEXT	sync∕atomic·SwapInt32(SB), NOSPLIT, $0-20
 	GO_ARGS
 	MOVD	$__tsan_go_atomic32_exchange(SB), R9
 	BL	racecallatomic<>(SB)
 	RET
 
-TEXT	sync∕atomic·SwapInt64(SB), NOSPLIT, $0
+TEXT	sync∕atomic·SwapInt64(SB), NOSPLIT, $0-24
 	GO_ARGS
 	MOVD	$__tsan_go_atomic64_exchange(SB), R9
 	BL	racecallatomic<>(SB)
 	RET
 
-TEXT	sync∕atomic·SwapUint32(SB), NOSPLIT, $0
+TEXT	sync∕atomic·SwapUint32(SB), NOSPLIT, $0-20
 	GO_ARGS
 	JMP	sync∕atomic·SwapInt32(SB)
 
-TEXT	sync∕atomic·SwapUint64(SB), NOSPLIT, $0
+TEXT	sync∕atomic·SwapUint64(SB), NOSPLIT, $0-24
 	GO_ARGS
 	JMP	sync∕atomic·SwapInt64(SB)
 
-TEXT	sync∕atomic·SwapUintptr(SB), NOSPLIT, $0
+TEXT	sync∕atomic·SwapUintptr(SB), NOSPLIT, $0-24
 	GO_ARGS
 	JMP	sync∕atomic·SwapInt64(SB)
 
 // Add
-TEXT	sync∕atomic·AddInt32(SB), NOSPLIT, $0
+TEXT	sync∕atomic·AddInt32(SB), NOSPLIT, $0-20
 	GO_ARGS
 	MOVD	$__tsan_go_atomic32_fetch_add(SB), R9
 	BL	racecallatomic<>(SB)
@@ -289,7 +289,7 @@ TEXT	sync∕atomic·AddInt32(SB), NOSPLIT, $0
 	MOVW	R0, ret+16(FP)
 	RET
 
-TEXT	sync∕atomic·AddInt64(SB), NOSPLIT, $0
+TEXT	sync∕atomic·AddInt64(SB), NOSPLIT, $0-24
 	GO_ARGS
 	MOVD	$__tsan_go_atomic64_fetch_add(SB), R9
 	BL	racecallatomic<>(SB)
@@ -299,40 +299,40 @@ TEXT	sync∕atomic·AddInt64(SB), NOSPLIT, $0
 	MOVD	R0, ret+16(FP)
 	RET
 
-TEXT	sync∕atomic·AddUint32(SB), NOSPLIT, $0
+TEXT	sync∕atomic·AddUint32(SB), NOSPLIT, $0-20
 	GO_ARGS
 	JMP	sync∕atomic·AddInt32(SB)
 
-TEXT	sync∕atomic·AddUint64(SB), NOSPLIT, $0
+TEXT	sync∕atomic·AddUint64(SB), NOSPLIT, $0-24
 	GO_ARGS
 	JMP	sync∕atomic·AddInt64(SB)
 
-TEXT	sync∕atomic·AddUintptr(SB), NOSPLIT, $0
+TEXT	sync∕atomic·AddUintptr(SB), NOSPLIT, $0-24
 	GO_ARGS
 	JMP	sync∕atomic·AddInt64(SB)
 
 // CompareAndSwap
-TEXT	sync∕atomic·CompareAndSwapInt32(SB), NOSPLIT, $0
+TEXT	sync∕atomic·CompareAndSwapInt32(SB), NOSPLIT, $0-17
 	GO_ARGS
 	MOVD	$__tsan_go_atomic32_compare_exchange(SB), R9
 	BL	racecallatomic<>(SB)
 	RET
 
-TEXT	sync∕atomic·CompareAndSwapInt64(SB), NOSPLIT, $0
+TEXT	sync∕atomic·CompareAndSwapInt64(SB), NOSPLIT, $0-25
 	GO_ARGS
 	MOVD	$__tsan_go_atomic64_compare_exchange(SB), R9
 	BL	racecallatomic<>(SB)
 	RET
 
-TEXT	sync∕atomic·CompareAndSwapUint32(SB), NOSPLIT, $0
+TEXT	sync∕atomic·CompareAndSwapUint32(SB), NOSPLIT, $0-17
 	GO_ARGS
 	JMP	sync∕atomic·CompareAndSwapInt32(SB)
 
-TEXT	sync∕atomic·CompareAndSwapUint64(SB), NOSPLIT, $0
+TEXT	sync∕atomic·CompareAndSwapUint64(SB), NOSPLIT, $0-25
 	GO_ARGS
 	JMP	sync∕atomic·CompareAndSwapInt64(SB)
 
-TEXT	sync∕atomic·CompareAndSwapUintptr(SB), NOSPLIT, $0
+TEXT	sync∕atomic·CompareAndSwapUintptr(SB), NOSPLIT, $0-25
 	GO_ARGS
 	JMP	sync∕atomic·CompareAndSwapInt64(SB)
 
diff --git a/src/runtime/race_ppc64le.s b/src/runtime/race_ppc64le.s
index 7421d539ca5..8961254ea6e 100644
--- a/src/runtime/race_ppc64le.s
+++ b/src/runtime/race_ppc64le.s
@@ -207,78 +207,95 @@ TEXT	runtime·racefuncexit(SB), NOSPLIT, $0-0
 // R3, R4, R5 set in racecallatomic
 
 // Load atomic in tsan
-TEXT	sync∕atomic·LoadInt32(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·LoadInt32(SB), NOSPLIT, $0-12
+	GO_ARGS
 	// void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
 	MOVD	$__tsan_go_atomic32_load(SB), R8
 	ADD	$32, R1, R6	// addr of caller's 1st arg
 	BR	racecallatomic<>(SB)
 	RET
 
-TEXT	sync∕atomic·LoadInt64(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·LoadInt64(SB), NOSPLIT, $0-16
+	GO_ARGS
 	// void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
 	MOVD	$__tsan_go_atomic64_load(SB), R8
 	ADD	$32, R1, R6	// addr of caller's 1st arg
 	BR	racecallatomic<>(SB)
 	RET
 
-TEXT	sync∕atomic·LoadUint32(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·LoadUint32(SB), NOSPLIT, $0-12
+	GO_ARGS
 	BR	sync∕atomic·LoadInt32(SB)
 
-TEXT	sync∕atomic·LoadUint64(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·LoadUint64(SB), NOSPLIT, $0-16
+	GO_ARGS
 	BR	sync∕atomic·LoadInt64(SB)
 
-TEXT	sync∕atomic·LoadUintptr(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·LoadUintptr(SB), NOSPLIT, $0-16
+	GO_ARGS
 	BR	sync∕atomic·LoadInt64(SB)
 
-TEXT	sync∕atomic·LoadPointer(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·LoadPointer(SB), NOSPLIT, $0-16
+	GO_ARGS
 	BR	sync∕atomic·LoadInt64(SB)
 
 // Store atomic in tsan
-TEXT	sync∕atomic·StoreInt32(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·StoreInt32(SB), NOSPLIT, $0-12
+	GO_ARGS
 	// void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
 	MOVD	$__tsan_go_atomic32_store(SB), R8
 	ADD	$32, R1, R6	// addr of caller's 1st arg
 	BR	racecallatomic<>(SB)
 
-TEXT	sync∕atomic·StoreInt64(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·StoreInt64(SB), NOSPLIT, $0-16
+	GO_ARGS
 	// void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
 	MOVD	$__tsan_go_atomic64_store(SB), R8
 	ADD	$32, R1, R6	// addr of caller's 1st arg
 	BR	racecallatomic<>(SB)
 
-TEXT	sync∕atomic·StoreUint32(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·StoreUint32(SB), NOSPLIT, $0-12
+	GO_ARGS
 	BR	sync∕atomic·StoreInt32(SB)
 
-TEXT	sync∕atomic·StoreUint64(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·StoreUint64(SB), NOSPLIT, $0-16
+	GO_ARGS
 	BR	sync∕atomic·StoreInt64(SB)
 
-TEXT	sync∕atomic·StoreUintptr(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·StoreUintptr(SB), NOSPLIT, $0-16
+	GO_ARGS
 	BR	sync∕atomic·StoreInt64(SB)
 
 // Swap in tsan
-TEXT	sync∕atomic·SwapInt32(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·SwapInt32(SB), NOSPLIT, $0-20
+	GO_ARGS
 	// void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
 	MOVD	$__tsan_go_atomic32_exchange(SB), R8
 	ADD	$32, R1, R6	// addr of caller's 1st arg
 	BR	racecallatomic<>(SB)
 
-TEXT	sync∕atomic·SwapInt64(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·SwapInt64(SB), NOSPLIT, $0-24
+	GO_ARGS
 	// void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a)
 	MOVD	$__tsan_go_atomic64_exchange(SB), R8
 	ADD	$32, R1, R6	// addr of caller's 1st arg
 	BR	racecallatomic<>(SB)
 
-TEXT	sync∕atomic·SwapUint32(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·SwapUint32(SB), NOSPLIT, $0-20
+	GO_ARGS
 	BR	sync∕atomic·SwapInt32(SB)
 
-TEXT	sync∕atomic·SwapUint64(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·SwapUint64(SB), NOSPLIT, $0-24
+	GO_ARGS
 	BR	sync∕atomic·SwapInt64(SB)
 
-TEXT	sync∕atomic·SwapUintptr(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·SwapUintptr(SB), NOSPLIT, $0-24
+	GO_ARGS
 	BR	sync∕atomic·SwapInt64(SB)
 
 // Add atomic in tsan
-TEXT	sync∕atomic·AddInt32(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·AddInt32(SB), NOSPLIT, $0-20
+	GO_ARGS
 	// void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
 	MOVD	$__tsan_go_atomic32_fetch_add(SB), R8
 	ADD	$64, R1, R6	// addr of caller's 1st arg
@@ -291,7 +308,8 @@ TEXT	sync∕atomic·AddInt32(SB), NOSPLIT, $0-0
 	MOVW	R3, ret+16(FP)
 	RET
 
-TEXT	sync∕atomic·AddInt64(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·AddInt64(SB), NOSPLIT, $0-24
+	GO_ARGS
 	// void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
 	MOVD	$__tsan_go_atomic64_fetch_add(SB), R8
 	ADD	$64, R1, R6	// addr of caller's 1st arg
@@ -304,37 +322,45 @@ TEXT	sync∕atomic·AddInt64(SB), NOSPLIT, $0-0
 	MOVD	R3, ret+16(FP)
 	RET
 
-TEXT	sync∕atomic·AddUint32(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·AddUint32(SB), NOSPLIT, $0-20
+	GO_ARGS
 	BR	sync∕atomic·AddInt32(SB)
 
-TEXT	sync∕atomic·AddUint64(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·AddUint64(SB), NOSPLIT, $0-24
+	GO_ARGS
 	BR	sync∕atomic·AddInt64(SB)
 
-TEXT	sync∕atomic·AddUintptr(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·AddUintptr(SB), NOSPLIT, $0-24
+	GO_ARGS
 	BR	sync∕atomic·AddInt64(SB)
 
 // CompareAndSwap in tsan
-TEXT	sync∕atomic·CompareAndSwapInt32(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·CompareAndSwapInt32(SB), NOSPLIT, $0-17
+	GO_ARGS
 	// void __tsan_go_atomic32_compare_exchange(
 	//   ThreadState *thr, uptr cpc, uptr pc, u8 *a)
 	MOVD	$__tsan_go_atomic32_compare_exchange(SB), R8
 	ADD	$32, R1, R6	// addr of caller's 1st arg
 	BR	racecallatomic<>(SB)
 
-TEXT	sync∕atomic·CompareAndSwapInt64(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·CompareAndSwapInt64(SB), NOSPLIT, $0-25
+	GO_ARGS
 	// void __tsan_go_atomic32_compare_exchange(
 	//   ThreadState *thr, uptr cpc, uptr pc, u8 *a)
 	MOVD	$__tsan_go_atomic64_compare_exchange(SB), R8
 	ADD	$32, R1, R6	// addr of caller's 1st arg
 	BR	racecallatomic<>(SB)
 
-TEXT	sync∕atomic·CompareAndSwapUint32(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·CompareAndSwapUint32(SB), NOSPLIT, $0-17
+	GO_ARGS
 	BR	sync∕atomic·CompareAndSwapInt32(SB)
 
-TEXT	sync∕atomic·CompareAndSwapUint64(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·CompareAndSwapUint64(SB), NOSPLIT, $0-25
+	GO_ARGS
 	BR	sync∕atomic·CompareAndSwapInt64(SB)
 
-TEXT	sync∕atomic·CompareAndSwapUintptr(SB), NOSPLIT, $0-0
+TEXT	sync∕atomic·CompareAndSwapUintptr(SB), NOSPLIT, $0-25
+	GO_ARGS
 	BR	sync∕atomic·CompareAndSwapInt64(SB)
 
 // Common function used to call tsan's atomic functions
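
For reference, the failure mode can be reproduced with a small standalone program, sketched below from the same pattern as the TestNoRaceDeferAtomicStore test added in this patch (the file and helper names here are illustrative, not part of the change). When an atomic function is deferred directly, its arguments, including a live pointer, are copied into the defer record, and the GC scans that record using the callee's declared argument size and pointer map. With the old $0-0 declarations there was no argument map to consult; the new $0-N sizes match the Go prototypes (for example $0-12 for LoadInt32: an 8-byte *int32 plus a 4-byte result, or $0-17 for CompareAndSwapInt32: pointer, old, new, and a 1-byte bool), and GO_ARGS attaches the argument pointer map generated from the Go declaration of the same symbol.

// repro.go — a minimal sketch of issue 42599, adapted from the test above.
// Run with the race detector enabled:
//
//	go run -race repro.go
//
package main

import (
	"runtime"
	"sync/atomic"
)

type foo struct {
	bar int64
}

func doFork(f *foo, depth int) {
	atomic.StoreInt64(&f.bar, 1)
	// Deferring the atomic function directly (not via a closure) is the
	// key: the defer record holds &f.bar as an argument, and the GC must
	// use sync/atomic.StoreInt64's argument size and pointer map to keep
	// the pointed-to foo alive.
	defer atomic.StoreInt64(&f.bar, 0)
	if depth > 0 {
		for i := 0; i < 2; i++ {
			go doFork(&foo{}, depth-1)
		}
	}
	runtime.GC() // force stack scanning while the defer is pending
}

func main() {
	doFork(&foo{}, 11)
}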