
runtime/internal/atomic: add early nil check on ARM

If nil, fault before taking the lock or calling into the kernel.

Change-Id: I013d78a5f9233c2a9197660025f679940655d384
Reviewed-on: https://go-review.googlesource.com/93636
Run-TryBot: Cherry Zhang <cherryyz@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
Cherry Zhang 2018-02-12 17:00:01 -05:00
parent 97124af99a
commit 633b38c5d2
2 changed files with 9 additions and 1 deletion
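
The rationale behind the change: on 32-bit ARM these 64-bit atomics are implemented with a spin lock (addrLock), so a nil dereference that happened after lock() would fault inside the runtime's critical section. Dereferencing addr first moves the fault ahead of the lock. Below is a minimal user-space sketch of the pattern, assuming sync.Mutex and recover as stand-ins for the runtime's lock and fault handling (store64 and mu are hypothetical names, not runtime code):

    package main

    import (
    	"fmt"
    	"sync"
    )

    var mu sync.Mutex // stand-in for the runtime's addrLock

    // store64 mirrors the shape of the runtime's Store64: lock, write,
    // unlock, with no defer, so a fault while the lock is held would
    // leave it locked.
    func store64(addr *uint64, v uint64) {
    	_ = *addr // if nil, fault here, before taking the lock
    	mu.Lock()
    	*addr = v
    	mu.Unlock()
    }

    func main() {
    	defer func() {
    		fmt.Println("recovered:", recover()) // in user code the nil deref surfaces as a panic
    		mu.Lock()                            // still acquirable: the fault happened pre-lock
    		mu.Unlock()
    		fmt.Println("lock was not leaked")
    	}()
    	store64(nil, 1)
    }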

--- a/src/runtime/internal/atomic/atomic_arm.go
+++ b/src/runtime/internal/atomic/atomic_arm.go

@@ -109,6 +109,7 @@ func Cas64(addr *uint64, old, new uint64) bool {
 	if uintptr(unsafe.Pointer(addr))&7 != 0 {
 		*(*int)(nil) = 0 // crash on unaligned uint64
 	}
+	_ = *addr // if nil, fault before taking the lock
 	var ok bool
 	addrLock(addr).lock()
 	if *addr == old {
@@ -124,6 +125,7 @@ func Xadd64(addr *uint64, delta int64) uint64 {
 	if uintptr(unsafe.Pointer(addr))&7 != 0 {
 		*(*int)(nil) = 0 // crash on unaligned uint64
 	}
+	_ = *addr // if nil, fault before taking the lock
 	var r uint64
 	addrLock(addr).lock()
 	r = *addr + uint64(delta)
@@ -137,6 +139,7 @@ func Xchg64(addr *uint64, v uint64) uint64 {
 	if uintptr(unsafe.Pointer(addr))&7 != 0 {
 		*(*int)(nil) = 0 // crash on unaligned uint64
 	}
+	_ = *addr // if nil, fault before taking the lock
 	var r uint64
 	addrLock(addr).lock()
 	r = *addr
@@ -150,6 +153,7 @@ func Load64(addr *uint64) uint64 {
 	if uintptr(unsafe.Pointer(addr))&7 != 0 {
 		*(*int)(nil) = 0 // crash on unaligned uint64
 	}
+	_ = *addr // if nil, fault before taking the lock
 	var r uint64
 	addrLock(addr).lock()
 	r = *addr
@@ -162,6 +166,7 @@ func Store64(addr *uint64, v uint64) {
 	if uintptr(unsafe.Pointer(addr))&7 != 0 {
 		*(*int)(nil) = 0 // crash on unaligned uint64
 	}
+	_ = *addr // if nil, fault before taking the lock
 	addrLock(addr).lock()
 	*addr = v
 	addrLock(addr).unlock()
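
A note on the `_ = *addr` idiom used in each hunk: assigning the dereference to the blank identifier still forces the pointer to be dereferenced, so a nil addr faults at that line even though the loaded value is discarded. A self-contained sketch (requireNonNil is a hypothetical name):

    package main

    func requireNonNil(p *uint64) {
    	_ = *p // the blank assignment still dereferences p: nil faults here
    }

    func main() {
    	x := uint64(42)
    	requireNonNil(&x)  // fine
    	requireNonNil(nil) // panics: invalid memory address or nil pointer dereference
    }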

--- a/src/runtime/internal/atomic/sys_linux_arm.s
+++ b/src/runtime/internal/atomic/sys_linux_arm.s

@@ -11,6 +11,9 @@ TEXT cas<>(SB),NOSPLIT,$0
 TEXT runtime∕internal∕atomic·Cas(SB),NOSPLIT,$0
 	MOVW	ptr+0(FP), R2
+	// trigger potential paging fault here,
+	// because we don't know how to traceback through __kuser_cmpxchg
+	MOVW	(R2), R0
 	MOVW	old+4(FP), R0
 loop:
 	MOVW	new+8(FP), R1
@@ -39,4 +42,4 @@ TEXT runtime∕internal∕atomic·Casp1(SB),NOSPLIT,$0
 // a memory barrier, but it requires writing to a coprocessor
 // register. ARMv7 introduced the DMB instruction, but it's expensive
 // even on single-core devices. The kernel helper takes care of all of
-// this for us.
+// this for us.
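
In the assembly version the same trick is a plain load, MOVW (R2), R0, whose result is immediately overwritten by the following MOVW old+4(FP), R0: the load exists only for its fault side effect, so a nil ptr faults in code the runtime knows how to traceback rather than inside the __kuser_cmpxchg kernel helper. For context, a Cas primitive like this one typically sits under a retry loop; a user-space analogue built on sync/atomic (illustrative only, not the runtime's code):

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    // casAdd emulates an atomic add built from compare-and-swap, the
    // same shape the runtime uses where the hardware provides only CAS.
    func casAdd(addr *uint32, delta uint32) uint32 {
    	for {
    		old := atomic.LoadUint32(addr)
    		if atomic.CompareAndSwapUint32(addr, old, old+delta) {
    			return old + delta
    		}
    	}
    }

    func main() {
    	var n uint32
    	fmt.Println(casAdd(&n, 5)) // 5
    }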