runtime/internal/atomic: add early nil check on ARM
If nil, fault before taking the lock or calling into the kernel.

Change-Id: I013d78a5f9233c2a9197660025f679940655d384
Reviewed-on: https://go-review.googlesource.com/93636
Run-TryBot: Cherry Zhang <cherryyz@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
parent 97124af99a
commit 633b38c5d2
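The change is identical in all five emulated 64-bit operations (Cas64, Xadd64, Xchg64, Load64, Store64): dereference addr once before acquiring the per-address lock, so a nil pointer faults while no lock is held and the runtime can still produce a clean traceback. A minimal standalone sketch of the pattern, with a plain sync.Mutex standing in for the runtime-internal addrLock and a hypothetical xadd64 in place of the real function:

package main

import (
	"fmt"
	"sync"
)

var mu sync.Mutex // stand-in for the runtime's per-address lock

// xadd64 mirrors the shape of the emulated 64-bit atomics on old ARM:
// touch *addr first so a nil pointer faults before the lock is taken.
func xadd64(addr *uint64, delta uint64) uint64 {
	_ = *addr // if nil, fault before taking the lock
	mu.Lock()
	defer mu.Unlock()
	*addr += delta
	return *addr
}

func main() {
	var n uint64
	fmt.Println(xadd64(&n, 5)) // prints 5
}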
--- a/src/runtime/internal/atomic/atomic_arm.go
+++ b/src/runtime/internal/atomic/atomic_arm.go
@@ -109,6 +109,7 @@ func Cas64(addr *uint64, old, new uint64) bool {
 	if uintptr(unsafe.Pointer(addr))&7 != 0 {
 		*(*int)(nil) = 0 // crash on unaligned uint64
 	}
+	_ = *addr // if nil, fault before taking the lock
 	var ok bool
 	addrLock(addr).lock()
 	if *addr == old {
@@ -124,6 +125,7 @@ func Xadd64(addr *uint64, delta int64) uint64 {
 	if uintptr(unsafe.Pointer(addr))&7 != 0 {
 		*(*int)(nil) = 0 // crash on unaligned uint64
 	}
+	_ = *addr // if nil, fault before taking the lock
 	var r uint64
 	addrLock(addr).lock()
 	r = *addr + uint64(delta)
@@ -137,6 +139,7 @@ func Xchg64(addr *uint64, v uint64) uint64 {
 	if uintptr(unsafe.Pointer(addr))&7 != 0 {
 		*(*int)(nil) = 0 // crash on unaligned uint64
 	}
+	_ = *addr // if nil, fault before taking the lock
 	var r uint64
 	addrLock(addr).lock()
 	r = *addr
@@ -150,6 +153,7 @@ func Load64(addr *uint64) uint64 {
 	if uintptr(unsafe.Pointer(addr))&7 != 0 {
 		*(*int)(nil) = 0 // crash on unaligned uint64
 	}
+	_ = *addr // if nil, fault before taking the lock
 	var r uint64
 	addrLock(addr).lock()
 	r = *addr
@@ -162,6 +166,7 @@ func Store64(addr *uint64, v uint64) {
 	if uintptr(unsafe.Pointer(addr))&7 != 0 {
 		*(*int)(nil) = 0 // crash on unaligned uint64
 	}
+	_ = *addr // if nil, fault before taking the lock
 	addrLock(addr).lock()
 	*addr = v
 	addrLock(addr).unlock()
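The guard relies on the gc compiler lowering a blank-identifier dereference to a nil check rather than discarding it entirely: the loaded value is unused, but the fault still happens. A small demonstration outside the runtime, where the fault surfaces as a recoverable run-time panic:

package main

import "fmt"

func main() {
	defer func() {
		// prints: recovered: runtime error: invalid memory address
		// or nil pointer dereference
		fmt.Println("recovered:", recover())
	}()
	var p *uint64
	_ = *p // the loaded value is discarded, but the nil check remains
}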
--- a/src/runtime/internal/atomic/sys_linux_arm.s
+++ b/src/runtime/internal/atomic/sys_linux_arm.s
@@ -11,6 +11,9 @@ TEXT cas<>(SB),NOSPLIT,$0
 
 TEXT runtime∕internal∕atomic·Cas(SB),NOSPLIT,$0
 	MOVW	ptr+0(FP), R2
+	// trigger potential paging fault here,
+	// because we don't know how to traceback through __kuser_cmpxchg
+	MOVW	(R2), R0
 	MOVW	old+4(FP), R0
 loop:
 	MOVW	new+8(FP), R1
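On Linux/ARM, Cas reaches the compare-and-swap through the kernel-provided __kuser_cmpxchg helper in the vector page, and the runtime cannot unwind a traceback through that code; the added MOVW (R2), R0 takes any nil or paging fault while execution is still in Go's own assembly. A hypothetical crash demonstration (not part of the commit) of the user-visible effect; it intentionally faults:

package main

import "sync/atomic"

func main() {
	var p *uint32
	// Crashes with a nil pointer dereference whose traceback points
	// here, rather than into the kernel helper, through which the
	// runtime cannot unwind.
	atomic.CompareAndSwapUint32(p, 0, 1)
}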