
runtime: best-effort detection of concurrent misuse of maps

If reports like #13062 are really concurrent misuse of maps,
we can detect that, at least some of the time, with a cheap check.

There is an extra pair of memory writes for writing to a map,
but to the same cache line as h.count, which is often being modified anyway,
and there is an extra memory read for reading from a map,
but to the same cache line as h.count, which is always being read anyway.
So the check should be basically invisible and may help reduce the
number of "mysterious runtime crash due to map misuse" reports.

Change-Id: I0e71b0d92eaa3b7bef48bf41b0f5ab790092487e
Reviewed-on: https://go-review.googlesource.com/17501
Reviewed-by: Keith Randall <khr@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
Author: Russ Cox, 2015-12-07 14:22:08 -05:00
parent 7ebb96a489
commit 50c5042047
2 changed files with 52 additions and 3 deletions
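The mechanism the diff below adds is a single hashWriting flag in the map header: every writer sets it before touching the table and clears it afterward, and every reader and writer first throws if the flag is already set. A minimal sketch of that pattern, using a hypothetical toy wrapper rather than the runtime's real hmap (and panic in place of the runtime's unrecoverable throw):

// Toy illustration of the hashWriting-style check, not the runtime code.
// The flag is read and updated without atomics, so detection is
// best-effort, exactly as the commit message says.
package toymap

const hashWriting = 4 // a goroutine is writing to the map

type tmap struct {
	flags uint8
	data  map[string]int
}

func newTmap() *tmap {
	return &tmap{data: make(map[string]int)}
}

func (m *tmap) store(k string, v int) {
	if m.flags&hashWriting != 0 {
		panic("concurrent map writes")
	}
	m.flags |= hashWriting
	m.data[k] = v
	if m.flags&hashWriting == 0 {
		// A racing writer must have cleared the flag under us.
		panic("concurrent map writes")
	}
	m.flags &^= hashWriting // AND NOT: clear the writing bit
}

func (m *tmap) load(k string) (int, bool) {
	if m.flags&hashWriting != 0 {
		panic("concurrent map read and map write")
	}
	v, ok := m.data[k]
	return v, ok
}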

src/runtime/hashmap.go

@@ -95,6 +95,7 @@ const (
// flags
iterator = 1 // there may be an iterator using buckets
oldIterator = 2 // there may be an iterator using oldbuckets
hashWriting = 4 // a goroutine is writing to the map
// sentinel bucket ID for iterator checks
noCheck = 1<<(8*sys.PtrSize) - 1
@@ -284,6 +285,9 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
if h == nil || h.count == 0 {
return atomic.Loadp(unsafe.Pointer(&zeroptr))
}
if h.flags&hashWriting != 0 {
throw("concurrent map read and map write")
}
alg := t.key.alg
hash := alg.hash(key, uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
@@ -335,6 +339,9 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)
if h == nil || h.count == 0 {
return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
}
if h.flags&hashWriting != 0 {
throw("concurrent map read and map write")
}
alg := t.key.alg
hash := alg.hash(key, uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
@@ -378,6 +385,9 @@ func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe
if h == nil || h.count == 0 {
return nil, nil
}
if h.flags&hashWriting != 0 {
throw("concurrent map read and map write")
}
alg := t.key.alg
hash := alg.hash(key, uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
@@ -431,6 +441,10 @@ func mapassign1(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
msanread(key, t.key.size)
msanread(val, t.elem.size)
}
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
h.flags |= hashWriting
alg := t.key.alg
hash := alg.hash(key, uintptr(h.hash0))
@@ -481,7 +495,7 @@ again:
v2 = *((*unsafe.Pointer)(v2))
}
typedmemmove(t.elem, v2, val)
- return
+ goto done
}
ovf := b.overflow(t)
if ovf == nil {
@@ -520,6 +534,12 @@ again:
typedmemmove(t.elem, insertv, val)
*inserti = top
h.count++
done:
if h.flags&hashWriting == 0 {
throw("concurrent map writes")
}
h.flags &^= hashWriting
}
func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
@@ -535,6 +555,11 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
if h == nil || h.count == 0 {
return
}
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
h.flags |= hashWriting
alg := t.key.alg
hash := alg.hash(key, uintptr(h.hash0))
bucket := hash & (uintptr(1)<<h.B - 1)
@@ -564,13 +589,19 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
memclr(v, uintptr(t.valuesize))
b.tophash[i] = empty
h.count--
- return
+ goto done
}
b = b.overflow(t)
if b == nil {
- return
+ goto done
}
}
done:
if h.flags&hashWriting == 0 {
throw("concurrent map writes")
}
h.flags &^= hashWriting
}
func mapiterinit(t *maptype, h *hmap, it *hiter) {
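For user programs, the practical effect of these checks is that an unsynchronized map access racing a write can now crash at the point of misuse instead of corrupting the table and failing mysteriously later. A small hypothetical program that may trip the new check (though not on every run, since detection is best-effort):

// Two goroutines write one map with no synchronization. With the checks
// above in place, the runtime may report
// "fatal error: concurrent map writes" instead of a mysterious crash.
package main

import "sync"

func main() {
	m := make(map[int]int)
	var wg sync.WaitGroup
	for g := 0; g < 2; g++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for i := 0; i < 1000000; i++ {
				m[i] = i // unsynchronized concurrent write
			}
		}()
	}
	wg.Wait()
}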

src/runtime/hashmap_fast.go

@@ -18,6 +18,9 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
if h == nil || h.count == 0 {
return atomic.Loadp(unsafe.Pointer(&zeroptr))
}
if h.flags&hashWriting != 0 {
throw("concurrent map read and map write")
}
var b *bmap
if h.B == 0 {
// One-bucket table. No need to hash.
@@ -60,6 +63,9 @@ func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
if h == nil || h.count == 0 {
return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
}
if h.flags&hashWriting != 0 {
throw("concurrent map read and map write")
}
var b *bmap
if h.B == 0 {
// One-bucket table. No need to hash.
@@ -102,6 +108,9 @@ func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
if h == nil || h.count == 0 {
return atomic.Loadp(unsafe.Pointer(&zeroptr))
}
if h.flags&hashWriting != 0 {
throw("concurrent map read and map write")
}
var b *bmap
if h.B == 0 {
// One-bucket table. No need to hash.
@@ -144,6 +153,9 @@ func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
if h == nil || h.count == 0 {
return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
}
if h.flags&hashWriting != 0 {
throw("concurrent map read and map write")
}
var b *bmap
if h.B == 0 {
// One-bucket table. No need to hash.
@@ -186,6 +198,9 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
if h == nil || h.count == 0 {
return atomic.Loadp(unsafe.Pointer(&zeroptr))
}
if h.flags&hashWriting != 0 {
throw("concurrent map read and map write")
}
key := stringStructOf(&ky)
if h.B == 0 {
// One-bucket table.
@@ -288,6 +303,9 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
if h == nil || h.count == 0 {
return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
}
if h.flags&hashWriting != 0 {
throw("concurrent map read and map write")
}
key := stringStructOf(&ky)
if h.B == 0 {
// One-bucket table.
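
The read-side checks in this file are what report "concurrent map read and map write" when an unguarded reader races a writer. The fix is to serialize access in the program itself; one common pattern (illustrative only, not part of this change, with made-up type and method names) is a sync.RWMutex wrapped around the map:

// Guarding a map with sync.RWMutex so reads and writes never overlap,
// which keeps the hashWriting check from ever firing.
package counters

import "sync"

type Counters struct {
	mu sync.RWMutex
	m  map[string]int
}

func NewCounters() *Counters {
	return &Counters{m: make(map[string]int)}
}

func (c *Counters) Inc(key string) {
	c.mu.Lock() // exclusive lock for a map write
	c.m[key]++
	c.mu.Unlock()
}

func (c *Counters) Get(key string) int {
	c.mu.RLock() // shared lock for a map read
	defer c.mu.RUnlock()
	return c.m[key]
}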