// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

// mapaccess1_fast32 is the lookup fast path for maps with 4-byte keys,
// such as map[uint32]V. It returns a pointer to the value for key, or a
// pointer to the zero object if key is not present; it never returns nil.
func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}
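
// Illustration (not part of the original file): the bucket-selection
// arithmetic used above, in isolation. The low B bits of a key's hash pick
// one of the 2**B buckets; during a size-doubling grow, the same hash is
// masked down one more bit to find the key's bucket in the old, half-sized
// array (the sameSizeGrow check above skips that step). A minimal sketch,
// assuming nothing beyond plain integers:
//
//	func bucketIndex(hash uintptr, B uint8) uintptr {
//		m := uintptr(1)<<B - 1 // mask of B low-order one bits
//		return hash & m        // bucket index in [0, 2**B)
//	}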

// mapaccess2_fast32 is like mapaccess1_fast32 but also reports whether the
// key was present in the map.
func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize)), true
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}
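
// Illustration (not part of the original file): callers never invoke these
// routines by name; the compiler lowers map reads on map[uint32]V to them,
// choosing between the one- and two-result forms based on the source
// expression:
//
//	m := map[uint32]string{7: "seven"}
//	s := m[7]     // lowered to a mapaccess1_fast32 call
//	s, ok := m[7] // lowered to a mapaccess2_fast32 call
//	_, _ = s, ok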

// mapaccess1_fast64 is the lookup fast path for maps with 8-byte keys,
// such as map[uint64]V. It returns a pointer to the value for key, or a
// pointer to the zero object if key is not present; it never returns nil.
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}
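
// Illustration (not part of the original file): the in-bucket addressing
// used above. A bucket holds bucketCnt tophash bytes, then bucketCnt keys,
// then bucketCnt values, so slot i of an 8-byte-key bucket lives at fixed
// offsets from the bucket pointer:
//
//	keyOff := dataOffset + i*8                                  // i-th uint64 key
//	valOff := dataOffset + bucketCnt*8 + i*uintptr(t.valuesize) // i-th value
//
// Because comparing an 8-byte key is about as cheap as checking its tophash
// byte, these fast paths compare the key first and consult tophash only to
// reject empty slots.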

// mapaccess2_fast64 is like mapaccess1_fast64 but also reports whether the
// key was present in the map.
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize)), true
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}
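
// Illustration (not part of the original file): the hashWriting check in
// every routine here turns a read racing with a write into a clean throw
// instead of silent memory corruption. A hedged sketch of the pattern, with
// hypothetical names:
//
//	const writing = 1 << 2 // flag bit set by map writers (assumed value)
//
//	func guardedRead(flags uint8) {
//		if flags&writing != 0 {
//			panic("concurrent map read and map write")
//		}
//		// ... proceed with the lookup ...
//	}
//
// Writers set the bit on entry and clear it on exit; the check is
// best-effort race detection, not synchronization.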

// mapaccess1_faststr is the lookup fast path for string-keyed maps. It
// returns a pointer to the value for ky, or a pointer to the zero object if
// ky is not present; it never returns nil.
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i := uintptr(0); i < bucketCnt; i++ {
				x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
				if x == empty {
					continue
				}
				k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
				if k.len != key.len {
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
				}
			}
			return unsafe.Pointer(&zeroVal[0])
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			}
			// check first 4 bytes
			// TODO: on amd64/386 at least, make this compile to one 4-byte comparison instead of
			// four 1-byte comparisons.
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize))
			}
		}
		return unsafe.Pointer(&zeroVal[0])
	}
dohash:
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x != top {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}
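
// Illustration (not part of the original file): the long-key heuristic above
// filters on cheap properties (length, pointer, first and last 4 bytes)
// before paying for a full memequal. A hedged, self-contained sketch of the
// same idea:
//
//	func likelyEqual(a, b string) bool {
//		if len(a) != len(b) {
//			return false // different lengths can never match
//		}
//		if len(a) >= 8 && (a[:4] != b[:4] || a[len(a)-4:] != b[len(b)-4:]) {
//			return false // cheap prefix/suffix windows disagree
//		}
//		return a == b // full comparison only as a last resort
//	}
//
// The runtime version additionally remembers a single surviving candidate
// (keymaybe) and falls back to hashing when two slots pass the filters.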

// mapaccess2_faststr is like mapaccess1_faststr but also reports whether the
// key was present in the map.
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i := uintptr(0); i < bucketCnt; i++ {
				x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
				if x == empty {
					continue
				}
				k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
				if k.len != key.len {
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
				}
			}
			return unsafe.Pointer(&zeroVal[0]), false
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize)), true
			}
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
dohash:
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x != top {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}
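
// Illustration (not part of the original file): the tophash computation used
// in the string paths above. The top 8 bits of the hash serve as a one-byte
// per-slot filter; values below minTopHash are reserved as slot states
// (empty, evacuated, ...), so real tophashes are shifted up into the usable
// range:
//
//	top := uint8(hash >> (sys.PtrSize*8 - 8)) // top byte of the hash
//	if top < minTopHash {
//		top += minTopHash // avoid the reserved sentinel values
//	}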