Mirror of https://github.com/golang/go (synced 2024-11-14 06:50:21 -07:00)
Revert "sync: improve sync.Pool object stealing"
This reverts CL 303949.

Reason for revert: broke linux-arm-aws TryBots.

Change-Id: Ib44949df70520cdabff857846be0d2221403d2f4
Reviewed-on: https://go-review.googlesource.com/c/go/+/313630
Trust: Bryan C. Mills <bcmills@google.com>
Run-TryBot: Bryan C. Mills <bcmills@google.com>
Reviewed-by: Cherry Zhang <cherryyz@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
parent: c430313992
commit: 0d1280c685
--- a/src/sync/pool.go
+++ b/src/sync/pool.go
@@ -70,57 +70,6 @@ type poolLocal struct {
 	pad [128 - unsafe.Sizeof(poolLocalInternal{})%128]byte
 }
 
-// The randomOrder and randomEnum are copied from runtime/proc.go
-type randomOrder struct {
-	count    uint32
-	coprimes []uint32
-}
-
-type randomEnum struct {
-	i     uint32
-	count uint32
-	pos   uint32
-	inc   uint32
-}
-
-func (ord *randomOrder) reset(count uint32) {
-	ord.count = count
-	ord.coprimes = ord.coprimes[:0]
-	for i := uint32(1); i <= count; i++ {
-		if gcd(i, count) == 1 {
-			ord.coprimes = append(ord.coprimes, i)
-		}
-	}
-}
-
-func (ord *randomOrder) start(i uint32) randomEnum {
-	return randomEnum{
-		count: ord.count,
-		pos:   i % ord.count,
-		inc:   ord.coprimes[i%uint32(len(ord.coprimes))],
-	}
-}
-
-func (enum *randomEnum) done() bool {
-	return enum.i == enum.count
-}
-
-func (enum *randomEnum) next() {
-	enum.i++
-	enum.pos = (enum.pos + enum.inc) % enum.count
-}
-
-func (enum *randomEnum) position() uint32 {
-	return enum.pos
-}
-
-func gcd(a, b uint32) uint32 {
-	for b != 0 {
-		a, b = b, a%b
-	}
-	return a
-}
-
 // from runtime
 func fastrand() uint32
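For context on what this hunk removes: randomOrder enumerates the indices 0..count-1 in a pseudo-random order by starting at a random position and stepping by a stride that is coprime to count, which guarantees every index is visited exactly once before the walk wraps. This is the same trick runtime/proc.go uses to randomize its work-stealing victim order, which is why the CL copied it. A minimal standalone sketch of that property (not part of the CL; all names and values here are illustrative):

// order_sketch.go — illustrative only, not part of the CL.
// Stepping through 0..count-1 by a stride coprime to count visits
// every index exactly once before the walk wraps around.
package main

import "fmt"

func gcd(a, b uint32) uint32 {
	for b != 0 {
		a, b = b, a%b
	}
	return a
}

func main() {
	const count uint32 = 8
	inc := uint32(3) // coprime to 8, as randomOrder.reset would select
	pos := uint32(5) // stands in for fastrand() % count
	if gcd(inc, count) != 1 {
		panic("inc must be coprime to count")
	}
	seen := make(map[uint32]bool)
	for i := uint32(0); i < count; i++ {
		seen[pos] = true
		pos = (pos + inc) % count
	}
	fmt.Println(len(seen) == int(count)) // true: a full permutation
}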
@@ -204,53 +153,29 @@ func (p *Pool) Get() interface{} {
 func (p *Pool) getSlow(pid int) interface{} {
 	// See the comment in pin regarding ordering of the loads.
 	size := runtime_LoadAcquintptr(&p.localSize) // load-acquire
-	// Load pOrder atomically to prevent possible races
-	order := (*randomOrder)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&pOrder)))) // load-consume
-
-	// The pin function always returns a non-zero localSize, and it will remain so until
-	// runtime_procUnpin is called. This invariant is maintained by pin ensuring that locals
-	// is always big enough to account for the current P and that poolCleanup can never
-	// execute concurrently with a pinned P due to disabled preemption.
-	// So we could remove this condition, which protects against division by zero in the
-	// loop body, but we leave it here just to be safe.
-	if size != 0 {
-		locals := p.local // load-consume
-		// Try to steal one element from other procs.
-		for rndp := order.start(fastrand()); !rndp.done(); rndp.next() {
-			i := int(rndp.position())
-			// While pOrder is limited to returning indexes within the range of Ps,
-			// locals may be smaller either because it was reset or because of a race
-			// with pinSlow. Hence, we must still mod the local index by size.
-			l := indexLocal(locals, (pid+i+1)%int(size))
-			if x, _ := l.shared.popTail(); x != nil {
-				return x
-			}
-		}
-	}
+	locals := p.local                            // load-consume
+	// Try to steal one element from other procs.
+	for i := 0; i < int(size); i++ {
+		l := indexLocal(locals, (pid+i+1)%int(size))
+		if x, _ := l.shared.popTail(); x != nil {
+			return x
+		}
+	}
 
 	// Try the victim cache. We do this after attempting to steal
 	// from all primary caches because we want objects in the
 	// victim cache to age out if at all possible.
 	size = atomic.LoadUintptr(&p.victimSize)
-
-	// We also have to ensure that the victim cache is big enough to account for the
-	// current P and that size is not zero (protects against division by zero), just
-	// as the pin function does.
 	if uintptr(pid) >= size {
 		return nil
 	}
-	locals := p.victim
+	locals = p.victim
 	l := indexLocal(locals, pid)
-
-	// Check private cache
 	if x := l.private; x != nil {
 		l.private = nil
 		return x
 	}
-
-	// Try to fetch from the tail of other P queues
-	for rndp := order.start(fastrand()); !rndp.done(); rndp.next() {
-		i := int(rndp.position())
+	for i := 0; i < int(size); i++ {
 		l := indexLocal(locals, (pid+i)%int(size))
 		if x, _ := l.shared.popTail(); x != nil {
 			return x
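With the revert applied, getSlow goes back to sweeping the P indices in a fixed circular order, starting just after the caller and ending on the caller itself. A tiny sketch of the restored probe sequence (pid and size values here are hypothetical):

// probe_sketch.go — illustrative only.
// The restored loop visits the Ps' queues clockwise from pid+1; the
// reverted CL instead started at fastrand() and stepped by a coprime
// stride, so different Ps would not all start from the same victim.
package main

import "fmt"

func main() {
	pid, size := 2, 4 // hypothetical P index and P count
	for i := 0; i < size; i++ {
		fmt.Print((pid+i+1)%size, " ")
	}
	fmt.Println() // prints: 3 0 1 2
}

Note the fixed order means every P begins stealing from the same neighbor; breaking up that contention pattern was the point of the reverted CL.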
@@ -299,12 +224,8 @@ func (p *Pool) pinSlow() (*poolLocal, int) {
 	}
 	// If GOMAXPROCS changes between GCs, we re-allocate the array and lose the old one.
 	size := runtime.GOMAXPROCS(0)
-	// Set count of Ps for random ordering
-	order := &randomOrder{}
-	order.reset(uint32(size))
 	local := make([]poolLocal, size)
 	atomic.StorePointer(&p.local, unsafe.Pointer(&local[0])) // store-release
-	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&pOrder)), unsafe.Pointer(order)) // store-release
 	runtime_StoreReluintptr(&p.localSize, uintptr(size)) // store-release
 	return &local[pid], pid
 }
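This hunk also shows the ordering invariant getSlow depends on: pinSlow stores the new locals array before publishing localSize with release semantics, so a reader that load-acquires the size may safely index that many elements. The runtime-private runtime_StoreReluintptr/runtime_LoadAcquintptr helpers are not exported, so the sketch below substitutes sync/atomic, whose typed operations are at least as strong; all names are illustrative, not Pool's actual fields:

// publish_sketch.go — illustrative only; uses sync/atomic in place of
// the runtime-private acquire/release helpers.
package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

type slot struct{ v int }

var (
	local     atomic.Pointer[slot] // first element of the published slice
	localSize atomic.Uintptr
)

// publish mirrors pinSlow: fill the array, store the pointer, and only
// then publish the size, so readers never observe a size without data.
func publish(n int) {
	s := make([]slot, n)
	for i := range s {
		s[i].v = i
	}
	local.Store(&s[0])
	localSize.Store(uintptr(n))
}

// read mirrors getSlow: load the size first; any nonzero size implies
// the pointer store above is already visible.
func read() {
	n := localSize.Load()
	if n == 0 {
		return
	}
	s := unsafe.Slice(local.Load(), n)
	fmt.Println(s[n-1].v)
}

func main() {
	publish(4)
	read() // prints 3
}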
@@ -346,10 +267,6 @@ var (
 	// oldPools is the set of pools that may have non-empty victim
 	// caches. Protected by STW.
 	oldPools []*Pool
-
-	// pOrder is a random order of Ps used for stealing. Writes
-	// are protected by allPoolsMu. Reads are atomic.
-	pOrder *randomOrder
 )
 
 func init() {
--- a/src/sync/pool_test.go
+++ b/src/sync/pool_test.go
@@ -271,24 +271,6 @@ func BenchmarkPoolOverflow(b *testing.B) {
 	})
 }
 
-// Simulate object starvation in order to force Ps to steal objects
-// from other Ps.
-func BenchmarkPoolStarvation(b *testing.B) {
-	var p Pool
-	count := 100
-	count_starved := count - (count / runtime.GOMAXPROCS(0))
-	b.RunParallel(func(pb *testing.PB) {
-		for pb.Next() {
-			for b := 0; b < count_starved; b++ {
-				p.Put(1)
-			}
-			for b := 0; b < count; b++ {
-				p.Get()
-			}
-		}
-	})
-}
-
 var globalSink interface{}
 
 func BenchmarkPoolSTW(b *testing.B) {
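The removed BenchmarkPoolStarvation made each parallel iteration Put fewer objects than it Gets: with GOMAXPROCS=4, count_starved is 100 - 100/4 = 75, so 25 of every 100 Gets must be served by stealing from another P or by allocating. To reproduce its numbers you would need a checkout that still contains CL 303949; against such a tree, a standard go test invocation along these lines should work (the -run, -bench, and -cpu flags are ordinary go test flags):

go test -run=NONE -bench='PoolStarvation|PoolOverflow' -cpu=4 sync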