runtime: fix (*gcSweepBuf).block guarantees
Currently gcSweepBuf guarantees that push operations may be performed
concurrently with each other and that block operations may be performed
concurrently with push operations as well.

Unfortunately, this isn't quite true. The existing code allows push
operations to happen concurrently with each other, but block operations
may return blocks with nil entries. The way this can happen is that two
concurrent pushers each grab a slot to push to, and the first one (the
one with the earlier slot in the buffer) has not yet written a span
value when block is called. The existing code in block only checks
whether the very last value in the block is nil, when really an
arbitrary number of the trailing values in the block may or may not be
nil.

Today, this case can't actually happen because when push operations
happen concurrently during a GC (which is the only time block is
called), they only ever happen during an allocation with the heap lock
held, effectively serializing them. A block operation may happen
concurrently with one of these pushes, but its callers will never see a
nil mspan. Outside of a GC, this isn't a problem because although push
operations from allocations can run concurrently with push operations
from sweeping, block operations will never run.

In essence, the real concurrency guarantees provided by gcSweepBuf are
that block operations may happen concurrently with push operations, but
that push operations may not be concurrent with each other if there are
any block operations.

To fix this, and to prepare for push operations happening without the
heap lock held in a future CL, we update the documentation for block to
correctly state that there may be nil entries in the returned slice.
While we're here, make the mspan writes into the buffer atomic to avoid
a block user racing on a nil check, and document that the user should
load mspan values from the returned slice atomically. Finally, we make
all callers of block adhere to the new rules.

We choose to allow nil values rather than filter them out because the
only caller of block is markrootSpans, and if it catches a nil entry,
then there wasn't anything to mark in there anyway since the span is
just being created.

Updates #35112.

Change-Id: I6450aab15f51690d7a000ba5b3d529cf2ca5da1e
Reviewed-on: https://go-review.googlesource.com/c/go/+/203318
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
parent dac936a4ab
commit a5a6f61043
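The race described in the commit message can be modeled outside the runtime. The sketch below is a hypothetical, heavily simplified stand-in for gcSweepBuf (the type, field, and function names are invented, the buffer size is fixed, and it uses sync/atomic rather than the runtime-internal atomic package): push reserves a slot before publishing the pointer, so a concurrent reader can observe a reserved slot that still holds nil and must skip it after an atomic load.

// Hypothetical, simplified model of the push/block race described above.
// This is not the runtime's gcSweepBuf; names and sizes are invented.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"unsafe"
)

type span struct{ id int }

type sweepBuf struct {
	used  uint32            // number of reserved slots
	slots [8]unsafe.Pointer // published *span values
}

// push reserves a slot first and publishes the pointer second, so a
// reader can observe a reserved-but-not-yet-filled (nil) slot.
func (b *sweepBuf) push(s *span) {
	i := atomic.AddUint32(&b.used, 1) - 1
	atomic.StorePointer(&b.slots[i], unsafe.Pointer(s))
}

// block returns the spans in the reserved prefix. Entries are loaded
// atomically and nil entries are skipped, mirroring the rule this
// commit documents for (*gcSweepBuf).block and its callers.
func (b *sweepBuf) block() []*span {
	n := atomic.LoadUint32(&b.used)
	out := make([]*span, 0, n)
	for i := uint32(0); i < n; i++ {
		p := (*span)(atomic.LoadPointer(&b.slots[i]))
		if p == nil {
			continue // slot reserved by a pusher that hasn't filled it yet
		}
		out = append(out, p)
	}
	return out
}

func main() {
	var b sweepBuf
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			b.push(&span{id: id})
		}(i)
	}
	// Read concurrently with the pushes; nil slots are simply skipped.
	fmt.Println("seen so far:", len(b.block()))
	wg.Wait()
	fmt.Println("after all pushes:", len(b.block()))
}

Note that only trimming trailing nils, as the pre-fix code did, would not be enough here: a slower pusher can leave a nil in an earlier slot while a later slot is already filled.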
@@ -315,15 +315,21 @@ func markrootSpans(gcw *gcWork, shard int) {
 	sg := mheap_.sweepgen
 	spans := mheap_.sweepSpans[mheap_.sweepgen/2%2].block(shard)
 	// Note that work.spans may not include spans that were
-	// allocated between entering the scan phase and now. This is
-	// okay because any objects with finalizers in those spans
-	// must have been allocated and given finalizers after we
-	// entered the scan phase, so addfinalizer will have ensured
-	// the above invariants for them.
-	for _, s := range spans {
+	// allocated between entering the scan phase and now. We may
+	// also race with spans being added into sweepSpans when they're
+	// just created, and as a result we may see nil pointers in the
+	// spans slice. This is okay because any objects with finalizers
+	// in those spans must have been allocated and given finalizers
+	// after we entered the scan phase, so addfinalizer will have
+	// ensured the above invariants for them.
+	for i := 0; i < len(spans); i++ {
+		// sweepBuf.block requires that we read pointers from the block atomically.
+		// It also requires that we ignore nil pointers.
+		s := (*mspan)(atomic.Loadp(unsafe.Pointer(&spans[i])))
+
 		// This is racing with spans being initialized, so
 		// check the state carefully.
-		if s.state.get() != mSpanInUse {
+		if s == nil || s.state.get() != mSpanInUse {
 			continue
 		}
 		// Check that this span was swept (it may be cached or uncached).
@@ -111,8 +111,9 @@ retry:
 		unlock(&b.spineLock)
 	}
 
-	// We have a block. Insert the span.
-	block.spans[bottom] = s
+	// We have a block. Insert the span atomically, since there may be
+	// concurrent readers via the block API.
+	atomic.StorepNoWB(unsafe.Pointer(&block.spans[bottom]), unsafe.Pointer(s))
 }
 
 // pop removes and returns a span from buffer b, or nil if b is empty.
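The atomic.StorepNoWB call above comes from the runtime-internal atomic package and is not importable by ordinary Go code. As a hedged illustration of why the publish must be atomic at all, the toy test below (all names invented) pairs sync/atomic.StorePointer with sync/atomic.LoadPointer; replacing the store with a plain assignment would be a data race that `go test -race` reports.

// Hypothetical sketch: publish a pointer to a concurrent reader atomically.
package sweepbuf_test

import (
	"sync/atomic"
	"testing"
	"unsafe"
)

type item struct{ n int }

func TestPublishAtomically(t *testing.T) {
	var slot unsafe.Pointer // holds a *item; nil means "not filled yet"

	done := make(chan struct{})
	go func() {
		// Writer: publish the pointer with an atomic store, analogous to
		// the atomic.StorepNoWB call added by this commit.
		atomic.StorePointer(&slot, unsafe.Pointer(&item{n: 1}))
		close(done)
	}()

	// Reader: load atomically and tolerate nil, as block's callers must.
	if p := (*item)(atomic.LoadPointer(&slot)); p != nil && p.n != 1 {
		t.Fatalf("unexpected value %d", p.n)
	}
	<-done
}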
@@ -147,7 +148,9 @@ func (b *gcSweepBuf) numBlocks() int {
 }
 
 // block returns the spans in the i'th block of buffer b. block is
-// safe to call concurrently with push.
+// safe to call concurrently with push. The block may contain nil
+// pointers that must be ignored, and each entry in the block must be
+// loaded atomically.
 func (b *gcSweepBuf) block(i int) []*mspan {
 	// Perform bounds check before loading spine address since
 	// push ensures the allocated length is at least spineLen.
@@ -169,11 +172,5 @@ func (b *gcSweepBuf) block(i int) []*mspan {
 	} else {
 		spans = block.spans[:bottom]
 	}
-
-	// push may have reserved a slot but not filled it yet, so
-	// trim away unused entries.
-	for len(spans) > 0 && spans[len(spans)-1] == nil {
-		spans = spans[:len(spans)-1]
-	}
 	return spans
 }
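The loop deleted in this last hunk only trimmed nil entries off the tail of the block, so a nil left by a slower pusher in an earlier slot would still have reached callers. The minimal sketch below (using *int as an invented stand-in for *mspan) shows the gap, which is why the fix instead has callers skip nil entries.

// Hypothetical illustration: trailing-only trimming misses interior nils.
package main

import "fmt"

// trimTrailingNils mirrors the behavior of the deleted loop: it only
// strips nil entries from the tail of the slice.
func trimTrailingNils(spans []*int) []*int {
	for len(spans) > 0 && spans[len(spans)-1] == nil {
		spans = spans[:len(spans)-1]
	}
	return spans
}

func main() {
	x, y := 1, 2
	// The interior nil models an earlier slot that was reserved but not
	// yet filled when block was called.
	block := []*int{&x, nil, &y, nil}
	fmt.Println(len(trimTrailingNils(block))) // 3: the interior nil survives
}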