runtime: abstract specials list iteration
The specials processing loop in mspan.sweep is about to get more
complicated, and I'm too allergic to list manipulation to open-code
more of it there.

Change-Id: I767a0889739da85fb2878fc06a5c55b73bf2ba7d
Reviewed-on: https://go-review.googlesource.com/c/go/+/305551
Trust: Austin Clements <austin@google.com>
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Michael Pratt <mpratt@google.com>
parent 4e16422da0
commit 1ef114d12c
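Before the diff itself, here is a minimal standalone sketch of the list-cursor pattern this CL introduces: the iterator carries a pointer to the link that points at the current node (pprev), so the current node can be spliced out in O(1) with no special case for the list head. The names node, iter, and newIter are illustrative only, not the runtime's types; the method bodies mirror the specialsIter added below.

package main

import "fmt"

// node stands in for the runtime's special record; only the next
// link matters for the iteration pattern.
type node struct {
	val  int
	next *node
}

// iter mirrors specialsIter: pprev points at the link that points
// at the current node, so unlinking never touches the head specially.
type iter struct {
	pprev **node
	s     *node
}

func newIter(head **node) iter {
	return iter{head, *head}
}

func (i *iter) valid() bool { return i.s != nil }

// next keeps the current node and advances past it.
func (i *iter) next() {
	i.pprev = &i.s.next
	i.s = *i.pprev
}

// unlinkAndNext splices out the current node and advances. pprev
// stays put: the link it points at simply gets a new target.
func (i *iter) unlinkAndNext() *node {
	cur := i.s
	i.s = cur.next
	*i.pprev = i.s
	return cur
}

func main() {
	// Build 1 -> 2 -> 3 -> 4.
	var head *node
	for v := 4; v >= 1; v-- {
		head = &node{val: v, next: head}
	}
	// Keep-or-unlink while iterating, the same shape as the
	// specials loop in mspan.sweep: drop the even values.
	for it := newIter(&head); it.valid(); {
		if it.s.val%2 == 0 {
			it.unlinkAndNext()
		} else {
			it.next()
		}
	}
	for n := head; n != nil; n = n.next {
		fmt.Println(n.val) // prints 1, then 3
	}
}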
--- a/src/runtime/mgcsweep.go
+++ b/src/runtime/mgcsweep.go
@@ -356,11 +356,10 @@ func (s *mspan) sweep(preserve bool) bool {
 	//    If such object is not marked, we need to queue all finalizers at once.
 	// Both 1 and 2 are possible at the same time.
 	hadSpecials := s.specials != nil
-	specialp := &s.specials
-	special := *specialp
-	for special != nil {
+	siter := newSpecialsIter(s)
+	for siter.valid() {
 		// A finalizer can be set for an inner byte of an object, find object beginning.
-		objIndex := uintptr(special.offset) / size
+		objIndex := uintptr(siter.s.offset) / size
 		p := s.base() + objIndex*size
 		mbits := s.markBitsForIndex(objIndex)
 		if !mbits.isMarked() {
@@ -368,7 +367,7 @@ func (s *mspan) sweep(preserve bool) bool {
 			// Pass 1: see if it has at least one finalizer.
 			hasFin := false
 			endOffset := p - s.base() + size
-			for tmp := special; tmp != nil && uintptr(tmp.offset) < endOffset; tmp = tmp.next {
+			for tmp := siter.s; tmp != nil && uintptr(tmp.offset) < endOffset; tmp = tmp.next {
 				if tmp.kind == _KindSpecialFinalizer {
 					// Stop freeing of object if it has a finalizer.
 					mbits.setMarkedNonAtomic()
@@ -377,27 +376,23 @@ func (s *mspan) sweep(preserve bool) bool {
 				}
 			}
 			// Pass 2: queue all finalizers _or_ handle profile record.
-			for special != nil && uintptr(special.offset) < endOffset {
+			for siter.valid() && uintptr(siter.s.offset) < endOffset {
 				// Find the exact byte for which the special was setup
 				// (as opposed to object beginning).
+				special := siter.s
 				p := s.base() + uintptr(special.offset)
 				if special.kind == _KindSpecialFinalizer || !hasFin {
-					// Splice out special record.
-					y := special
-					special = special.next
-					*specialp = special
-					freespecial(y, unsafe.Pointer(p), size)
+					siter.unlinkAndNext()
+					freeSpecial(special, unsafe.Pointer(p), size)
 				} else {
 					// This is profile record, but the object has finalizers (so kept alive).
 					// Keep special record.
-					specialp = &special.next
-					special = *specialp
+					siter.next()
 				}
 			}
 		} else {
 			// object is still live: keep special record
-			specialp = &special.next
-			special = *specialp
+			siter.next()
 		}
 	}
 	if hadSpecials && s.specials == nil {
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -1854,9 +1854,37 @@ func setprofilebucket(p unsafe.Pointer, b *bucket) {
 	}
 }
 
-// Do whatever cleanup needs to be done to deallocate s. It has
-// already been unlinked from the mspan specials list.
-func freespecial(s *special, p unsafe.Pointer, size uintptr) {
+// specialsIter helps iterate over specials lists.
+type specialsIter struct {
+	pprev **special
+	s     *special
+}
+
+func newSpecialsIter(span *mspan) specialsIter {
+	return specialsIter{&span.specials, span.specials}
+}
+
+func (i *specialsIter) valid() bool {
+	return i.s != nil
+}
+
+func (i *specialsIter) next() {
+	i.pprev = &i.s.next
+	i.s = *i.pprev
+}
+
+// unlinkAndNext removes the current special from the list and moves
+// the iterator to the next special. It returns the unlinked special.
+func (i *specialsIter) unlinkAndNext() *special {
+	cur := i.s
+	i.s = cur.next
+	*i.pprev = i.s
+	return cur
+}
+
+// freeSpecial performs any cleanup on special s and deallocates it.
+// s must already be unlinked from the specials list.
+func freeSpecial(s *special, p unsafe.Pointer, size uintptr) {
 	switch s.kind {
 	case _KindSpecialFinalizer:
 		sf := (*specialfinalizer)(unsafe.Pointer(s))
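A note on the design: whenever valid() holds, the iterator maintains the invariant *i.pprev == i.s. next() advances both fields because the current record is being kept, while unlinkAndNext() advances only s and rewrites *i.pprev, leaving pprev in place, since after the splice that same link must point at whichever record survives next. This is what lets the sweep loop above free records mid-iteration without ever manipulating s.specials directly.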