mirror of https://github.com/golang/go
synced 2024-10-02 10:18:33 -06:00
3479b065d4
Instead of building a freelist from the mark bits generated by the GC, this CL allocates directly from the mark bits.

The approach moves the mark bits from the pointer/no-pointer heap structures into their own per-span data structures. The mark/allocation vectors consist of a single mark bit per object. Two vectors are maintained, one for allocation and one for the GC's mark phase. During the GC cycle's sweep phase the interpretation of the vectors is swapped: the mark vector becomes the allocation vector, and the old allocation vector is cleared and becomes the mark vector that the next GC cycle will use.

Marked entries in the allocation vector indicate that the object is not free. Each allocation vector maintains a boundary between areas of the span already allocated from and areas not yet allocated from. As objects are allocated this boundary moves forward until it reaches the end of the span, at which point further allocations are done from another span.

Since we no longer sweep a span by inspecting each freed object, maintaining the pointer/scalar bits in the heapBitMap is now the responsibility of the routines doing the actual allocation.

This CL is functionally complete and ready for performance tuning.

Change-Id: I336e0fc21eef1066e0b68c7067cc71b9f3d50e04
Reviewed-on: https://go-review.googlesource.com/19470
Reviewed-by: Austin Clements <austin@google.com>
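At a high level, the scheme can be sketched as follows. This is a minimal illustration of the two-vector idea only; the type and field names (spanBits, allocBits, gcmarkBits, freeindex, nelems) are hypothetical and do not reflect the runtime's actual per-span structures.

// Minimal sketch of the scheme described above (hypothetical names, not the
// runtime's actual types): one mark bit per object, two vectors that swap
// roles at sweep time, and a freeindex boundary that only moves forward.
package sketch

type spanBits struct {
	allocBits  []uint8 // set bit => object is allocated (not free)
	gcmarkBits []uint8 // set bit => object was marked reachable by the GC
	freeindex  uintptr // objects below this index have already been allocated from
	nelems     uintptr // number of object slots in the span
}

// nextFree returns the index of the next free object at or after freeindex,
// marks it allocated, and advances the boundary. It returns nelems when the
// span has no free objects left, meaning allocation must move to another span.
func (s *spanBits) nextFree() uintptr {
	for i := s.freeindex; i < s.nelems; i++ {
		if s.allocBits[i/8]&(1<<(i%8)) == 0 {
			s.allocBits[i/8] |= 1 << (i % 8)
			s.freeindex = i + 1
			return i
		}
	}
	s.freeindex = s.nelems
	return s.nelems
}

// sweep swaps the interpretation of the two vectors: the GC's mark bits become
// the new allocation vector (marked == still live == not free), and the old
// allocation vector is cleared to serve as the mark vector for the next cycle.
func (s *spanBits) sweep() {
	s.allocBits, s.gcmarkBits = s.gcmarkBits, s.allocBits
	for i := range s.gcmarkBits {
		s.gcmarkBits[i] = 0
	}
	s.freeindex = 0
}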
212 lines
5.3 KiB
Go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Central free lists.
//
// See malloc.go for an overview.
//
// The MCentral doesn't actually contain the list of free objects; the MSpan does.
// Each MCentral is two lists of MSpans: those with free objects (c->nonempty)
// and those that are completely allocated (c->empty).
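// Spans are moved between the two lists as objects are allocated from them
// (see cacheSpan) and freed back during sweeping (see freeSpan).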

package runtime

import "runtime/internal/atomic"

// Central list of free objects of a given size.
type mcentral struct {
	lock      mutex
	sizeclass int32
	nonempty  mSpanList // list of spans with a free object, ie a nonempty free list
	empty     mSpanList // list of spans with no free objects (or cached in an mcache)
}

// Initialize a single central free list.
func (c *mcentral) init(sizeclass int32) {
	c.sizeclass = sizeclass
	c.nonempty.init()
	c.empty.init()
}

// Allocate a span to use in an MCache.
func (c *mcentral) cacheSpan() *mspan {
	// Deduct credit for this span allocation and sweep if necessary.
	spanBytes := uintptr(class_to_allocnpages[c.sizeclass]) * _PageSize
	deductSweepCredit(spanBytes, 0)

	lock(&c.lock)
	sg := mheap_.sweepgen
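	// A span's sweepgen encodes its sweep state relative to the heap's
	// current sweep generation: sg-2 means the span still needs sweeping,
	// sg-1 means it is currently being swept, and sg means it has been
	// swept and is ready to use.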
retry:
	var s *mspan
	for s = c.nonempty.first; s != nil; s = s.next {
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			c.nonempty.remove(s)
			c.empty.insertBack(s)
			unlock(&c.lock)
			s.sweep(true)
			goto havespan
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by background sweeper, skip
			continue
		}
		// we have a nonempty span that does not require sweeping, allocate from it
		c.nonempty.remove(s)
		c.empty.insertBack(s)
		unlock(&c.lock)
		goto havespan
	}
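
	// No span on the nonempty list had a free object. Try the empty list:
	// sweeping a span that was fully allocated may free some of its objects.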
	for s = c.empty.first; s != nil; s = s.next {
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			// we have an empty span that requires sweeping,
			// sweep it and see if we can free some space in it
			c.empty.remove(s)
			// swept spans are at the end of the list
			c.empty.insertBack(s)
			unlock(&c.lock)
			s.sweep(true)
			freeIndex := s.nextFreeIndex(0)
			if freeIndex != s.nelems {
				s.freeindex = freeIndex
				goto havespan
			}
			lock(&c.lock)
			// the span is still empty after sweep
			// it is already in the empty list, so just retry
			goto retry
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by background sweeper, skip
			continue
		}
		// already swept empty span,
		// all subsequent ones must also be either swept or in process of sweeping
		break
	}
	unlock(&c.lock)

	// Replenish central list if empty.
	s = c.grow()
	if s == nil {
		return nil
	}
	lock(&c.lock)
	c.empty.insertBack(s)
	unlock(&c.lock)

	// At this point s is a non-empty span, queued at the end of the empty list,
	// c is unlocked.
havespan:
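	// cap is the total number of objects the span can hold; s.ref counts
	// those already allocated, so n is the number still available.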
	cap := int32((s.npages << _PageShift) / s.elemsize)
	n := cap - int32(s.ref)
	if n == 0 {
		throw("empty span")
	}
	usedBytes := uintptr(s.ref) * s.elemsize
	if usedBytes > 0 {
		reimburseSweepCredit(usedBytes)
	}
	atomic.Xadd64(&memstats.heap_live, int64(spanBytes)-int64(usedBytes))
	if trace.enabled {
		// heap_live changed.
		traceHeapAlloc()
	}
	if gcBlackenEnabled != 0 {
		// heap_live changed.
		gcController.revise()
	}
	s.incache = true
	return s
}

// Return span from an MCache.
func (c *mcentral) uncacheSpan(s *mspan) {
	lock(&c.lock)

	s.incache = false

	if s.ref == 0 {
		throw("uncaching full span")
	}

	cap := int32((s.npages << _PageShift) / s.elemsize)
	n := cap - int32(s.ref)
	if n > 0 {
		c.empty.remove(s)
		c.nonempty.insert(s)
		// mCentral_CacheSpan conservatively counted
		// unallocated slots in heap_live. Undo this.
		atomic.Xadd64(&memstats.heap_live, -int64(n)*int64(s.elemsize))
	}
	unlock(&c.lock)
}

// Free n objects from a span s back into the central free list c.
// Called during sweep.
// Returns true if the span was returned to heap. Sets sweepgen to
// the latest generation.
// If preserve=true, don't return the span to heap nor relink in MCentral lists;
// caller takes care of it.
func (c *mcentral) freeSpan(s *mspan, n int32, start gclinkptr, end gclinkptr, preserve bool, wasempty bool) bool {
	if s.incache {
		throw("freeSpan given cached span")
	}
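
	// The sweeper found n dead objects in s; remove them from the span's
	// count of allocated objects.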
	s.ref -= uint16(n)

	if preserve {
		// preserve is set only when called from MCentral_CacheSpan above,
		// the span must be in the empty list.
		if !s.inList() {
			throw("can't preserve unlinked span")
		}
		atomic.Store(&s.sweepgen, mheap_.sweepgen)
		return false
	}

	lock(&c.lock)

	// Move to nonempty if necessary.
	if wasempty {
		c.empty.remove(s)
		c.nonempty.insert(s)
	}

	// delay updating sweepgen until here. This is the signal that
	// the span may be used in an MCache, so it must come after the
	// linked list operations above (actually, just after the
	// lock of c above.)
	atomic.Store(&s.sweepgen, mheap_.sweepgen)

	if s.ref != 0 {
		unlock(&c.lock)
		return false
	}

	c.nonempty.remove(s)
	s.needzero = 1
	unlock(&c.lock)
	mheap_.freeSpan(s, 0)
	return true
}

// grow allocates a new empty span from the heap and initializes it for c's size class.
func (c *mcentral) grow() *mspan {
	npages := uintptr(class_to_allocnpages[c.sizeclass])
	size := uintptr(class_to_size[c.sizeclass])
	n := (npages << _PageShift) / size

	s := mheap_.alloc(npages, c.sizeclass, false, true)
	if s == nil {
		return nil
	}

	p := uintptr(s.start << _PageShift)
	s.limit = p + size*n
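
	// Initialize the span's heap bitmap, which will hold the pointer/scalar
	// information for objects allocated from this span.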
	heapBitsForSpan(s.base()).initSpan(s)
	return s
}