
runtime: remove scavAddr in favor of address ranges

This change removes the concept of s.scavAddr in favor of explicitly
reserving and unreserving address ranges. s.scavAddr has several
raciness problems: the scavenger can miss updates, or move the address
back unnecessarily, which forces future scavenge calls to iterate over
already-searched address space.

It achieves this by replacing scavAddr with a second addrRanges which is
cloned from s.inUse at the end of each sweep phase. Ranges from this
second addrRanges are then reserved by scavengers (with the reservation
size proportional to the heap size), which may then safely iterate over
those ranges without worrying about another scavenger coming in.

Fixes #35788.

Change-Id: Ief01ae170384174875118742f6c26b2a41cbb66d
Reviewed-on: https://go-review.googlesource.com/c/go/+/208378
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
Reviewed-by: Austin Clements <austin@google.com>
Michael Anthony Knyszek 2019-11-21 17:05:14 +00:00 committed by Michael Knyszek
parent b1a48af7e8
commit 55ec5182d7
8 changed files with 312 additions and 178 deletions
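
The reserve/scavenge/unreserve flow described in the commit message can be sketched roughly as follows. This is a simplified, hypothetical model for illustration only (plain Go types, no heap lock, invented names such as scavenger and shard); the real logic is in the runtime changes below.

package main

import "fmt"

// addrRange is a half-open interval [base, limit).
type addrRange struct{ base, limit uintptr }

func (r addrRange) size() uintptr { return r.limit - r.base }

// scavenger models one scavenge generation: a snapshot of the in-use
// address space, carved into reservations that callers search on their own.
type scavenger struct {
	gen        uint32      // bumped when sweeping finishes
	unsearched []addrRange // cloned from the heap's in-use ranges at that point
	shard      uintptr     // per-reservation size, proportional to heap size
}

// reserve takes up to shard bytes off the top of the unsearched space.
func (s *scavenger) reserve() (addrRange, uint32) {
	if len(s.unsearched) == 0 {
		return addrRange{}, s.gen
	}
	top := &s.unsearched[len(s.unsearched)-1]
	r := *top
	if r.size() > s.shard {
		r.base = r.limit - s.shard
		top.limit = r.base
	} else {
		s.unsearched = s.unsearched[:len(s.unsearched)-1]
	}
	return r, s.gen
}

// unreserve gives back whatever part of a reservation was not searched,
// unless a new generation has started in the meantime.
func (s *scavenger) unreserve(r addrRange, gen uint32) {
	if r.size() == 0 || gen != s.gen {
		return
	}
	s.unsearched = append(s.unsearched, r)
}

func main() {
	s := &scavenger{gen: 1, shard: 0x4000,
		unsearched: []addrRange{{0x100000, 0x110000}}}
	r, gen := s.reserve()
	fmt.Printf("gen %d reserved [%#x, %#x)\n", gen, r.base, r.limit)
	// Pretend we scavenged/searched the top half of r, then hand back the rest.
	r.limit -= r.size() / 2
	s.unreserve(r, gen)
	fmt.Printf("unsearched: %+v\n", s.unsearched)
}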

View File

@ -735,9 +735,12 @@ func (p *PageAlloc) Free(base, npages uintptr) {
func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
}
func (p *PageAlloc) Scavenge(nbytes uintptr, locked bool) (r uintptr) {
func (p *PageAlloc) Scavenge(nbytes uintptr, mayUnlock bool) (r uintptr) {
pp := (*pageAlloc)(p)
systemstack(func() {
r = (*pageAlloc)(p).scavenge(nbytes, locked)
lock(pp.mheapLock)
r = pp.scavenge(nbytes, mayUnlock)
unlock(pp.mheapLock)
})
return
}
@ -819,7 +822,6 @@ func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
}
}
}
p.resetScavengeAddr()
// Apply alloc state.
for _, s := range init {
@ -833,6 +835,11 @@ func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
// Update heap metadata for the allocRange calls above.
p.update(addr, pallocChunkPages, false, false)
}
systemstack(func() {
lock(p.mheapLock)
p.scavengeStartGen()
unlock(p.mheapLock)
})
return (*PageAlloc)(p)
}

View File

@ -104,10 +104,11 @@ It is a comma-separated list of name=val pairs setting these named variables:
scavenger as well as the total amount of memory returned to the operating system
and an estimate of physical memory utilization. The format of this line is subject
to change, but currently it is:
scav # KiB work, # KiB total, #% util
scav # # KiB work, # KiB total, #% util
where the fields are as follows:
# KiB work the amount of memory returned to the OS since the last scav line
# KiB total how much of the heap at this point in time has been released to the OS
scav # the scavenge cycle number
# KiB work the amount of memory returned to the OS since the last line
# KiB total the total amount of memory returned to the OS
#% util the fraction of all unscavenged memory which is in-use
If the line ends with "(forced)", then scavenging was forced by a
debug.FreeOSMemory() call.
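With GODEBUG=scavtrace=1 set, a program might therefore print lines like the
following (the numbers here are purely illustrative):
scav 1 128 KiB work, 4096 KiB total, 97% util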

View File

@ -91,6 +91,11 @@ const (
// This ratio is used as part of a multiplicative factor to help the scavenger account
// for the additional costs of using scavenged memory in its pacing.
scavengeCostRatio = 0.7 * sys.GoosDarwin
// scavengeReservationShards determines the amount of memory the scavenger
// should reserve for scavenging at a time. Specifically, the amount of
// memory reserved is (heap size in bytes) / scavengeReservationShards.
scavengeReservationShards = 64
)
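// Worked example (illustrative numbers): with 64 shards, a heap whose in-use
// address space totals 256 MiB reserves roughly 256 MiB / 64 = 4 MiB of
// address space per scavenge reservation, while a 64 MiB heap reserves
// roughly 1 MiB.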
// heapRetained returns an estimate of the current heap RSS.
@ -293,13 +298,14 @@ func bgscavenge(c chan int) {
unlock(&mheap_.lock)
return
}
unlock(&mheap_.lock)
// Scavenge one page, and measure the amount of time spent scavenging.
start := nanotime()
released = mheap_.pages.scavengeOne(physPageSize, false)
atomic.Xadduintptr(&mheap_.pages.scavReleased, released)
released = mheap_.pages.scavenge(physPageSize, true)
mheap_.pages.scav.released += released
crit = float64(nanotime() - start)
unlock(&mheap_.lock)
})
if released == 0 {
@ -379,28 +385,36 @@ func bgscavenge(c chan int) {
// scavenge scavenges nbytes worth of free pages, starting with the
// highest address first. Successive calls continue from where it left
// off until the heap is exhausted. Call resetScavengeAddr to bring it
// off until the heap is exhausted. Call scavengeStartGen to bring it
// back to the top of the heap.
//
// Returns the amount of memory scavenged in bytes.
//
// If locked == false, s.mheapLock must not be locked. If locked == true,
// s.mheapLock must be locked.
// s.mheapLock must be held, but may be temporarily released if
// mayUnlock == true.
//
// Must run on the system stack because scavengeOne must run on the
// system stack.
// Must run on the system stack because s.mheapLock must be held.
//
//go:systemstack
func (s *pageAlloc) scavenge(nbytes uintptr, locked bool) uintptr {
func (s *pageAlloc) scavenge(nbytes uintptr, mayUnlock bool) uintptr {
var (
addrs addrRange
gen uint32
)
released := uintptr(0)
for released < nbytes {
r := s.scavengeOne(nbytes-released, locked)
if r == 0 {
// Nothing left to scavenge! Give up.
break
if addrs.size() == 0 {
if addrs, gen = s.scavengeReserve(); addrs.size() == 0 {
break
}
}
r, a := s.scavengeOne(addrs, nbytes-released, mayUnlock)
released += r
addrs = a
}
// Only unreserve the space which hasn't been scavenged or searched
// to ensure we always make progress.
s.scavengeUnreserve(addrs, gen)
return released
}
@ -409,9 +423,9 @@ func (s *pageAlloc) scavenge(nbytes uintptr, locked bool) uintptr {
// released should be the amount of memory released since the last time this
// was called, and forced indicates whether the scavenge was forced by the
// application.
func printScavTrace(released uintptr, forced bool) {
func printScavTrace(gen uint32, released uintptr, forced bool) {
printlock()
print("scav ",
print("scav ", gen, " ",
released>>10, " KiB work, ",
atomic.Load64(&memstats.heap_released)>>10, " KiB total, ",
(atomic.Load64(&memstats.heap_inuse)*100)/heapRetained(), "% util",
@ -423,38 +437,110 @@ func printScavTrace(released uintptr, forced bool) {
printunlock()
}
// resetScavengeAddr sets the scavenge start address to the top of the heap's
// address space. This should be called whenever the sweeper is done.
// scavengeStartGen starts a new scavenge generation, resetting
// the scavenger's search space to the full in-use address space.
//
// s.mheapLock must be held.
func (s *pageAlloc) resetScavengeAddr() {
released := atomic.Loaduintptr(&s.scavReleased)
if debug.scavtrace > 0 {
printScavTrace(released, false)
}
// Subtract from scavReleased instead of just setting it to zero because
// the scavenger could have increased scavReleased concurrently with the
// load above, and we may miss an update by just blindly zeroing the field.
atomic.Xadduintptr(&s.scavReleased, -released)
s.scavAddr = chunkBase(s.end) - 1
}
// scavengeOne starts from s.scavAddr and walks down the heap until it finds
// a contiguous run of pages to scavenge. It will try to scavenge at most
// max bytes at once, but may scavenge more to avoid breaking huge pages. Once
// it scavenges some memory it returns how much it scavenged and updates s.scavAddr
// appropriately. s.scavAddr must be reset manually and externally.
//
// Should it exhaust the heap, it will return 0 and set s.scavAddr to minScavAddr.
//
// If locked == false, s.mheapLock must not be locked.
// If locked == true, s.mheapLock must be locked.
//
// Must be run on the system stack because it either acquires the heap lock
// or executes with the heap lock acquired.
// Must run on the system stack because s.mheapLock must be held.
//
//go:systemstack
func (s *pageAlloc) scavengeOne(max uintptr, locked bool) uintptr {
func (s *pageAlloc) scavengeStartGen() {
if debug.scavtrace > 0 {
printScavTrace(s.scav.gen, s.scav.released, false)
}
s.inUse.cloneInto(&s.scav.inUse)
// reservationBytes may be zero if s.inUse.totalBytes is small, or if
// scavengeReservationShards is large. This case is fine as the scavenger
// will simply be turned off, but it does mean that scavengeReservationShards,
// in concert with pallocChunkBytes, dictates the minimum heap size at which
// the scavenger triggers. In practice this minimum is generally less than an
// arena in size, so virtually every heap has the scavenger on.
s.scav.reservationBytes = alignUp(s.inUse.totalBytes, pallocChunkBytes) / scavengeReservationShards
s.scav.gen++
s.scav.released = 0
}
// scavengeReserve reserves a contiguous range of the address space
// for scavenging. The maximum amount of space it reserves is proportional
// to the size of the heap. The ranges are reserved from the high addresses
// first.
//
// Returns the reserved range and the scavenge generation number for it.
//
// s.mheapLock must be held.
//
// Must run on the system stack because s.mheapLock must be held.
//
//go:systemstack
func (s *pageAlloc) scavengeReserve() (addrRange, uint32) {
// Start by reserving the minimum.
r := s.scav.inUse.removeLast(s.scav.reservationBytes)
// Return early if the size is zero; we don't want to use
// the bogus address below.
if r.size() == 0 {
return r, s.scav.gen
}
// The scavenger requires that base be aligned to a
// palloc chunk because that's the unit of operation for
// the scavenger, so align down, potentially extending
// the range.
newBase := alignDown(r.base, pallocChunkBytes)
// Remove from inUse however much extra we just pulled out.
s.scav.inUse.removeGreaterEqual(newBase)
r.base = newBase
return r, s.scav.gen
}
// scavengeUnreserve returns an unscavenged portion of a range that was
// previously reserved with scavengeReserve.
//
// s.mheapLock must be held.
//
// Must run on the system stack because s.mheapLock must be held.
//
//go:systemstack
func (s *pageAlloc) scavengeUnreserve(r addrRange, gen uint32) {
if r.size() == 0 || gen != s.scav.gen {
return
}
if r.base%pallocChunkBytes != 0 {
throw("unreserving unaligned region")
}
s.scav.inUse.add(r)
}
// scavengeOne walks over address range work until it finds
// a contiguous run of pages to scavenge. It will try to scavenge
// at most max bytes at once, but may scavenge more to avoid
// breaking huge pages. Once it scavenges some memory it returns
// how much it scavenged in bytes.
//
// Returns the number of bytes scavenged and the part of work
// which was not yet searched.
//
// work's base address must be aligned to pallocChunkBytes.
//
// s.mheapLock must be held, but may be temporarily released if
// mayUnlock == true.
//
// Must run on the system stack because s.mheapLock must be held.
//
//go:systemstack
func (s *pageAlloc) scavengeOne(work addrRange, max uintptr, mayUnlock bool) (uintptr, addrRange) {
// Defensively check if we've received an empty address range.
// If so, just return.
if work.size() == 0 {
// Nothing to do.
return 0, work
}
// Check the prerequisites of work.
if work.base%pallocChunkBytes != 0 {
throw("scavengeOne called with unaligned work region")
}
// Calculate the maximum number of pages to scavenge.
//
// This should be alignUp(max, pageSize) / pageSize but max can and will
@ -476,84 +562,49 @@ func (s *pageAlloc) scavengeOne(max uintptr, locked bool) uintptr {
minPages = 1
}
// Helpers for locking and unlocking only if locked == false.
// Helpers for locking and unlocking only if mayUnlock == true.
lockHeap := func() {
if !locked {
if mayUnlock {
lock(s.mheapLock)
}
}
unlockHeap := func() {
if !locked {
if mayUnlock {
unlock(s.mheapLock)
}
}
lockHeap()
ci := chunkIndex(s.scavAddr)
if ci < s.start {
unlockHeap()
return 0
}
// Check the chunk containing the scav addr, starting at the addr
// and see if there are any free and unscavenged pages.
// Fast path: check the chunk containing the top-most address in work,
// starting at that address's page index in the chunk.
//
// Only check this if s.scavAddr is covered by any address range
// in s.inUse, so that we know our check of the summary is safe.
if s.inUse.contains(s.scavAddr) && s.summary[len(s.summary)-1][ci].max() >= uint(minPages) {
// Note that work.limit is exclusive, so get the chunk we care about
// by subtracting 1.
maxAddr := work.limit - 1
maxChunk := chunkIndex(maxAddr)
if s.summary[len(s.summary)-1][maxChunk].max() >= uint(minPages) {
// We only bother looking for a candidate if there are at least
// minPages free pages at all. It's important that we only
// continue if the summary says we can because that's how
// we can tell if parts of the address space are unused.
// See the comment on s.chunks in mpagealloc.go.
base, npages := s.chunkOf(ci).findScavengeCandidate(chunkPageIndex(s.scavAddr), minPages, maxPages)
// minPages free pages at all.
base, npages := s.chunkOf(maxChunk).findScavengeCandidate(chunkPageIndex(maxAddr), minPages, maxPages)
// If we found something, scavenge it and return!
if npages != 0 {
s.scavengeRangeLocked(ci, base, npages)
unlockHeap()
return uintptr(npages) * pageSize
work.limit = s.scavengeRangeLocked(maxChunk, base, npages)
return uintptr(npages) * pageSize, work
}
}
// Update the limit to reflect the fact that we checked maxChunk already.
work.limit = chunkBase(maxChunk)
// getInUseRange returns the highest range in the
// intersection of [0, addr] and s.inUse.
// findCandidate finds the next scavenge candidate in work optimistically.
//
// s.mheapLock must be held.
getInUseRange := func(addr uintptr) addrRange {
top := s.inUse.findSucc(addr)
if top == 0 {
return addrRange{}
}
r := s.inUse.ranges[top-1]
// addr is inclusive, so treat it as such when
// updating the limit, which is exclusive.
if r.limit > addr+1 {
r.limit = addr + 1
}
return r
}
// Slow path: iterate optimistically over the in-use address space
// looking for any free and unscavenged page. If we think we see something,
// lock and verify it!
// Returns the candidate chunk index and true on success, or false on failure.
//
// We iterate over the address space by taking ranges from inUse.
newRange:
for {
r := getInUseRange(s.scavAddr)
if r.size() == 0 {
break
}
unlockHeap()
// Iterate over all of the chunks described by r.
// Note that r.limit is the exclusive upper bound, but what
// we want is the top chunk instead, inclusive, so subtract 1.
bot, top := chunkIndex(r.base), chunkIndex(r.limit-1)
for i := top; i >= bot; i-- {
// The heap need not be locked.
findCandidate := func(work addrRange) (chunkIdx, bool) {
// Iterate over this work's chunks.
for i := chunkIndex(work.limit - 1); i >= chunkIndex(work.base); i-- {
// If this chunk is totally in-use or has no unscavenged pages, don't bother
// doing a more sophisticated check.
// doing a more sophisticated check.
//
// Note we're accessing the summary and the chunks without a lock, but
// that's fine. We're being optimistic anyway.
@ -570,70 +621,72 @@ newRange:
// see a nil pointer in this case if we do race with heap growth, but
// just defensively ignore the nils. This operation is optimistic anyway.
l2 := (*[1 << pallocChunksL2Bits]pallocData)(atomic.Loadp(unsafe.Pointer(&s.chunks[i.l1()])))
if l2 == nil || !l2[i.l2()].hasScavengeCandidate(minPages) {
continue
if l2 != nil && l2[i.l2()].hasScavengeCandidate(minPages) {
return i, true
}
// We found a candidate, so let's lock and verify it.
lockHeap()
// Find, verify, and scavenge if we can.
chunk := s.chunkOf(i)
base, npages := chunk.findScavengeCandidate(pallocChunkPages-1, minPages, maxPages)
if npages > 0 {
// We found memory to scavenge! Mark the bits and report that up.
// scavengeRangeLocked will update scavAddr for us, also.
s.scavengeRangeLocked(i, base, npages)
unlockHeap()
return uintptr(npages) * pageSize
}
// We were fooled, let's take this opportunity to move the scavAddr
// all the way down to where we searched as scavenged for future calls
// and keep iterating. Then, go get a new range.
s.scavAddr = chunkBase(i-1) + pallocChunkPages*pageSize - 1
continue newRange
}
return 0, false
}
// Slow path: iterate optimistically over the in-use address space
// looking for any free and unscavenged page. If we think we see something,
// lock and verify it!
for work.size() != 0 {
unlockHeap()
// Search for the candidate.
candidateChunkIdx, ok := findCandidate(work)
// Lock the heap. We need to do this now whether or not we found a candidate.
// If we did, we'll verify it. If not, we need to lock before returning
// anyway.
lockHeap()
// Move the scavenger down the heap, past everything we just searched.
// Since we don't check if scavAddr moved while we let go of the heap lock,
// it's possible that it moved down and we're moving it up here. This
// raciness could result in us searching parts of the heap unnecessarily.
// TODO(mknyszek): Remove this racy behavior through explicit address
// space reservations, which are difficult to do with just scavAddr.
s.scavAddr = r.base - 1
}
// We reached the end of the in-use address space and couldn't find anything,
// so signal that there's nothing left to scavenge.
s.scavAddr = minScavAddr
unlockHeap()
if !ok {
// We didn't find a candidate, so we're done.
work.limit = work.base
break
}
return 0
// Find, verify, and scavenge if we can.
chunk := s.chunkOf(candidateChunkIdx)
base, npages := chunk.findScavengeCandidate(pallocChunkPages-1, minPages, maxPages)
if npages > 0 {
work.limit = s.scavengeRangeLocked(candidateChunkIdx, base, npages)
return uintptr(npages) * pageSize, work
}
// We were fooled, so let's continue from where we left off.
work.limit = chunkBase(candidateChunkIdx)
}
return 0, work
}
// scavengeRangeLocked scavenges the given region of memory.
// The region of memory is described by its chunk index (ci),
// the starting page index of the region relative to that
// chunk (base), and the length of the region in pages (npages).
//
// Returns the base address of the scavenged region.
//
// s.mheapLock must be held.
func (s *pageAlloc) scavengeRangeLocked(ci chunkIdx, base, npages uint) {
func (s *pageAlloc) scavengeRangeLocked(ci chunkIdx, base, npages uint) uintptr {
s.chunkOf(ci).scavenged.setRange(base, npages)
// Compute the full address for the start of the range.
addr := chunkBase(ci) + uintptr(base)*pageSize
// Update the scav pointer.
s.scavAddr = addr - 1
// Only perform the actual scavenging if we're not in a test.
// It's dangerous to do so otherwise.
if s.test {
return
return addr
}
sysUnused(unsafe.Pointer(addr), uintptr(npages)*pageSize)
// Update global accounting only when not in test, otherwise
// the runtime's accounting will be wrong.
mSysStatInc(&memstats.heap_released, uintptr(npages)*pageSize)
return addr
}
// fillAligned returns x but with all zeroes in m-aligned

View File

@ -419,12 +419,12 @@ func TestPageAllocScavenge(t *testing.T) {
}
for name, v := range tests {
v := v
runTest := func(t *testing.T, locked bool) {
runTest := func(t *testing.T, mayUnlock bool) {
b := NewPageAlloc(v.beforeAlloc, v.beforeScav)
defer FreePageAlloc(b)
for iter, h := range v.expect {
if got := b.Scavenge(h.request, locked); got != h.expect {
if got := b.Scavenge(h.request, mayUnlock); got != h.expect {
t.Fatalf("bad scavenge #%d: want %d, got %d", iter+1, h.expect, got)
}
}
@ -436,7 +436,7 @@ func TestPageAllocScavenge(t *testing.T) {
t.Run(name, func(t *testing.T) {
runTest(t, false)
})
t.Run(name+"Locked", func(t *testing.T) {
t.Run(name+"MayUnlock", func(t *testing.T) {
runTest(t, true)
})
}

View File

@ -246,8 +246,8 @@ func sweepone() uintptr {
// Decrement the number of active sweepers and if this is the
// last one print trace information.
if atomic.Xadd(&mheap_.sweepers, -1) == 0 && atomic.Load(&mheap_.sweepdone) != 0 {
// Since the sweeper is done, reset the scavenger's pointer
// into the heap and wake it if necessary.
// Since the sweeper is done, move the scavenge gen forward (signalling
// that there's new work to do) and wake the scavenger.
//
// The scavenger is signaled by the last sweeper because once
// sweeping is done, we will definitely have useful work for
@ -259,7 +259,7 @@ func sweepone() uintptr {
// with scavenging work.
systemstack(func() {
lock(&mheap_.lock)
mheap_.pages.resetScavengeAddr()
mheap_.pages.scavengeStartGen()
unlock(&mheap_.lock)
})
// Since we might sweep in an allocation path, it's not possible

View File

@ -98,7 +98,7 @@ type mheap struct {
// For !go115NewMCentralImpl.
sweepSpans [2]gcSweepBuf
// _ uint32 // align uint64 fields on 32-bit for atomics
_ uint32 // align uint64 fields on 32-bit for atomics
// Proportional sweep
//
@ -1389,7 +1389,7 @@ func (h *mheap) grow(npage uintptr) bool {
if overage := uintptr(retained + uint64(totalGrowth) - h.scavengeGoal); todo > overage {
todo = overage
}
h.pages.scavenge(todo, true)
h.pages.scavenge(todo, false)
}
return true
}
@ -1473,9 +1473,9 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool) {
h.freeMSpanLocked(s)
}
// scavengeAll visits each node in the free treap and scavenges the
// treapNode's span. It then removes the scavenged span from
// unscav and adds it into scav before continuing.
// scavengeAll acquires the heap lock (blocking any additional
// manipulation of the page allocator) and iterates over the whole
// heap, scavenging every free page available.
func (h *mheap) scavengeAll() {
// Disallow malloc or panic while holding the heap lock. We do
// this here because this is a non-mallocgc entry-point to
@ -1483,14 +1483,16 @@ func (h *mheap) scavengeAll() {
gp := getg()
gp.m.mallocing++
lock(&h.lock)
// Reset the scavenger address so we have access to the whole heap.
h.pages.resetScavengeAddr()
released := h.pages.scavenge(^uintptr(0), true)
// Start a new scavenge generation so we have a chance to walk
// over the whole heap.
h.pages.scavengeStartGen()
released := h.pages.scavenge(^uintptr(0), false)
gen := h.pages.scav.gen
unlock(&h.lock)
gp.m.mallocing--
if debug.scavtrace > 0 {
printScavTrace(released, true)
printScavTrace(gen, released, true)
}
}

View File

@ -88,11 +88,6 @@ const (
// value in the shifted address space, but searchAddr is stored as a regular
// memory address. See arenaBaseOffset for details.
maxSearchAddr = ^uintptr(0) - arenaBaseOffset
// Minimum scavAddr value, which indicates that the scavenger is done.
//
// minScavAddr + arenaBaseOffset == 0
minScavAddr = (^arenaBaseOffset + 1) & uintptrMask
)
// Global chunk index.
@ -239,15 +234,6 @@ type pageAlloc struct {
// space on architectures with segmented address spaces.
searchAddr uintptr
// The address to start a scavenge candidate search with. It
// need not point to memory contained in inUse.
scavAddr uintptr
// The amount of memory scavenged since the last scavtrace print.
//
// Read and updated atomically.
scavReleased uintptr
// start and end represent the chunk indices
// which pageAlloc knows about. It assumes
// chunks in the range [start, end) are
@ -267,6 +253,25 @@ type pageAlloc struct {
// All access is protected by the mheapLock.
inUse addrRanges
// scav stores the scavenger state.
//
// All fields are protected by mheapLock.
scav struct {
// inUse is a slice of ranges of address space which have not
// yet been looked at by the scavenger.
inUse addrRanges
// gen is the scavenge generation number.
gen uint32
// reservationBytes is how large of a reservation should be made
// in bytes of address space for each scavenge iteration.
reservationBytes uintptr
// released is the amount of memory released this generation.
released uintptr
}
// mheap_.lock. This level of indirection makes it possible
// to test pageAlloc independently of the runtime allocator.
mheapLock *mutex
@ -299,9 +304,6 @@ func (s *pageAlloc) init(mheapLock *mutex, sysStat *uint64) {
// Start with the searchAddr in a state indicating there's no free memory.
s.searchAddr = maxSearchAddr
// Start with the scavAddr in a state indicating there's nothing more to do.
s.scavAddr = minScavAddr
// Set the mheapLock.
s.mheapLock = mheapLock
}

View File

@ -65,6 +65,10 @@ type addrRanges struct {
// ranges is a slice of ranges sorted by base.
ranges []addrRange
// totalBytes is the total amount of address space in bytes counted by
// this addrRanges.
totalBytes uintptr
// sysStat is the stat to track allocations by this type
sysStat *uint64
}
@ -75,6 +79,7 @@ func (a *addrRanges) init(sysStat *uint64) {
ranges.cap = 16
ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), sys.PtrSize, sysStat))
a.sysStat = sysStat
a.totalBytes = 0
}
// findSucc returns the first index in a such that base is
@ -158,4 +163,68 @@ func (a *addrRanges) add(r addrRange) {
}
a.ranges[i] = r
}
a.totalBytes += r.size()
}
// removeLast removes and returns the highest-addressed contiguous range
// of a, or the last nBytes of that range, whichever is smaller. If a is
// empty, it returns an empty range.
func (a *addrRanges) removeLast(nBytes uintptr) addrRange {
if len(a.ranges) == 0 {
return addrRange{}
}
r := a.ranges[len(a.ranges)-1]
size := r.size()
if size > nBytes {
newLimit := r.limit - nBytes
a.ranges[len(a.ranges)-1].limit = newLimit
a.totalBytes -= nBytes
return addrRange{newLimit, r.limit}
}
a.ranges = a.ranges[:len(a.ranges)-1]
a.totalBytes -= size
return r
}
// removeGreaterEqual removes the ranges of a which are at or above addr, and
// additionally splits any range containing addr.
func (a *addrRanges) removeGreaterEqual(addr uintptr) {
pivot := a.findSucc(addr)
if pivot == 0 {
// addr is before all ranges in a.
a.totalBytes = 0
a.ranges = a.ranges[:0]
return
}
removed := uintptr(0)
for _, r := range a.ranges[pivot:] {
removed += r.size()
}
if r := a.ranges[pivot-1]; r.contains(addr) {
removed += r.size()
r = r.subtract(addrRange{addr, maxSearchAddr})
if r.size() == 0 {
pivot--
} else {
removed -= r.size()
a.ranges[pivot-1] = r
}
}
a.ranges = a.ranges[:pivot]
a.totalBytes -= removed
}
// cloneInto makes a deep clone of a's state into b, re-using
// b's ranges if able.
func (a *addrRanges) cloneInto(b *addrRanges) {
if len(a.ranges) > cap(b.ranges) {
// Grow the array.
ranges := (*notInHeapSlice)(unsafe.Pointer(&b.ranges))
ranges.len = 0
ranges.cap = cap(a.ranges)
ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), sys.PtrSize, b.sysStat))
}
b.ranges = b.ranges[:len(a.ranges)]
b.totalBytes = a.totalBytes
copy(b.ranges, a.ranges)
}