
runtime: refactor coalescing into its own method

The coalescing process is complex and in a follow-up change we'll need
to do it in more than one place, so this change factors out the
coalescing code in freeSpanLocked into a method on mheap.

Change-Id: Ia266b6cb1157c1b8d3d8a4287b42fbcc032bbf3a
Reviewed-on: https://go-review.googlesource.com/c/157838
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
Authored by Michael Anthony Knyszek on 2019-01-14 21:27:29 +00:00; committed by Michael Knyszek
parent e2ff73286f
commit 79ac638e41
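
For orientation, a minimal, self-contained sketch of the shape of this refactor. The stub types and bodies are stand-ins, not the real runtime code: the actual mheap and mspan carry far more state, and the real method bodies are the ones shown in the diff below.

package main

import "fmt"

// Stand-in types: the real mheap and mspan in runtime/mheap.go are much richer.
type mspan struct {
	startAddr uintptr
	npages    uintptr
}

type mheap struct{}

// coalesce merges s with any adjacent free spans. In the real change this is
// the logic lifted out of freeSpanLocked (see the first hunk below).
func (h *mheap) coalesce(s *mspan) {
	// Merge with the preceding free span, merge with the following free span,
	// and re-scavenge the result if anything merged was already scavenged.
	fmt.Printf("coalescing span at %#x (%d pages)\n", s.startAddr, s.npages)
}

// freeSpanLocked now delegates the merging step instead of inlining it.
// The parameters mirror the real signature but are unused in this sketch.
func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince int64) {
	// ... accounting and state changes elided ...

	// Coalesce span with neighbors.
	h.coalesce(s)

	// ... insert s into the appropriate treap, elided ...
}

func main() {
	h := &mheap{}
	h.freeSpanLocked(&mspan{startAddr: 0x1000, npages: 2}, false, true, 0)
}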


@@ -419,6 +419,65 @@ func (s *mspan) physPageBounds() (uintptr, uintptr) {
	return start, end
}

func (h *mheap) coalesce(s *mspan) {
	// We scavenge s at the end after coalescing if s or anything
	// it merged with is marked scavenged.
	needsScavenge := false
	prescavenged := s.released() // number of bytes already scavenged.

	// Coalesce with earlier, later spans.
	if before := spanOf(s.base() - 1); before != nil && before.state == mSpanFree {
		// Now adjust s.
		s.startAddr = before.startAddr
		s.npages += before.npages
		s.needzero |= before.needzero
		h.setSpan(before.base(), s)
		// If before or s are scavenged, then we need to scavenge the final coalesced span.
		needsScavenge = needsScavenge || before.scavenged || s.scavenged
		prescavenged += before.released()
		// The size is potentially changing so the treap needs to delete adjacent nodes and
		// insert back as a combined node.
		if before.scavenged {
			h.scav.removeSpan(before)
		} else {
			h.free.removeSpan(before)
		}
		before.state = mSpanDead
		h.spanalloc.free(unsafe.Pointer(before))
	}

	// Now check to see if next (greater addresses) span is free and can be coalesced.
	if after := spanOf(s.base() + s.npages*pageSize); after != nil && after.state == mSpanFree {
		s.npages += after.npages
		s.needzero |= after.needzero
		h.setSpan(s.base()+s.npages*pageSize-1, s)
		needsScavenge = needsScavenge || after.scavenged || s.scavenged
		prescavenged += after.released()
		if after.scavenged {
			h.scav.removeSpan(after)
		} else {
			h.free.removeSpan(after)
		}
		after.state = mSpanDead
		h.spanalloc.free(unsafe.Pointer(after))
	}

	if needsScavenge {
		// When coalescing spans, some physical pages which
		// were not returned to the OS previously because
		// they were only partially covered by the span suddenly
		// become available for scavenging. We want to make sure
		// those holes are filled in, and the span is properly
		// scavenged. Rather than trying to detect those holes
		// directly, we collect how many bytes were already
		// scavenged above and subtract that from heap_released
		// before re-scavenging the entire newly-coalesced span,
		// which will implicitly bump up heap_released.
		memstats.heap_released -= uint64(prescavenged)
		s.scavenge()
	}
}

func (s *mspan) scavenge() uintptr {
	// start and end must be rounded in, otherwise madvise
	// will round them *out* and release more memory
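
The heap_released adjustment in the needsScavenge branch of coalesce is the subtle part of the moved code. A toy, self-contained illustration with made-up numbers (heapReleased here is only a stand-in for memstats.heap_released):

package main

import "fmt"

func main() {
	// Stand-in for memstats.heap_released; pretend 1 MiB is currently
	// counted as released to the OS.
	heapReleased := uint64(1 << 20)

	// Suppose s already had 8 KiB released and the span it merged with
	// had 16 KiB released: that is the prescavenged total.
	prescavenged := uint64(8<<10 + 16<<10)

	// coalesce subtracts the old per-span totals first...
	heapReleased -= prescavenged

	// ...and then s.scavenge() re-releases the whole coalesced span,
	// which now also covers pages that straddled the old span boundary.
	// Say that comes to 32 KiB; scavenging bumps the counter back up.
	heapReleased += 32 << 10

	// Net change: +8 KiB, the pages that only became releasable once the
	// spans were merged, with nothing double-counted.
	fmt.Println(heapReleased - 1<<20) // 8192
}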
@@ -1215,62 +1274,8 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince int64) {
		s.unusedsince = nanotime()
	}

	// We scavenge s at the end after coalescing if s or anything
	// it merged with is marked scavenged.
	needsScavenge := false
	prescavenged := s.released() // number of bytes already scavenged.

	// Coalesce with earlier, later spans.
	if before := spanOf(s.base() - 1); before != nil && before.state == mSpanFree {
		// Now adjust s.
		s.startAddr = before.startAddr
		s.npages += before.npages
		s.needzero |= before.needzero
		h.setSpan(before.base(), s)
		// If before or s are scavenged, then we need to scavenge the final coalesced span.
		needsScavenge = needsScavenge || before.scavenged || s.scavenged
		prescavenged += before.released()
		// The size is potentially changing so the treap needs to delete adjacent nodes and
		// insert back as a combined node.
		if before.scavenged {
			h.scav.removeSpan(before)
		} else {
			h.free.removeSpan(before)
		}
		before.state = mSpanDead
		h.spanalloc.free(unsafe.Pointer(before))
	}

	// Now check to see if next (greater addresses) span is free and can be coalesced.
	if after := spanOf(s.base() + s.npages*pageSize); after != nil && after.state == mSpanFree {
		s.npages += after.npages
		s.needzero |= after.needzero
		h.setSpan(s.base()+s.npages*pageSize-1, s)
		needsScavenge = needsScavenge || after.scavenged || s.scavenged
		prescavenged += after.released()
		if after.scavenged {
			h.scav.removeSpan(after)
		} else {
			h.free.removeSpan(after)
		}
		after.state = mSpanDead
		h.spanalloc.free(unsafe.Pointer(after))
	}

	if needsScavenge {
		// When coalescing spans, some physical pages which
		// were not returned to the OS previously because
		// they were only partially covered by the span suddenly
		// become available for scavenging. We want to make sure
		// those holes are filled in, and the span is properly
		// scavenged. Rather than trying to detect those holes
		// directly, we collect how many bytes were already
		// scavenged above and subtract that from heap_released
		// before re-scavenging the entire newly-coalesced span,
		// which will implicitly bump up heap_released.
		memstats.heap_released -= uint64(prescavenged)
		s.scavenge()
	}
	// Coalesce span with neighbors.
	h.coalesce(s)

	// Insert s into the appropriate treap.
	if s.scavenged {