
runtime: call sysHugePage less often

Currently, when we coalesce memory, we make a sysHugePage call
(MADV_HUGEPAGE) to ensure that freed and coalesced huge pages are
treated as such, so that the scavenger's assumptions about performance
stay in line with reality.
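
(For context: on Linux, sysHugePage boils down to madvise(MADV_HUGEPAGE)
over the huge-page-aligned interior of the given range. The sketch below
is an approximation from memory, not the exact source; madvise,
physHugePageSize, and _MADV_HUGEPAGE are runtime-internal names.)

	// Sketch: advise the kernel that the huge-page-aligned interior
	// of [v, v+n) may be backed by transparent huge pages.
	func sysHugePage(v unsafe.Pointer, n uintptr) {
		if physHugePageSize != 0 {
			// Round the start up and the end down to huge page
			// boundaries; the advice only applies to whole huge pages.
			beg := (uintptr(v) + (physHugePageSize - 1)) &^ (physHugePageSize - 1)
			end := (uintptr(v) + n) &^ (physHugePageSize - 1)
			if beg < end {
				madvise(unsafe.Pointer(beg), end-beg, _MADV_HUGEPAGE)
			}
		}
	}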

Unfortunately we make this call far too often: we make it whenever the
huge page count of the span we're coalescing into changes at all, not
taking into account that the span may simply absorb huge pages already
counted by its neighbors rather than actually creating a new one.

This change calls sysHugePage only if the coalesced span's huge page
count exceeds the sum of the original counts of the span and its
neighbors (i.e. a new huge page was created purely by how the pieces
align). Calls to sysHugePage now happen much less frequently, as
intended.
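
To make the new condition concrete, here is a small standalone sketch.
hugePagesIn is a hypothetical stand-in for mspan.hugePages, and 2 MiB
huge pages are assumed: three adjacent free regions that individually
contain no whole aligned huge page can, once coalesced, contain one,
and only then does the new check fire.

	package main

	import "fmt"

	// Assumed huge page size for illustration (2 MiB transparent huge pages).
	const hugePageSize = 2 << 20

	// hugePagesIn counts the whole, aligned huge pages contained in
	// [base, base+length). Hypothetical stand-in for mspan.hugePages.
	func hugePagesIn(base, length uintptr) uintptr {
		start := (base + hugePageSize - 1) &^ (hugePageSize - 1) // round start up
		end := (base + length) &^ (hugePageSize - 1)             // round end down
		if start < end {
			return (end - start) / hugePageSize
		}
		return 0
	}

	func main() {
		const mib = 1 << 20
		// Three adjacent free regions, none of which individually
		// contains a whole aligned 2 MiB huge page.
		hpBefore := hugePagesIn(2*mib-64*1024, 64*1024) // [2MiB-64KiB, 2MiB)
		hpMiddle := hugePagesIn(2*mib, 2*mib-64*1024)   // [2MiB, 4MiB-64KiB)
		hpAfter := hugePagesIn(4*mib-64*1024, 64*1024)  // [4MiB-64KiB, 4MiB)

		// The coalesced region [2MiB-64KiB, 4MiB) contains one: [2MiB, 4MiB).
		merged := hugePagesIn(2*mib-64*1024, 2*mib+64*1024)

		fmt.Println(hpBefore, hpMiddle, hpAfter, merged) // 0 0 0 1
		fmt.Println(merged > hpBefore+hpMiddle+hpAfter)  // true: call sysHugePage
	}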

Updates #32828.

Change-Id: Ia175919cb79b730a658250425f97189e27d7fda3
Reviewed-on: https://go-review.googlesource.com/c/go/+/186926
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
Author: Michael Knyszek, 2019-07-19 19:35:24 -04:00 (committed by Austin Clements)
parent a41ebe6e25
commit 8c3040d768

src/runtime/mheap.go

@@ -514,11 +514,13 @@ func (h *mheap) coalesce(s *mspan) {
 		h.free.insert(other)
 	}
-	hpBefore := s.hugePages()
+	hpMiddle := s.hugePages()
 	// Coalesce with earlier, later spans.
+	var hpBefore uintptr
 	if before := spanOf(s.base() - 1); before != nil && before.state == mSpanFree {
 		if s.scavenged == before.scavenged {
+			hpBefore = before.hugePages()
 			merge(before, s, before)
 		} else {
 			realign(before, s, before)
@@ -526,23 +528,29 @@ func (h *mheap) coalesce(s *mspan) {
 	}
 	// Now check to see if next (greater addresses) span is free and can be coalesced.
+	var hpAfter uintptr
 	if after := spanOf(s.base() + s.npages*pageSize); after != nil && after.state == mSpanFree {
 		if s.scavenged == after.scavenged {
+			hpAfter = after.hugePages()
 			merge(s, after, after)
 		} else {
 			realign(s, after, after)
 		}
 	}
-	if !s.scavenged && s.hugePages() > hpBefore {
+	if !s.scavenged && s.hugePages() > hpBefore+hpMiddle+hpAfter {
 		// If s has grown such that it now may contain more huge pages than it
-		// did before, then mark the whole region as huge-page-backable.
+		// and its now-coalesced neighbors did before, then mark the whole region
+		// as huge-page-backable.
 		//
 		// Otherwise, on systems where we break up huge pages (like Linux)
 		// s may not be backed by huge pages because it could be made up of
 		// pieces which are broken up in the underlying VMA. The primary issue
 		// with this is that it can lead to a poor estimate of the amount of
 		// free memory backed by huge pages for determining the scavenging rate.
 		//
+		// TODO(mknyszek): Measure the performance characteristics of sysHugePage
+		// and determine whether it makes sense to only sysHugePage on the pages
+		// that matter, or if it's better to just mark the whole region.
 		sysHugePage(unsafe.Pointer(s.base()), s.npages*pageSize)
 	}
 }
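
For reference, the counts being compared above (hpBefore, hpMiddle,
hpAfter, and s.hugePages()) come from mspan.hugePages, which counts the
aligned physical huge pages wholly contained in a span. A sketch of that
helper as the runtime defined it around this change (reconstructed from
memory, so details may differ slightly from the actual source):

	// hugePages returns the number of aligned physical huge pages
	// in the memory region owned by this span.
	func (s *mspan) hugePages() uintptr {
		if physHugePageSize == 0 || s.npages < physHugePageSize/pageSize {
			return 0
		}
		start := s.base()
		end := start + s.npages*pageSize
		if physHugePageSize > pageSize {
			// Round start up and end down so that only whole,
			// aligned huge pages are counted.
			start = (start + physHugePageSize - 1) &^ (physHugePageSize - 1)
			end &^= physHugePageSize - 1
		}
		if start < end {
			return (end - start) / physHugePageSize
		}
		return 0
	}

Because the neighbors' counts are captured before merging, the sum
hpBefore+hpMiddle+hpAfter can only be exceeded when coalescing makes a
previously split huge page whole, which is exactly the case where
re-advising the kernel with sysHugePage is worthwhile.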