
runtime: ensure free and unscavenged spans may be backed by huge pages

This change adds a new sysHugePage function to provide the equivalent of
Linux's madvise(MADV_HUGEPAGE) support to the runtime. It then uses
sysHugePage to mark a newly-coalesced free span as backable by huge
pages to make the freeHugePages approximation a bit more accurate.

The problem being solved here is that if a large free span is composed
of many small spans which were coalesced together, then there's a chance
that they have had madvise(MADV_NOHUGEPAGE) called on them at some point,
which makes freeHugePages less accurate.

For #30333.

Change-Id: Idd4b02567619fc8d45647d9abd18da42f96f0522
Reviewed-on: https://go-review.googlesource.com/c/go/+/173338
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
Michael Anthony Knyszek 2019-04-18 15:42:58 +00:00 committed by Michael Knyszek
parent 5c15ed64de
commit 31c4e09915
8 changed files with 44 additions and 9 deletions
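
The change is easiest to picture in terms of the two madvise knobs the commit message names. Below is a rough user-space sketch (not the runtime's code) of what they do; it is Linux-only, and the anonymous 8 MiB mapping is an arbitrary choice for illustration.

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// Map 8 MiB of anonymous, page-aligned memory.
	mem, err := syscall.Mmap(-1, 0, 8<<20,
		syscall.PROT_READ|syscall.PROT_WRITE,
		syscall.MAP_ANON|syscall.MAP_PRIVATE)
	if err != nil {
		panic(err)
	}
	defer syscall.Munmap(mem)

	// Opt the region out of transparent huge pages, roughly what sysUnused
	// may have done to parts of a freed span at some point.
	if err := syscall.Madvise(mem, syscall.MADV_NOHUGEPAGE); err != nil {
		fmt.Println("MADV_NOHUGEPAGE:", err) // e.g. EINVAL without THP support
	}

	// Opt it back in, which is the operation the new sysHugePage wraps for
	// large, newly coalesced free spans.
	if err := syscall.Madvise(mem, syscall.MADV_HUGEPAGE); err != nil {
		fmt.Println("MADV_HUGEPAGE:", err)
	} else {
		fmt.Println("region advised as huge-page-backable")
	}
}

Whether the kernel actually installs huge pages afterwards still depends on the transparent huge page configuration; the advice only marks the range as eligible.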


@@ -35,6 +35,9 @@ func sysUnused(v unsafe.Pointer, n uintptr) {
 func sysUsed(v unsafe.Pointer, n uintptr) {
 }
+func sysHugePage(v unsafe.Pointer, n uintptr) {
+}
 // Don't split the stack as this function may be invoked without a valid G,
 // which prevents us from allocating more stack.
 //go:nosplit


@@ -29,6 +29,9 @@ func sysUnused(v unsafe.Pointer, n uintptr) {
 func sysUsed(v unsafe.Pointer, n uintptr) {
 }
+func sysHugePage(v unsafe.Pointer, n uintptr) {
+}
 // Don't split the stack as this function may be invoked without a valid G,
 // which prevents us from allocating more stack.
 //go:nosplit


@@ -33,6 +33,9 @@ func sysUsed(v unsafe.Pointer, n uintptr) {
 	madvise(v, n, _MADV_FREE_REUSE)
 }
+func sysHugePage(v unsafe.Pointer, n uintptr) {
+}
 // Don't split the stack as this function may be invoked without a valid G,
 // which prevents us from allocating more stack.
 //go:nosplit


@@ -26,6 +26,9 @@ func sysUnused(v unsafe.Pointer, n uintptr) {
 func sysUsed(v unsafe.Pointer, n uintptr) {
 }
+func sysHugePage(v unsafe.Pointer, n uintptr) {
+}
 // Don't split the stack as this function may be invoked without a valid G,
 // which prevents us from allocating more stack.
 //go:nosplit


@@ -117,16 +117,19 @@ func sysUnused(v unsafe.Pointer, n uintptr) {
 }
 func sysUsed(v unsafe.Pointer, n uintptr) {
-	if physHugePageSize != 0 {
-		// Partially undo the NOHUGEPAGE marks from sysUnused
-		// for whole huge pages between v and v+n. This may
-		// leave huge pages off at the end points v and v+n
-		// even though allocations may cover these entire huge
-		// pages. We could detect this and undo NOHUGEPAGE on
-		// the end points as well, but it's probably not worth
-		// the cost because when neighboring allocations are
-		// freed sysUnused will just set NOHUGEPAGE again.
+	// Partially undo the NOHUGEPAGE marks from sysUnused
+	// for whole huge pages between v and v+n. This may
+	// leave huge pages off at the end points v and v+n
+	// even though allocations may cover these entire huge
+	// pages. We could detect this and undo NOHUGEPAGE on
+	// the end points as well, but it's probably not worth
+	// the cost because when neighboring allocations are
+	// freed sysUnused will just set NOHUGEPAGE again.
+	sysHugePage(v, n)
+}
+func sysHugePage(v unsafe.Pointer, n uintptr) {
+	if physHugePageSize != 0 {
 		// Round v up to a huge page boundary.
 		beg := (uintptr(v) + (physHugePageSize - 1)) &^ (physHugePageSize - 1)
 		// Round v+n down to a huge page boundary.
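
The rest of this hunk is cut off by the diff view. What the visible comments describe is the boundary arithmetic: only huge pages wholly inside [v, v+n) are touched, so pages straddling the end points keep whatever state they had. A small, self-contained sketch of that arithmetic, with an assumed 2 MiB huge page size and an arbitrary sample region:

package main

import "fmt"

const hugePageSize uintptr = 2 << 20 // assumed huge page size

func main() {
	// A sample region [v, v+n) whose ends are not huge-page-aligned.
	v := uintptr(0x203000)
	n := uintptr(7 << 20)

	// Round v up and v+n down, mirroring the comments in the hunk above.
	beg := (v + hugePageSize - 1) &^ (hugePageSize - 1)
	end := (v + n) &^ (hugePageSize - 1)

	if beg < end {
		fmt.Printf("whole huge pages inside the region: %d\n", (end-beg)/hugePageSize)
	}
	fmt.Printf("partial huge page at the start left alone: %v\n", beg != v)
	fmt.Printf("partial huge page at the end left alone: %v\n", end != v+n)
}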


@@ -173,6 +173,9 @@ func sysUnused(v unsafe.Pointer, n uintptr) {
 func sysUsed(v unsafe.Pointer, n uintptr) {
 }
+func sysHugePage(v unsafe.Pointer, n uintptr) {
+}
 func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) {
 	// sysReserve has already allocated all heap memory,
 	// but has not adjusted stats.


@@ -81,6 +81,9 @@ func sysUsed(v unsafe.Pointer, n uintptr) {
 	}
 }
+func sysHugePage(v unsafe.Pointer, n uintptr) {
+}
 // Don't split the stack as this function may be invoked without a valid G,
 // which prevents us from allocating more stack.
 //go:nosplit


@@ -502,6 +502,8 @@ func (h *mheap) coalesce(s *mspan) {
 		h.free.insert(other)
 	}
+	hpBefore := s.hugePages()
 	// Coalesce with earlier, later spans.
 	if before := spanOf(s.base() - 1); before != nil && before.state == mSpanFree {
 		if s.scavenged == before.scavenged {
@@ -519,6 +521,18 @@ func (h *mheap) coalesce(s *mspan) {
 			realign(s, after, after)
 		}
 	}
+	if !s.scavenged && s.hugePages() > hpBefore {
+		// If s has grown such that it now may contain more huge pages than it
+		// did before, then mark the whole region as huge-page-backable.
+		//
+		// Otherwise, on systems where we break up huge pages (like Linux)
+		// s may not be backed by huge pages because it could be made up of
+		// pieces which are broken up in the underlying VMA. The primary issue
+		// with this is that it can lead to a poor estimate of the amount of
+		// free memory backed by huge pages for determining the scavenging rate.
+		sysHugePage(unsafe.Pointer(s.base()), s.npages*pageSize)
+	}
 }
 // hugePages returns the number of aligned physical huge pages in the memory
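
The coalescing hunk above compares s.hugePages() before and after merging and only re-advises the region when the count of whole, aligned huge pages has grown. A self-contained sketch of that idea follows; the 8 KiB page size and 2 MiB huge page size are assumptions for illustration, and hugePages here is a stand-in for the runtime method, not its actual code.

package main

import "fmt"

const (
	pageSize     uintptr = 8 << 10 // assumed runtime page size
	hugePageSize uintptr = 2 << 20 // assumed physical huge page size
)

// hugePages counts the huge pages wholly contained in [base, base+npages*pageSize).
func hugePages(base, npages uintptr) uintptr {
	start := (base + hugePageSize - 1) &^ (hugePageSize - 1) // round the base up
	end := (base + npages*pageSize) &^ (hugePageSize - 1)    // round the limit down
	if start < end {
		return (end - start) / hugePageSize
	}
	return 0
}

func main() {
	// Two adjacent free spans of 1.5 MiB each: neither contains a whole,
	// aligned 2 MiB huge page on its own.
	const spanPages = (3 << 20) / 2 / pageSize
	a := uintptr(2 << 20) // starts on a huge page boundary
	b := a + spanPages*pageSize
	fmt.Println(hugePages(a, spanPages), hugePages(b, spanPages)) // 0 0

	// The coalesced span does contain one, so its count grows past hpBefore
	// and the runtime re-marks the whole region with sysHugePage.
	fmt.Println(hugePages(a, 2*spanPages)) // 1
}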