
runtime: track the number of free unscavenged huge pages

This change tracks the number of potentially free, unscavenged huge
pages, which will be used to inform the rate at which scavenging should
occur.

For #30333.

Change-Id: I47663e5ffb64cac44ffa10db158486783f707479
Reviewed-on: https://go-review.googlesource.com/c/go/+/170860
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
Michael Anthony Knyszek, 2019-04-29 21:02:18 +00:00 (committed by Michael Knyszek)
Parent: a62b5723be
Commit: f4a5ae5594
4 changed files with 50 additions and 2 deletions


@@ -36,6 +36,8 @@ var Atoi32 = atoi32
var Nanotime = nanotime

var PhysHugePageSize = physHugePageSize

type LFNode struct {
	Next    uint64
	Pushcnt uintptr
@@ -516,6 +518,26 @@ func MapTombstoneCheck(m map[int]int) {
	}
}

// UnscavHugePagesSlow returns the value of mheap_.free.unscavHugePages
// and the number of unscavenged huge pages calculated by
// scanning the heap.
func UnscavHugePagesSlow() (uintptr, uintptr) {
	var base, slow uintptr
	// Run on the system stack to avoid deadlock from stack growth
	// trying to acquire the heap lock.
	systemstack(func() {
		lock(&mheap_.lock)
		base = mheap_.free.unscavHugePages
		for _, s := range mheap_.allspans {
			if s.state == mSpanFree && !s.scavenged {
				slow += s.hugePages()
			}
		}
		unlock(&mheap_.lock)
	})
	return base, slow
}
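Editorial aside: the hugePages helper called above is not part of this diff. For context, a minimal standalone sketch of the arithmetic such a helper has to perform, counting only the aligned physical huge pages fully contained in a span's address range (the names and constants below are illustrative, not the runtime's):

package main

import "fmt"

const (
	pageSize         = 8192    // assumed runtime page size for this sketch
	physHugePageSize = 2 << 20 // assumed 2 MiB physical huge page size
)

// hugePagesIn counts the aligned physical huge pages fully contained
// in the address range [base, base+npages*pageSize).
func hugePagesIn(base, npages uintptr) uintptr {
	start := base
	end := base + npages*pageSize
	// Round start up and end down to huge page boundaries so that
	// only whole, aligned huge pages are counted.
	start = (start + physHugePageSize - 1) &^ (physHugePageSize - 1)
	end &^= physHugePageSize - 1
	if start >= end {
		return 0
	}
	return (end - start) / physHugePageSize
}

func main() {
	// A 6 MiB range starting 1 MiB past a huge page boundary fully
	// contains two aligned 2 MiB huge pages.
	fmt.Println(hugePagesIn(1<<20, (6<<20)/pageSize)) // 2
}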
// Span is a safe wrapper around an mspan, whose memory
// is managed manually.
type Span struct {


@@ -470,6 +470,25 @@ func TestReadMemStats(t *testing.T) {
	}
}

func TestUnscavHugePages(t *testing.T) {
	// Allocate 20 MiB and immediately free it a few times to increase
	// the chance that unscavHugePages isn't zero and that some kind of
	// accounting had to happen in the runtime.
	for j := 0; j < 3; j++ {
		var large [][]byte
		for i := 0; i < 5; i++ {
			large = append(large, make([]byte, runtime.PhysHugePageSize))
		}
		runtime.KeepAlive(large)
		runtime.GC()
	}
	base, slow := runtime.UnscavHugePagesSlow()
	if base != slow {
		logDiff(t, "unscavHugePages", reflect.ValueOf(base), reflect.ValueOf(slow))
		t.Fatal("unscavHugePages mismatch")
	}
}

func logDiff(t *testing.T, prefix string, got, want reflect.Value) {
	typ := got.Type()
	switch typ.Kind() {


@@ -40,7 +40,8 @@ import (
//go:notinheap
type mTreap struct {
-	treap *treapNode
+	treap           *treapNode
+	unscavHugePages uintptr // number of unscavenged huge pages in the treap
}
//go:notinheap
@@ -378,6 +379,9 @@ func (root *mTreap) end(mask, match treapIterType) treapIter {
// insert adds span to the large span treap.
func (root *mTreap) insert(span *mspan) {
	if !span.scavenged {
		root.unscavHugePages += span.hugePages()
	}
	base := span.base()
	var last *treapNode
	pt := &root.treap
@@ -435,6 +439,9 @@ func (root *mTreap) insert(span *mspan) {
}

func (root *mTreap) removeNode(t *treapNode) {
	if !t.span.scavenged {
		root.unscavHugePages -= t.span.hugePages()
	}
	if t.span.base() != t.key {
		throw("span and treap node base addresses do not match")
	}
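Taken together, insert and removeNode keep unscavHugePages equal at all times to the sum of hugePages() over the free, unscavenged spans in the treap, so the scavenger can read the total in O(1) instead of rescanning the heap. A self-contained sketch of that invariant, with invented stand-in types (this is not the runtime's code), whose slow recount mirrors what the base/slow comparison in UnscavHugePagesSlow validates:

package main

import "fmt"

// span stands in for the runtime's mspan in this sketch.
type span struct {
	hugePages uintptr
	scavenged bool
}

// freeSet stands in for mTreap: it maintains a running total of
// unscavenged huge pages alongside the container itself.
type freeSet struct {
	spans           map[*span]bool
	unscavHugePages uintptr
}

func (f *freeSet) insert(s *span) {
	if !s.scavenged {
		f.unscavHugePages += s.hugePages
	}
	f.spans[s] = true
}

func (f *freeSet) remove(s *span) {
	if !s.scavenged {
		f.unscavHugePages -= s.hugePages
	}
	delete(f.spans, s)
}

// slowCount recomputes the total by scanning every span, the way
// UnscavHugePagesSlow does, to cross-check the O(1) counter.
func (f *freeSet) slowCount() uintptr {
	var n uintptr
	for s := range f.spans {
		if !s.scavenged {
			n += s.hugePages
		}
	}
	return n
}

func main() {
	f := &freeSet{spans: make(map[*span]bool)}
	f.insert(&span{hugePages: 3})
	f.insert(&span{hugePages: 2, scavenged: true}) // not counted
	s := &span{hugePages: 5}
	f.insert(s)
	f.remove(s)
	fmt.Println(f.unscavHugePages, f.slowCount()) // 3 3
}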


@@ -59,7 +59,7 @@ type mheap struct {
	// on the swept stack.
	sweepSpans [2]gcSweepBuf

-	// _ uint32 // align uint64 fields on 32-bit for atomics
+	_ uint32 // align uint64 fields on 32-bit for atomics

	// Proportional sweep
	//
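The pad re-enabled above exists because 64-bit atomic operations require their operand to be 8-byte aligned, while on 32-bit platforms Go only guarantees 4-byte alignment for uint64 struct fields. Adding the uintptr-sized unscavHugePages field to the embedded mTreap shifts the offsets of the mheap fields after it, which flips whether the pad is needed. A tiny illustrative example of the idiom (the struct below is invented, not the runtime's):

package main

import (
	"fmt"
	"unsafe"
)

// counters shows the padding idiom: without the blank uint32, hits
// would sit at offset 4 on a 32-bit platform, and 64-bit atomics on
// it (e.g. atomic.AddUint64) could fault or misbehave.
type counters struct {
	flag uint32
	_    uint32 // align the uint64 field below on 32-bit platforms
	hits uint64
}

func main() {
	var c counters
	fmt.Println(unsafe.Offsetof(c.hits)) // 8
}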