
runtime: de-duplicate span scavenging

Currently, span scavenging is done nearly identically in two different
locations. This change deduplicates that logic into one shared routine,
the new (*mspan).scavenge method.

For #14045.

Change-Id: I15006b2c9af0e70b7a9eae9abb4168d3adca3860
Reviewed-on: https://go-review.googlesource.com/c/139297
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
Michael Anthony Knyszek 2018-10-02 20:23:41 +00:00 committed by Michael Knyszek
parent de31f637a9
commit e508a5f072
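
To orient the diff below, here is a minimal, self-contained toy sketch of the shape the commit message describes: the shared scavenging logic becomes a method, and both former call sites shrink to a one-line call. The names here (toySpan, scavengeOne, scavengeAll) are illustrative stand-ins, not the runtime's types; the real method also handles physical-page rounding and memstats accounting.

	package main

	import "fmt"

	// toySpan stands in for mspan: a run of pages, some of which may already
	// have been returned to the OS.
	type toySpan struct {
		npages     uintptr // pages the span covers
		npreleased uintptr // pages already returned to the OS
	}

	// scavenge reports how many pages were newly released, mirroring how the
	// shared routine in this commit returns newly released bytes.
	func (s *toySpan) scavenge() uintptr {
		released := s.npages - s.npreleased
		s.npreleased = s.npages
		return released
	}

	// Both former call sites now share the same routine.
	func scavengeOne(s *toySpan) uintptr { return s.scavenge() }

	func scavengeAll(spans []*toySpan) uintptr {
		var sum uintptr
		for _, s := range spans {
			sum += s.scavenge()
		}
		return sum
	}

	func main() {
		spans := []*toySpan{{npages: 4}, {npages: 2, npreleased: 1}}
		fmt.Println("released pages:", scavengeAll(spans)) // 4 + 1 = 5
	}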


@@ -351,6 +351,34 @@ func (s *mspan) layout() (size, n, total uintptr) {
 	return
 }
 
+func (s *mspan) scavenge() uintptr {
+	start := s.base()
+	end := start + s.npages<<_PageShift
+	if physPageSize > _PageSize {
+		// We can only release pages in
+		// physPageSize blocks, so round start
+		// and end in. (Otherwise, madvise
+		// will round them *out* and release
+		// more memory than we want.)
+		start = (start + physPageSize - 1) &^ (physPageSize - 1)
+		end &^= physPageSize - 1
+		if end <= start {
+			// start and end don't span a
+			// whole physical page.
+			return 0
+		}
+	}
+	len := end - start
+	released := len - (s.npreleased << _PageShift)
+	if physPageSize > _PageSize && released == 0 {
+		return 0
+	}
+	memstats.heap_released += uint64(released)
+	s.npreleased = len >> _PageShift
+	sysUnused(unsafe.Pointer(start), len)
+	return released
+}
+
 // recordspan adds a newly allocated span to h.allspans.
 //
 // This only happens the first time a span is allocated from
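
The inward rounding in the new method only kicks in when the physical page size exceeds the runtime's 8 KiB page size. Below is a standalone sketch, not runtime code, assuming a hypothetical 16 KiB physical page, of how start is rounded up and end rounded down so that sysUnused/madvise never releases memory outside the span:

	package main

	import "fmt"

	const (
		pageSize     = 8 << 10  // runtime page size (_PageSize)
		physPageSize = 16 << 10 // assumed physical page size, for illustration only
	)

	func main() {
		start := uintptr(0x2000)  // span start, not aligned to a physical page
		end := start + 3*pageSize // span covers three runtime pages

		// Round start up and end down to physical-page boundaries, as the
		// scavenge method above does, so nothing outside [start, end) is touched.
		alignedStart := (start + physPageSize - 1) &^ (physPageSize - 1)
		alignedEnd := end &^ (physPageSize - 1)

		fmt.Printf("start %#x -> %#x, end %#x -> %#x\n", start, alignedStart, end, alignedEnd)
		if alignedEnd <= alignedStart {
			fmt.Println("span does not cover a whole physical page; release nothing")
		} else {
			fmt.Printf("release %d KiB\n", (alignedEnd-alignedStart)>>10)
		}
	}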
@@ -1087,35 +1115,12 @@ func (h *mheap) busyList(npages uintptr) *mSpanList {
 func scavengeTreapNode(t *treapNode, now, limit uint64) uintptr {
 	s := t.spanKey
-	var sumreleased uintptr
 	if (now-uint64(s.unusedsince)) > limit && s.npreleased != s.npages {
-		start := s.base()
-		end := start + s.npages<<_PageShift
-		if physPageSize > _PageSize {
-			// We can only release pages in
-			// physPageSize blocks, so round start
-			// and end in. (Otherwise, madvise
-			// will round them *out* and release
-			// more memory than we want.)
-			start = (start + physPageSize - 1) &^ (physPageSize - 1)
-			end &^= physPageSize - 1
-			if end <= start {
-				// start and end don't span a
-				// whole physical page.
-				return sumreleased
-			}
-		}
+		if released := s.scavenge(); released != 0 {
+			return released
+		}
-		len := end - start
-		released := len - (s.npreleased << _PageShift)
-		if physPageSize > _PageSize && released == 0 {
-			return sumreleased
-		}
-		memstats.heap_released += uint64(released)
-		sumreleased += released
-		s.npreleased = len >> _PageShift
-		sysUnused(unsafe.Pointer(start), len)
 	}
-	return sumreleased
+	return 0
 }
 
 func scavengelist(list *mSpanList, now, limit uint64) uintptr {
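
Both call sites now also inherit the same accounting: the value the method returns is only what was not previously released, computed as len minus npreleased<<_PageShift, so repeated scavenging of the same span is not double-counted in heap_released. A small worked example of just that arithmetic (simplified, assuming 8 KiB runtime pages; not runtime code):

	package main

	import "fmt"

	const pageShift = 13 // 8 KiB runtime pages (_PageShift), assumed for illustration

	func main() {
		npages := uintptr(4)     // the span covers four runtime pages (32 KiB)
		npreleased := uintptr(1) // one page was already returned to the OS earlier

		length := npages << pageShift              // bytes the span covers
		released := length - npreleased<<pageShift // only newly released bytes are counted

		fmt.Printf("span %d KiB, previously released %d KiB, newly counted %d KiB\n",
			length>>10, (npreleased<<pageShift)>>10, released>>10)
		// The method then records npreleased = len >> _PageShift, so a second
		// pass over the same span reports zero newly released bytes.
	}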
@@ -1128,32 +1133,7 @@ func scavengelist(list *mSpanList, now, limit uint64) uintptr {
 		if (now-uint64(s.unusedsince)) <= limit || s.npreleased == s.npages {
 			continue
 		}
-		start := s.base()
-		end := start + s.npages<<_PageShift
-		if physPageSize > _PageSize {
-			// We can only release pages in
-			// physPageSize blocks, so round start
-			// and end in. (Otherwise, madvise
-			// will round them *out* and release
-			// more memory than we want.)
-			start = (start + physPageSize - 1) &^ (physPageSize - 1)
-			end &^= physPageSize - 1
-			if end <= start {
-				// start and end don't span a
-				// whole physical page.
-				continue
-			}
-		}
-		len := end - start
-		released := len - (s.npreleased << _PageShift)
-		if physPageSize > _PageSize && released == 0 {
-			continue
-		}
-		memstats.heap_released += uint64(released)
-		sumreleased += released
-		s.npreleased = len >> _PageShift
-		sysUnused(unsafe.Pointer(start), len)
+		sumreleased += s.scavenge()
 	}
 	return sumreleased
 }