
runtime: add scavenging code for new page allocator

This change adds a scavenger for the new page allocator along with
tests. The scavenger walks over the heap backwards once per GC, looking
for memory to scavenge. It walks across the heap without any lock held,
searching optimistically. If it finds what appears to be a scavenging
candidate it acquires the heap lock and attempts to verify it. Upon
verification it then scavenges.

Notably, unlike the old scavenger, it doesn't show any preference for
huge pages and instead follows a more strict last-page-first policy.
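
To illustrate that optimistic-scan-then-verify flow outside the runtime, here is a minimal, self-contained sketch with toy types and invented names (not part of this change; the real logic is scavengeOne in mgcscavenge.go below):

```go
package main

import (
	"fmt"
	"sync"
)

// toyHeap stands in for the page allocator: a slice of chunks guarded by a lock.
type toyHeap struct {
	mu     sync.Mutex
	chunks []int // free-and-unscavenged pages per chunk, highest address last
}

// scavengeOne walks the chunks from highest to lowest address with no lock
// held, and only takes the lock to verify and scavenge a candidate it found.
func (h *toyHeap) scavengeOne() int {
	for i := len(h.chunks) - 1; i >= 0; i-- {
		if h.chunks[i] == 0 { // optimistic, unlocked check
			continue
		}
		h.mu.Lock() // candidate found: lock and re-verify
		if n := h.chunks[i]; n > 0 {
			h.chunks[i] = 0 // "scavenge" the pages
			h.mu.Unlock()
			return n
		}
		h.mu.Unlock()
	}
	return 0
}

func main() {
	h := &toyHeap{chunks: []int{0, 3, 0, 5}}
	for n := h.scavengeOne(); n > 0; n = h.scavengeOne() {
		fmt.Println("scavenged", n, "pages") // releases 5, then 3: last-page-first
	}
}
```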

Updates #35112.

Change-Id: I0621ef73c999a471843eab2d1307ae5679dd18d6
Reviewed-on: https://go-review.googlesource.com/c/go/+/195697
Reviewed-by: Keith Randall <khr@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
Michael Anthony Knyszek 2019-08-21 00:24:25 +00:00 committed by Michael Knyszek
parent 39e8cb0faa
commit 73317080e1
7 changed files with 880 additions and 21 deletions

src/runtime/export_test.go

@@ -38,7 +38,7 @@ var Nanotime = nanotime
var NetpollBreak = netpollBreak
var Usleep = usleep
var PageSize = pageSize
var PhysPageSize = physPageSize
var PhysHugePageSize = physHugePageSize
var NetpollGenericInit = netpollGenericInit
@@ -733,6 +733,7 @@ func RunGetgThreadSwitchTest() {
}
const (
PageSize = pageSize
PallocChunkPages = pallocChunkPages
)
@@ -825,6 +826,26 @@ func StringifyPallocBits(b *PallocBits, r BitRange) string {
return str
}
// Expose pallocData for testing.
type PallocData pallocData
func (d *PallocData) FindScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
return (*pallocData)(d).findScavengeCandidate(searchIdx, min, max)
}
func (d *PallocData) AllocRange(i, n uint) { (*pallocData)(d).allocRange(i, n) }
func (d *PallocData) ScavengedSetRange(i, n uint) {
(*pallocData)(d).scavenged.setRange(i, n)
}
func (d *PallocData) PallocBits() *PallocBits {
return (*PallocBits)(&(*pallocData)(d).pallocBits)
}
func (d *PallocData) Scavenged() *PallocBits {
return (*PallocBits)(&(*pallocData)(d).scavenged)
}
// Expose fillAligned for testing.
func FillAligned(x uint64, m uint) uint64 { return fillAligned(x, m) }
// Expose chunk index type.
type ChunkIdx chunkIdx
@@ -837,8 +858,14 @@ func (p *PageAlloc) Free(base, npages uintptr) { (*pageAlloc)(p).free(base, n
func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
}
func (p *PageAlloc) PallocBits(i ChunkIdx) *PallocBits {
return (*PallocBits)(&((*pageAlloc)(p).chunks[i]))
func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
return (*PallocData)(&((*pageAlloc)(p).chunks[i]))
}
func (p *PageAlloc) Scavenge(nbytes uintptr) (r uintptr) {
systemstack(func() {
r = (*pageAlloc)(p).scavenge(nbytes)
})
return
}
// BitRange represents a range over a bitmap.
@@ -847,14 +874,25 @@ type BitRange struct {
}
// NewPageAlloc creates a new page allocator for testing and
// initializes it with the chunks map. Each key represents a chunk
// index and each value is a series of bit ranges to set within that
// chunk.
func NewPageAlloc(chunks map[ChunkIdx][]BitRange) *PageAlloc {
// initializes it with the scav and chunks maps. Each key in these maps
// represents a chunk index and each value is a series of bit ranges to
// set within each bitmap's chunk.
//
// The initialization of the pageAlloc preserves the invariant that if a
// scavenged bit is set the alloc bit is necessarily unset, so some
// of the bits described by scav may be cleared in the final bitmap if
// ranges in chunks overlap with them.
//
// scav is optional, and if nil, the scavenged bitmap will be cleared
// (as opposed to all 1s, which it usually is). Furthermore, every
// chunk index in scav should also appear in chunks; any that do not
// are ignored.
func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
p := new(pageAlloc)
// We've got an entry, so initialize the pageAlloc.
p.init(new(mutex), nil)
p.test = true
for i, init := range chunks {
addr := chunkBase(chunkIdx(i))
@@ -864,6 +902,25 @@ func NewPageAlloc(chunks map[ChunkIdx][]BitRange) *PageAlloc {
// Initialize the bitmap and update pageAlloc metadata.
chunk := &p.chunks[chunkIndex(addr)]
// Clear all the scavenged bits, which grow sets by default.
chunk.scavenged.clearRange(0, pallocChunkPages)
// Apply scavenge state if applicable.
if scav != nil {
if scvg, ok := scav[i]; ok {
for _, s := range scvg {
// Ignore the case of s.N == 0. setRange doesn't handle
// it and it's a no-op anyway.
if s.N != 0 {
chunk.scavenged.setRange(s.I, s.N)
}
}
}
}
p.resetScavengeAddr()
// Apply alloc state.
for _, s := range init {
// Ignore the case of s.N == 0. allocRange doesn't handle
// it and it's a no-op anyway.
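For reference, the two-map form of NewPageAlloc described above would be driven from a test roughly like this hypothetical example (not part of this commit; it assumes the runtime_test package and dot-import of runtime used by the tests below):

```go
func TestNewPageAllocSketch(t *testing.T) {
	// One chunk: the first 10 pages allocated, the last 16 pages scavenged.
	b := NewPageAlloc(
		map[ChunkIdx][]BitRange{BaseChunkIdx: {{0, 10}}},
		map[ChunkIdx][]BitRange{BaseChunkIdx: {{PallocChunkPages - 16, 16}}},
	)
	defer FreePageAlloc(b)

	// An unbounded request releases whatever free, unscavenged pages remain.
	released := b.Scavenge(^uintptr(0))
	t.Logf("scavenged %d bytes", released)
}
```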

src/runtime/malloc.go

@@ -308,7 +308,7 @@ const (
//
// On other platforms, the user address space is contiguous
// and starts at 0, so no offset is necessary.
arenaBaseOffset uintptr = sys.GoarchAmd64 * (1 << 47)
arenaBaseOffset = sys.GoarchAmd64 * (1 << 47)
// Max number of threads to run garbage collection.
// 2, 3, and 4 are all plausible maximums depending

src/runtime/mgcscavenge.go

@@ -55,6 +55,11 @@
package runtime
import (
"math/bits"
"unsafe"
)
const (
// The background scavenger is paced according to these parameters.
//
@@ -408,3 +413,367 @@ func bgscavenge(c chan int) {
Gosched()
}
}
// scavenge scavenges nbytes worth of free pages, starting with the
// highest address first. Successive calls continue from where it left
// off until the heap is exhausted. Call resetScavengeAddr to bring it
// back to the top of the heap.
//
// Returns the amount of memory scavenged in bytes.
//
// s.mheapLock must not be locked.
//
// Must run on the system stack because scavengeOne must run on the
// system stack.
//
//go:systemstack
func (s *pageAlloc) scavenge(nbytes uintptr) uintptr {
released := uintptr(0)
for released < nbytes {
r := s.scavengeOne(nbytes - released)
if r == 0 {
// Nothing left to scavenge! Give up.
break
}
released += r
}
return released
}
// resetScavengeAddr sets the scavenge start address to the top of the heap's
// address space. This should be called each time the scavenger's pacing
// changes.
//
// s.mheapLock must be held.
func (s *pageAlloc) resetScavengeAddr() {
s.scavAddr = chunkBase(s.end) - 1
}
// scavengeOne starts from s.scavAddr and walks down the heap until it finds
// a contiguous run of pages to scavenge. It will try to scavenge at most
// max bytes at once, but may scavenge more to avoid breaking huge pages. Once
// it scavenges some memory it returns how much it scavenged and updates s.scavAddr
// appropriately. s.scavAddr must be reset manually and externally.
//
// Should it exhaust the heap, it will return 0 and set s.scavAddr to minScavAddr.
//
// s.mheapLock must not be locked. Must be run on the system stack because it
// acquires the heap lock.
//
//go:systemstack
func (s *pageAlloc) scavengeOne(max uintptr) uintptr {
// Calculate the maximum number of pages to scavenge.
//
// This should be alignUp(max, pageSize) / pageSize but max can and will
// be ^uintptr(0), so we need to be very careful not to overflow here.
// Rather than use alignUp, calculate the number of pages rounded down
// first, then add back one if necessary.
maxPages := max / pageSize
if max%pageSize != 0 {
maxPages++
}
// Calculate the minimum number of pages we can scavenge.
//
// Because we can only scavenge whole physical pages, we must
// ensure that we scavenge at least minPages each time, aligned
// to minPages*pageSize.
minPages := physPageSize / pageSize
if minPages < 1 {
minPages = 1
}
lock(s.mheapLock)
top := chunkIndex(s.scavAddr)
if top < s.start {
unlock(s.mheapLock)
return 0
}
// Check the chunk containing the scav addr, starting at the addr
// and see if there are any free and unscavenged pages.
ci := chunkIndex(s.scavAddr)
base, npages := s.chunks[ci].findScavengeCandidate(chunkPageIndex(s.scavAddr), minPages, maxPages)
// If we found something, scavenge it and return!
if npages != 0 {
s.scavengeRangeLocked(ci, base, npages)
unlock(s.mheapLock)
return uintptr(npages) * pageSize
}
unlock(s.mheapLock)
// Slow path: iterate optimistically looking for any free and unscavenged page.
// If we think we see something, stop and verify it!
for i := top - 1; i >= s.start; i-- {
// If this chunk is totally in-use or has no unscavenged pages, don't bother
// doing a more sophisticated check.
//
// Note we're accessing the summary and the chunks without a lock, but
// that's fine. We're being optimistic anyway.
// Check if there are enough free pages at all. It's imperative that we
// check this before the chunk itself so that we quickly skip over
// unused parts of the address space, which may have a cleared bitmap
// but a zero'd summary which indicates not to allocate from there.
if s.summary[len(s.summary)-1][i].max() < uint(minPages) {
continue
}
// Run over the chunk looking harder for a candidate. Again, we could
// race with a lot of different pieces of code, but we're just being
// optimistic.
if !s.chunks[i].hasScavengeCandidate(minPages) {
continue
}
// We found a candidate, so let's lock and verify it.
lock(s.mheapLock)
// Find, verify, and scavenge if we can.
chunk := &s.chunks[i]
base, npages := chunk.findScavengeCandidate(pallocChunkPages-1, minPages, maxPages)
if npages > 0 {
// We found memory to scavenge! Mark the bits and report that up.
s.scavengeRangeLocked(i, base, npages)
unlock(s.mheapLock)
return uintptr(npages) * pageSize
}
// We were fooled, so take this opportunity to move scavAddr all the
// way down past the chunk we just searched so future calls don't
// retrace it, and keep iterating.
s.scavAddr = chunkBase(i-1) + pallocChunkPages*pageSize - 1
unlock(s.mheapLock)
}
lock(s.mheapLock)
// We couldn't find anything, so signal that there's nothing left
// to scavenge.
s.scavAddr = minScavAddr
unlock(s.mheapLock)
return 0
}
// scavengeRangeLocked scavenges the given region of memory.
//
// s.mheapLock must be held.
func (s *pageAlloc) scavengeRangeLocked(ci chunkIdx, base, npages uint) {
s.chunks[ci].scavenged.setRange(base, npages)
// Compute the full address for the start of the range.
addr := chunkBase(ci) + uintptr(base)*pageSize
// Update the scav pointer.
s.scavAddr = addr - 1
// Only perform the actual scavenging if we're not in a test.
// It's dangerous to do so otherwise.
if s.test {
return
}
sysUnused(unsafe.Pointer(addr), uintptr(npages)*pageSize)
// Update global accounting only when not in test, otherwise
// the runtime's accounting will be wrong.
memstats.heap_released += uint64(npages) * pageSize
}
// fillAligned returns x but with all zeroes in m-aligned
// groups of m bits set to 1 if any bit in the group is non-zero.
//
// For example, fillAligned(0x0100a3, 8) == 0xff00ff.
//
// Note that if m == 1, this is a no-op.
//
// m must be a power of 2 <= 64.
func fillAligned(x uint64, m uint) uint64 {
apply := func(x uint64, c uint64) uint64 {
// The technique used here is derived from
// https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
// and extended for more than just bytes (like nibbles
// and uint16s) by using an appropriate constant.
//
// To summarize the technique, quoting from that page:
// "[It] works by first zeroing the high bits of the [8]
// bytes in the word. Subsequently, it adds a number that
// will result in an overflow to the high bit of a byte if
// any of the low bits were initially set. Next the high
// bits of the original word are ORed with these values;
// thus, the high bit of a byte is set iff any bit in the
// byte was set. Finally, we determine if any of these high
// bits are zero by ORing with ones everywhere except the
// high bits and inverting the result."
return ^((((x & c) + c) | x) | c)
}
// Transform x to contain a 1 bit at the top of each m-aligned
// group of m zero bits.
switch m {
case 1:
return x
case 2:
x = apply(x, 0x5555555555555555)
case 4:
x = apply(x, 0x7777777777777777)
case 8:
x = apply(x, 0x7f7f7f7f7f7f7f7f)
case 16:
x = apply(x, 0x7fff7fff7fff7fff)
case 32:
x = apply(x, 0x7fffffff7fffffff)
case 64:
x = apply(x, 0x7fffffffffffffff)
}
// Now, the top bit of each m-aligned group in x is set iff
// that group was all zero in the original x.
// From each group of m bits subtract 1.
// Because we know only the top bits of each
// m-aligned group are set, we know this will
// set each group to have all the bits set except
// the top bit, so just OR with the original
// result to set all the bits.
return ^((x - (x >> (m - 1))) | x)
}
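To make those two steps concrete, the documented example fillAligned(0x0100a3, 8) == 0xff00ff can be traced with a small standalone snippet (illustration only, hard-coding the m == 8 constant and shift):

```go
package main

import "fmt"

func main() {
	const c = uint64(0x7f7f7f7f7f7f7f7f) // the m == 8 constant from apply
	x := uint64(0x0100a3)                // bytes, low to high: a3 00 01 00 ...

	// Step 1: apply marks every all-zero byte with its high bit.
	x = ^((((x & c) + c) | x) | c) // 0x8080808080008000

	// Step 2: spread each marker down through its byte, then invert, so
	// all-zero bytes stay 0x00 and bytes that had any set bit become 0xff.
	fmt.Printf("%#x\n", ^((x - (x >> 7)) | x)) // prints 0xff00ff
}
```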
// hasScavengeCandidate returns true if there are any min-page-aligned groups of
// min pages of free-and-unscavenged memory in the region represented by this
// pallocData.
//
// min must be a non-zero power of 2 <= 64.
func (m *pallocData) hasScavengeCandidate(min uintptr) bool {
if min&(min-1) != 0 || min == 0 {
print("runtime: min = ", min, "\n")
throw("min must be a non-zero power of 2")
} else if min > 64 {
print("runtime: min = ", min, "\n")
throw("physical page sizes > 512 KiB are not supported")
}
// The goal of this search is to see if the chunk contains any free and unscavenged memory.
for i := len(m.scavenged) - 1; i >= 0; i-- {
// 1s are scavenged OR non-free => 0s are unscavenged AND free
//
// TODO(mknyszek): Consider splitting up fillAligned into two
// functions, since here we technically could get by with just
// the first half of its computation. It'll save a few instructions
// but adds some additional code complexity.
x := fillAligned(m.scavenged[i]|m.pallocBits[i], uint(min))
// Quickly skip over chunks of non-free or scavenged pages.
if x != ^uint64(0) {
return true
}
}
return false
}
// findScavengeCandidate returns a start index and a size for this pallocData
// segment which represents a contiguous region of free and unscavenged memory.
//
// searchIdx indicates the page index within this chunk to start the search, but
// note that findScavengeCandidate searches backwards through the pallocData. As
// a result, it will return the highest scavenge candidate in address order.
//
// min indicates a hard minimum size and alignment for runs of pages. That is,
// findScavengeCandidate will not return a region smaller than min pages in size,
// or that is min pages or greater in size but not aligned to min. min must be
// a non-zero power of 2 <= 64.
//
// max is a hint for how big of a region is desired. If max >= pallocChunkPages, then
// findScavengeCandidate effectively returns entire free and unscavenged regions.
// If max < pallocChunkPages, it may truncate the returned region such that size is
// max. However, findScavengeCandidate may still return a larger region if, for
// example, it chooses to preserve huge pages. That is, even if max is small,
// size is not guaranteed to be equal to max. max is allowed to be less than min,
// in which case it is as if max == min.
func (m *pallocData) findScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
if min&(min-1) != 0 || min == 0 {
print("runtime: min = ", min, "\n")
throw("min must be a non-zero power of 2")
} else if min > 64 {
print("runtime: min = ", min, "\n")
throw("physical page sizes > 512 KiB are not supported")
}
// max is allowed to be less than min, but we need to ensure
// we never truncate further than min.
if max < min {
max = min
}
i := int(searchIdx / 64)
// Start by quickly skipping over blocks of non-free or scavenged pages.
for ; i >= 0; i-- {
// 1s are scavenged OR non-free => 0s are unscavenged AND free
x := fillAligned(m.scavenged[i]|m.pallocBits[i], uint(min))
if x != ^uint64(0) {
break
}
}
if i < 0 {
// Failed to find any free/unscavenged pages.
return 0, 0
}
// We have something in the 64-bit chunk at i, but it could
// extend further. Loop until we find the extent of it.
// 1s are scavenged OR non-free => 0s are unscavenged AND free
x := fillAligned(m.scavenged[i]|m.pallocBits[i], uint(min))
z1 := uint(bits.LeadingZeros64(^x))
run, end := uint(0), uint(i)*64+(64-z1)
if x<<z1 != 0 {
// After shifting out z1 bits, we still have 1s,
// so the run ends inside this word.
run = uint(bits.LeadingZeros64(x << z1))
} else {
// After shifting out z1 bits, we have no more 1s.
// This means the run extends to the bottom of the
// word so it may extend into further words.
run = 64 - z1
for j := i - 1; j >= 0; j-- {
x := fillAligned(m.scavenged[j]|m.pallocBits[j], uint(min))
run += uint(bits.LeadingZeros64(x))
if x != 0 {
// The run stopped in this word.
break
}
}
}
// Split the run we found if it's larger than max but hold on to
// our original length, since we may need it later.
size := run
if size > uint(max) {
size = uint(max)
}
start := end - size
if physHugePageSize > pageSize && physHugePageSize > physPageSize {
// We have huge pages, so let's ensure we don't break one by scavenging
// over a huge page boundary. If the range [start, start+size) overlaps with
// a free-and-unscavenged huge page, we want to grow the region we scavenge
// to include that huge page.
// Compute the huge page boundary above our candidate.
pagesPerHugePage := uintptr(physHugePageSize / pageSize)
hugePageAbove := uint(alignUp(uintptr(start), pagesPerHugePage))
// If that boundary is within our current candidate, then we may be breaking
// a huge page.
if hugePageAbove <= end {
// Compute the huge page boundary below our candidate.
hugePageBelow := uint(alignDown(uintptr(start), pagesPerHugePage))
if hugePageBelow >= end-run {
// We're in danger of breaking apart a huge page since start+size crosses
// a huge page boundary and rounding down start to the nearest huge
// page boundary is included in the full run we found. Include the entire
// huge page in the bound by rounding down to the huge page size.
size = size + (start - hugePageBelow)
start = hugePageBelow
}
}
}
return start, size
}
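A small numeric illustration of the huge-page adjustment above, using made-up sizes (8 KiB pages and 64 KiB huge pages, so 8 pages per huge page); alignUp and alignDown here are local stand-ins for the runtime's helpers:

```go
package main

import "fmt"

func alignUp(n, a uint) uint   { return (n + a - 1) &^ (a - 1) }
func alignDown(n, a uint) uint { return n &^ (a - 1) }

func main() {
	const pagesPerHugePage = 8
	// Suppose the full free-and-unscavenged run covers pages [4, 20),
	// so run = 16 and end = 20, and max truncated the candidate to
	// size = 6, giving start = 14.
	run, end := uint(16), uint(20)
	size := uint(6)
	start := end - size // 14

	if hugePageAbove := alignUp(start, pagesPerHugePage); hugePageAbove <= end { // 16 <= 20
		if hugePageBelow := alignDown(start, pagesPerHugePage); hugePageBelow >= end-run { // 8 >= 4
			// [14, 20) would split the huge page [8, 16), and the whole huge
			// page lies inside the free run, so grow the candidate downward.
			size += start - hugePageBelow // 12
			start = hugePageBelow         // 8
		}
	}
	fmt.Println(start, size) // 8 12: the huge page [8, 16) is kept intact
}
```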

src/runtime/mgcscavenge_test.go

@@ -0,0 +1,382 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime_test
import (
"fmt"
"math/rand"
. "runtime"
"testing"
)
// makePallocData produces an initialized PallocData by setting
// the ranges described in alloc and scavenged.
func makePallocData(alloc, scavenged []BitRange) *PallocData {
b := new(PallocData)
for _, v := range alloc {
if v.N == 0 {
// Skip N==0. It's harmless and allocRange doesn't
// handle this case.
continue
}
b.AllocRange(v.I, v.N)
}
for _, v := range scavenged {
if v.N == 0 {
// See the previous loop.
continue
}
b.ScavengedSetRange(v.I, v.N)
}
return b
}
func TestFillAligned(t *testing.T) {
fillAlignedSlow := func(x uint64, m uint) uint64 {
if m == 1 {
return x
}
out := uint64(0)
for i := uint(0); i < 64; i += m {
for j := uint(0); j < m; j++ {
if x&(uint64(1)<<(i+j)) != 0 {
out |= ((uint64(1) << m) - 1) << i
break
}
}
}
return out
}
check := func(x uint64, m uint) {
want := fillAlignedSlow(x, m)
if got := FillAligned(x, m); got != want {
t.Logf("got: %064b", got)
t.Logf("want: %064b", want)
t.Errorf("bad fillAligned(%016x, %d)", x, m)
}
}
for m := uint(1); m <= 64; m *= 2 {
tests := []uint64{
0x0000000000000000,
0x00000000ffffffff,
0xffffffff00000000,
0x8000000000000001,
0xf00000000000000f,
0xf00000010050000f,
0xffffffffffffffff,
0x0000000000000001,
0x0000000000000002,
0x0000000000000008,
uint64(1) << (m - 1),
uint64(1) << m,
// Try a few fixed arbitrary examples.
0xb02b9effcf137016,
0x3975a076a9fbff18,
0x0f8c88ec3b81506e,
0x60f14d80ef2fa0e6,
}
for _, test := range tests {
check(test, m)
}
for i := 0; i < 1000; i++ {
// Try a pseudo-random number.
check(rand.Uint64(), m)
if m > 1 {
// For m != 1, let's construct a slightly more interesting
// random test. Generate a bitmap which is either 0 or
// randomly set bits for each m-aligned group of m bits.
val := uint64(0)
for n := uint(0); n < 64; n += m {
// For each group of m bits, flip a coin:
// * Leave them as zero.
// * Set them randomly.
if rand.Uint64()%2 == 0 {
val |= (rand.Uint64() & ((1 << m) - 1)) << n
}
}
check(val, m)
}
}
}
}
func TestPallocDataFindScavengeCandidate(t *testing.T) {
type test struct {
alloc, scavenged []BitRange
min, max uintptr
want BitRange
}
tests := map[string]test{
"MixedMin1": {
alloc: []BitRange{{0, 40}, {42, PallocChunkPages - 42}},
scavenged: []BitRange{{0, 41}, {42, PallocChunkPages - 42}},
min: 1,
max: PallocChunkPages,
want: BitRange{41, 1},
},
"MultiMin1": {
alloc: []BitRange{{0, 63}, {65, 20}, {87, PallocChunkPages - 87}},
scavenged: []BitRange{{86, 1}},
min: 1,
max: PallocChunkPages,
want: BitRange{85, 1},
},
}
// Try out different page minimums.
for m := uintptr(1); m <= 64; m *= 2 {
suffix := fmt.Sprintf("Min%d", m)
tests["AllFree"+suffix] = test{
min: m,
max: PallocChunkPages,
want: BitRange{0, PallocChunkPages},
}
tests["AllScavenged"+suffix] = test{
scavenged: []BitRange{{0, PallocChunkPages}},
min: m,
max: PallocChunkPages,
want: BitRange{0, 0},
}
tests["NoneFree"+suffix] = test{
alloc: []BitRange{{0, PallocChunkPages}},
scavenged: []BitRange{{PallocChunkPages / 2, PallocChunkPages / 2}},
min: m,
max: PallocChunkPages,
want: BitRange{0, 0},
}
tests["StartFree"+suffix] = test{
alloc: []BitRange{{uint(m), PallocChunkPages - uint(m)}},
min: m,
max: PallocChunkPages,
want: BitRange{0, uint(m)},
}
tests["StartFree"+suffix] = test{
alloc: []BitRange{{uint(m), PallocChunkPages - uint(m)}},
min: m,
max: PallocChunkPages,
want: BitRange{0, uint(m)},
}
tests["EndFree"+suffix] = test{
alloc: []BitRange{{0, PallocChunkPages - uint(m)}},
min: m,
max: PallocChunkPages,
want: BitRange{PallocChunkPages - uint(m), uint(m)},
}
tests["Straddle64"+suffix] = test{
alloc: []BitRange{{0, 64 - uint(m)}, {64 + uint(m), PallocChunkPages - (64 + uint(m))}},
min: m,
max: 2 * m,
want: BitRange{64 - uint(m), 2 * uint(m)},
}
tests["BottomEdge64WithFull"+suffix] = test{
alloc: []BitRange{{64, 64}, {128 + 3*uint(m), PallocChunkPages - (128 + 3*uint(m))}},
scavenged: []BitRange{{1, 10}},
min: m,
max: 3 * m,
want: BitRange{128, 3 * uint(m)},
}
tests["BottomEdge64WithPocket"+suffix] = test{
alloc: []BitRange{{64, 62}, {127, 1}, {128 + 3*uint(m), PallocChunkPages - (128 + 3*uint(m))}},
scavenged: []BitRange{{1, 10}},
min: m,
max: 3 * m,
want: BitRange{128, 3 * uint(m)},
}
if m <= 8 {
tests["OneFree"] = test{
alloc: []BitRange{{0, 40}, {40 + uint(m), PallocChunkPages - (40 + uint(m))}},
min: m,
max: PallocChunkPages,
want: BitRange{40, uint(m)},
}
tests["OneScavenged"] = test{
alloc: []BitRange{{0, 40}, {40 + uint(m), PallocChunkPages - (40 + uint(m))}},
scavenged: []BitRange{{40, 1}},
min: m,
max: PallocChunkPages,
want: BitRange{0, 0},
}
}
if m > 1 {
tests["SkipSmall"+suffix] = test{
alloc: []BitRange{{0, 64 - uint(m)}, {64, 5}, {70, 11}, {82, PallocChunkPages - 82}},
min: m,
max: m,
want: BitRange{64 - uint(m), uint(m)},
}
tests["SkipMisaligned"+suffix] = test{
alloc: []BitRange{{0, 64 - uint(m)}, {64, 63}, {127 + uint(m), PallocChunkPages - (127 + uint(m))}},
min: m,
max: m,
want: BitRange{64 - uint(m), uint(m)},
}
tests["MaxLessThan"+suffix] = test{
scavenged: []BitRange{{0, PallocChunkPages - uint(m)}},
min: m,
max: 1,
want: BitRange{PallocChunkPages - uint(m), uint(m)},
}
}
}
if PhysHugePageSize > uintptr(PageSize) {
// Check hugepage preserving behavior.
bits := uint(PhysHugePageSize / uintptr(PageSize))
tests["PreserveHugePageBottom"] = test{
alloc: []BitRange{{bits + 2, PallocChunkPages - (bits + 2)}},
min: 1,
max: 3, // Make it so that max would have us try to break the huge page.
want: BitRange{0, bits + 2},
}
if 3*bits <= PallocChunkPages {
// We need at least 3 huge pages in a chunk for this test to make sense.
tests["PreserveHugePageMiddle"] = test{
alloc: []BitRange{{0, bits - 10}, {2*bits + 10, PallocChunkPages - (2*bits + 10)}},
min: 1,
max: 12, // Make it so that max would have us try to break the huge page.
want: BitRange{bits, bits + 10},
}
}
tests["PreserveHugePageTop"] = test{
alloc: []BitRange{{0, PallocChunkPages - bits}},
min: 1,
max: 1, // Even one page would break a huge page in this case.
want: BitRange{PallocChunkPages - bits, bits},
}
}
for name, v := range tests {
v := v
t.Run(name, func(t *testing.T) {
b := makePallocData(v.alloc, v.scavenged)
start, size := b.FindScavengeCandidate(PallocChunkPages-1, v.min, v.max)
got := BitRange{start, size}
if !(got.N == 0 && v.want.N == 0) && got != v.want {
t.Fatalf("candidate mismatch: got %v, want %v", got, v.want)
}
})
}
}
// Tests end-to-end scavenging on a pageAlloc.
func TestPageAllocScavenge(t *testing.T) {
type test struct {
request, expect uintptr
}
minPages := PhysPageSize / PageSize
if minPages < 1 {
minPages = 1
}
tests := map[string]struct {
beforeAlloc map[ChunkIdx][]BitRange
beforeScav map[ChunkIdx][]BitRange
expect []test
afterScav map[ChunkIdx][]BitRange
}{
"AllFreeUnscavExhaust": {
beforeAlloc: map[ChunkIdx][]BitRange{
BaseChunkIdx: {},
BaseChunkIdx + 1: {},
BaseChunkIdx + 2: {},
},
beforeScav: map[ChunkIdx][]BitRange{
BaseChunkIdx: {},
BaseChunkIdx + 1: {},
BaseChunkIdx + 2: {},
},
expect: []test{
{^uintptr(0), 3 * PallocChunkPages * PageSize},
},
afterScav: map[ChunkIdx][]BitRange{
BaseChunkIdx: {{0, PallocChunkPages}},
BaseChunkIdx + 1: {{0, PallocChunkPages}},
BaseChunkIdx + 2: {{0, PallocChunkPages}},
},
},
"NoneFreeUnscavExhaust": {
beforeAlloc: map[ChunkIdx][]BitRange{
BaseChunkIdx: {{0, PallocChunkPages}},
BaseChunkIdx + 1: {},
BaseChunkIdx + 2: {{0, PallocChunkPages}},
},
beforeScav: map[ChunkIdx][]BitRange{
BaseChunkIdx: {},
BaseChunkIdx + 1: {{0, PallocChunkPages}},
BaseChunkIdx + 2: {},
},
expect: []test{
{^uintptr(0), 0},
},
afterScav: map[ChunkIdx][]BitRange{
BaseChunkIdx: {},
BaseChunkIdx + 1: {{0, PallocChunkPages}},
BaseChunkIdx + 2: {},
},
},
"ScavHighestPageFirst": {
beforeAlloc: map[ChunkIdx][]BitRange{
BaseChunkIdx: {},
},
beforeScav: map[ChunkIdx][]BitRange{
BaseChunkIdx: {{uint(minPages), PallocChunkPages - uint(2*minPages)}},
},
expect: []test{
{1, minPages * PageSize},
},
afterScav: map[ChunkIdx][]BitRange{
BaseChunkIdx: {{uint(minPages), PallocChunkPages - uint(minPages)}},
},
},
"ScavMultiple": {
beforeAlloc: map[ChunkIdx][]BitRange{
BaseChunkIdx: {},
},
beforeScav: map[ChunkIdx][]BitRange{
BaseChunkIdx: {{uint(minPages), PallocChunkPages - uint(2*minPages)}},
},
expect: []test{
{minPages * PageSize, minPages * PageSize},
{minPages * PageSize, minPages * PageSize},
},
afterScav: map[ChunkIdx][]BitRange{
BaseChunkIdx: {{0, PallocChunkPages}},
},
},
"ScavMultiple2": {
beforeAlloc: map[ChunkIdx][]BitRange{
BaseChunkIdx: {},
BaseChunkIdx + 1: {},
},
beforeScav: map[ChunkIdx][]BitRange{
BaseChunkIdx: {{uint(minPages), PallocChunkPages - uint(2*minPages)}},
BaseChunkIdx + 1: {{0, PallocChunkPages - uint(2*minPages)}},
},
expect: []test{
{2 * minPages * PageSize, 2 * minPages * PageSize},
{minPages * PageSize, minPages * PageSize},
{minPages * PageSize, minPages * PageSize},
},
afterScav: map[ChunkIdx][]BitRange{
BaseChunkIdx: {{0, PallocChunkPages}},
BaseChunkIdx + 1: {{0, PallocChunkPages}},
},
},
}
for name, v := range tests {
v := v
t.Run(name, func(t *testing.T) {
b := NewPageAlloc(v.beforeAlloc, v.beforeScav)
defer FreePageAlloc(b)
for iter, h := range v.expect {
if got := b.Scavenge(h.request); got != h.expect {
t.Fatalf("bad scavenge #%d: want %d, got %d", iter+1, h.expect, got)
}
}
want := NewPageAlloc(v.beforeAlloc, v.afterScav)
defer FreePageAlloc(want)
checkPageAlloc(t, want, b)
})
}
}

src/runtime/mpagealloc.go

@@ -80,6 +80,11 @@ const (
// value in the shifted address space, but searchAddr is stored as a regular
// memory address. See arenaBaseOffset for details.
maxSearchAddr = ^uintptr(0) - arenaBaseOffset
// Minimum scavAddr value, which indicates that the scavenger is done.
//
// minScavAddr + arenaBaseOffset == 0
minScavAddr = (^arenaBaseOffset + 1) & uintptrMask
)
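For concreteness, on amd64 (where arenaBaseOffset is 1<<47, per the malloc.go hunk above) these constants work out to the following (illustration only):

```go
// arenaBaseOffset = 1 << 47                 = 0x0000800000000000
// maxSearchAddr   = ^uintptr(0) - (1 << 47) = 0xffff7fffffffffff
// minScavAddr     = -(1 << 47) mod 2^64     = 0xffff800000000000
//
// so minScavAddr + arenaBaseOffset wraps around to 0, as documented above.
```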
// Global chunk index.
@@ -171,7 +176,7 @@ type pageAlloc struct {
// TODO(mknyszek): Consider changing the definition of the bitmap
// such that 1 means free and 0 means in-use so that summaries and
// the bitmaps align better on zero-values.
chunks []pallocBits
chunks []pallocData
// The address to start an allocation search with.
//
@@ -185,6 +190,9 @@ type pageAlloc struct {
// space on architectures with segmented address spaces.
searchAddr uintptr
// The address to start a scavenge candidate search with.
scavAddr uintptr
// start and end represent the chunk indices
// which pageAlloc knows about. It assumes
// chunks in the range [start, end) are
@@ -198,6 +206,9 @@ type pageAlloc struct {
// sysStat is the runtime memstat to update when new system
// memory is committed by the pageAlloc for allocation metadata.
sysStat *uint64
// Whether or not this struct is being used in tests.
test bool
}
func (s *pageAlloc) init(mheapLock *mutex, sysStat *uint64) {
@@ -217,6 +228,9 @@ func (s *pageAlloc) init(mheapLock *mutex, sysStat *uint64) {
// Start with the searchAddr in a state indicating there's no free memory.
s.searchAddr = maxSearchAddr
// Start with the scavAddr in a state indicating there's nothing more to do.
s.scavAddr = minScavAddr
// Reserve space for the bitmap and put this reservation
// into the chunks slice.
const maxChunks = (1 << heapAddrBits) / pallocChunkBytes
@@ -225,7 +239,7 @@ func (s *pageAlloc) init(mheapLock *mutex, sysStat *uint64) {
throw("failed to reserve page bitmap memory")
}
sl := notInHeapSlice{(*notInHeap)(r), 0, maxChunks}
s.chunks = *(*[]pallocBits)(unsafe.Pointer(&sl))
s.chunks = *(*[]pallocData)(unsafe.Pointer(&sl))
// Set the mheapLock.
s.mheapLock = mheapLock
@@ -350,6 +364,13 @@ func (s *pageAlloc) grow(base, size uintptr) {
s.searchAddr = base
}
// Newly-grown memory is always considered scavenged.
//
// Set all the bits in the scavenged bitmaps high.
for c := chunkIndex(base); c < chunkIndex(limit); c++ {
s.chunks[c].scavenged.setRange(0, pallocChunkPages)
}
// Update summaries accordingly. The grow acts like a free, so
// we need to ensure this newly-free memory is visible in the
// summaries.

src/runtime/mpagealloc_test.go

@@ -23,8 +23,12 @@ func checkPageAlloc(t *testing.T, want, got *PageAlloc) {
for i := gotStart; i < gotEnd; i++ {
// Check the bitmaps.
if !checkPallocBits(t, got.PallocBits(i), want.PallocBits(i)) {
t.Logf("in chunk %d", i)
gb, wb := got.PallocData(i), want.PallocData(i)
if !checkPallocBits(t, gb.PallocBits(), wb.PallocBits()) {
t.Logf("in chunk %d (mallocBits)", i)
}
if !checkPallocBits(t, gb.Scavenged(), wb.Scavenged()) {
t.Logf("in chunk %d (scavenged)", i)
}
}
// TODO(mknyszek): Verify summaries too?
@@ -310,7 +314,7 @@ func TestPageAllocAlloc(t *testing.T) {
for name, v := range tests {
v := v
t.Run(name, func(t *testing.T) {
b := NewPageAlloc(v.before)
b := NewPageAlloc(v.before, nil)
defer FreePageAlloc(b)
for iter, i := range v.hits {
@@ -318,7 +322,7 @@ func TestPageAllocAlloc(t *testing.T) {
t.Fatalf("bad alloc #%d: want 0x%x, got 0x%x", iter+1, i.base, a)
}
}
want := NewPageAlloc(v.after)
want := NewPageAlloc(v.after, nil)
defer FreePageAlloc(want)
checkPageAlloc(t, want, b)
@@ -335,7 +339,7 @@ func TestPageAllocExhaust(t *testing.T) {
for i := ChunkIdx(0); i < 4; i++ {
bDesc[BaseChunkIdx+i] = []BitRange{}
}
b := NewPageAlloc(bDesc)
b := NewPageAlloc(bDesc, nil)
defer FreePageAlloc(b)
// Allocate into b with npages until we've exhausted the heap.
@@ -366,7 +370,7 @@ func TestPageAllocExhaust(t *testing.T) {
wantDesc[BaseChunkIdx+i] = []BitRange{}
}
}
want := NewPageAlloc(wantDesc)
want := NewPageAlloc(wantDesc, nil)
defer FreePageAlloc(want)
// Check to make sure the heap b matches what we want.
@@ -590,14 +594,15 @@ func TestPageAllocFree(t *testing.T) {
for name, v := range tests {
v := v
t.Run(name, func(t *testing.T) {
b := NewPageAlloc(v.before)
b := NewPageAlloc(v.before, nil)
defer FreePageAlloc(b)
for _, addr := range v.frees {
b.Free(addr, v.npages)
}
want := NewPageAlloc(v.after)
want := NewPageAlloc(v.after, nil)
defer FreePageAlloc(want)
checkPageAlloc(t, want, b)
})
}
@@ -641,7 +646,7 @@ func TestPageAllocAllocAndFree(t *testing.T) {
for name, v := range tests {
v := v
t.Run(name, func(t *testing.T) {
b := NewPageAlloc(v.init)
b := NewPageAlloc(v.init, nil)
defer FreePageAlloc(b)
for iter, i := range v.hits {

src/runtime/mpallocbits.go

@@ -131,7 +131,7 @@ var consec8tab = [256]uint{
4, 3, 2, 2, 2, 1, 1, 1, 3, 2, 1, 1, 2, 1, 1, 0,
}
// summarize returns a packed summary of the bitmap in mallocBits.
// summarize returns a packed summary of the bitmap in pallocBits.
func (b *pallocBits) summarize() pallocSum {
// TODO(mknyszek): There may be something more clever to be done
// here to make the summarize operation more efficient. For example,
@@ -332,3 +332,28 @@ func findBitRange64(c uint64, n uint) uint {
}
return i
}
// pallocData encapsulates pallocBits and a bitmap for
// whether or not a given page is scavenged in a single
// structure. It's effectively a pallocBits with
// additional functionality.
type pallocData struct {
pallocBits
scavenged pageBits
}
// allocRange sets bits [i, i+n) in the bitmap to 1 and
// updates the scavenged bits appropriately.
func (m *pallocData) allocRange(i, n uint) {
// Clear the scavenged bits when we alloc the range.
m.pallocBits.allocRange(i, n)
m.scavenged.clearRange(i, n)
}
// allocAll sets every bit in the bitmap to 1 and updates
// the scavenged bits appropriately.
func (m *pallocData) allocAll() {
// Clear the scavenged bits when we alloc the range.
m.pallocBits.allocAll()
m.scavenged.clearAll()
}
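
The alloc-clears-scavenged invariant that pallocData maintains can be checked directly through the exports added in export_test.go above; a hypothetical test (not part of this commit, written in package runtime_test with the usual dot-import of runtime) might read:

```go
func TestPallocDataAllocClearsScavenged(t *testing.T) {
	d := new(PallocData)
	d.ScavengedSetRange(0, 64)
	d.AllocRange(0, 64)

	// Allocating the range must clear its scavenged bits.
	got := StringifyPallocBits(d.Scavenged(), BitRange{0, 64})
	want := StringifyPallocBits(new(PallocBits), BitRange{0, 64})
	if got != want {
		t.Fatalf("scavenged bits not cleared:\ngot  %s\nwant %s", got, want)
	}
}
```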