
runtime: ensure that searchAddr always refers to inUse memory

This change formalizes an assumption made by the page allocator, which
is that (*pageAlloc).searchAddr should never refer to memory that is not
represented by (*pageAlloc).inUse. The portion of address space covered
by (*pageAlloc).inUse reflects the parts of the summary arrays which are
guaranteed to be mapped, and so looking at any summary which is not
reflected there may cause a segfault.

In fact, this can happen today. This change therefore removes a
micro-optimization that is the only code path which may cause
(*pageAlloc).searchAddr to point outside of any region covered by
(*pageAlloc).inUse, and adds a test verifying that the segfault
described above can no longer occur.

Change-Id: I98b534f0ffba8656d3bd6d782f6fc22549ddf1c2
Reviewed-on: https://go-review.googlesource.com/c/go/+/216697
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Cherry Zhang <cherryyz@google.com>
Author: Michael Anthony Knyszek, 2020-01-28 19:59:19 +00:00 (committed by Michael Knyszek)
parent b13ce14c4a
commit e7f9e17b79
4 changed files with 53 additions and 13 deletions
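
To make the invariant concrete, here is a minimal, self-contained Go sketch of an inUse-style containment check. The type and variable names (addrRange, inUseSet) and the address values are hypothetical stand-ins for illustration, not the runtime's actual internals:

package main

import "fmt"

// addrRange is a hypothetical stand-in for the runtime's notion of a
// mapped region: the half-open interval [base, limit).
type addrRange struct {
	base, limit uintptr
}

// inUseSet models a set of mapped regions, like (*pageAlloc).inUse.
type inUseSet []addrRange

// contains reports whether addr falls inside some mapped region. The
// page allocator's invariant is that searchAddr always passes this
// check; otherwise a summary lookup may touch unmapped memory.
func (s inUseSet) contains(addr uintptr) bool {
	for _, r := range s {
		if r.base <= addr && addr < r.limit {
			return true
		}
	}
	return false
}

func main() {
	inUse := inUseSet{{0x1000, 0x2000}, {0x100000, 0x101000}}
	searchAddr := uintptr(0x1fff)               // last byte of a mapped region
	fmt.Println(inUse.contains(searchAddr))     // true: safe to search from here
	fmt.Println(inUse.contains(searchAddr + 1)) // false: one byte past the region
}

The second check is exactly the off-by-one shape of the bug: a search address one past the end of a mapped region violates inUse.contains(searchAddr).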

src/runtime/export_test.go

@@ -582,6 +582,7 @@ const (
 	PageSize         = pageSize
 	PallocChunkPages = pallocChunkPages
 	PageAlloc64Bit   = pageAlloc64Bit
+	PallocSumBytes   = pallocSumBytes
 )
 
 // Expose pallocSum for testing.

src/runtime/mpagealloc.go

@@ -225,7 +225,9 @@ type pageAlloc struct {
 	// the bitmaps align better on zero-values.
 	chunks [1 << pallocChunksL1Bits]*[1 << pallocChunksL2Bits]pallocData
 
-	// The address to start an allocation search with.
+	// The address to start an allocation search with. It must never
+	// point to any memory that is not contained in inUse, i.e.
+	// inUse.contains(searchAddr) must always be true.
 	//
 	// When added with arenaBaseOffset, we guarantee that
 	// all valid heap addresses (when also added with
@@ -237,7 +239,8 @@ type pageAlloc struct {
 	// space on architectures with segmented address spaces.
 	searchAddr uintptr
 
-	// The address to start a scavenge candidate search with.
+	// The address to start a scavenge candidate search with. It
+	// need not point to memory contained in inUse.
 	scavAddr uintptr
 
 	// The amount of memory scavenged since the last scavtrace print.

src/runtime/mpagealloc_test.go

@@ -225,12 +225,13 @@ func TestPageAllocAlloc(t *testing.T) {
 	type hit struct {
 		npages, base, scav uintptr
 	}
-	tests := map[string]struct {
+	type test struct {
 		scav   map[ChunkIdx][]BitRange
 		before map[ChunkIdx][]BitRange
 		after  map[ChunkIdx][]BitRange
 		hits   []hit
-	}{
+	}
+	tests := map[string]test{
 		"AllFree1": {
 			before: map[ChunkIdx][]BitRange{
 				BaseChunkIdx: {},
@@ -371,7 +372,6 @@ func TestPageAllocAlloc(t *testing.T) {
 				BaseChunkIdx: {{0, 195}},
 			},
 		},
-		// TODO(mknyszek): Add tests close to the chunk size.
 		"ExhaustPallocChunkPages-3": {
 			before: map[ChunkIdx][]BitRange{
 				BaseChunkIdx: {},
@@ -571,6 +571,48 @@ func TestPageAllocAlloc(t *testing.T) {
 			},
 		},
 	}
+	if PageAlloc64Bit != 0 {
+		const chunkIdxBigJump = 0x100000 // chunk index offset which translates to O(TiB)
+
+		// This test attempts to trigger a bug wherein we look at unmapped summary
+		// memory in a case other than heap exhaustion.
+		//
+		// It achieves this by placing a chunk such that its summary will be
+		// at the very end of a physical page. It then also places another chunk
+		// much further up in the address space, such that any allocations into the
+		// first chunk do not exhaust the heap and the second chunk's summary is not
+		// in the page immediately adjacent to the first chunk's summary's page.
+		// Allocating into the first chunk to exhaustion and then into the second
+		// chunk may then trigger a check in the allocator which erroneously looks
+		// at unmapped summary memory and crashes.
+
+		// Figure out how many chunks are in a physical page, then align BaseChunkIdx
+		// to a physical page in the chunk summary array. Here we only assume that
+		// each summary array is aligned to some physical page.
+		sumsPerPhysPage := ChunkIdx(PhysPageSize / PallocSumBytes)
+		baseChunkIdx := BaseChunkIdx &^ (sumsPerPhysPage - 1)
+		tests["DiscontiguousMappedSumBoundary"] = test{
+			before: map[ChunkIdx][]BitRange{
+				baseChunkIdx + sumsPerPhysPage - 1: {},
+				baseChunkIdx + chunkIdxBigJump:     {},
+			},
+			scav: map[ChunkIdx][]BitRange{
+				baseChunkIdx + sumsPerPhysPage - 1: {},
+				baseChunkIdx + chunkIdxBigJump:     {},
+			},
+			hits: []hit{
+				{PallocChunkPages - 1, PageBase(baseChunkIdx+sumsPerPhysPage-1, 0), 0},
+				{1, PageBase(baseChunkIdx+sumsPerPhysPage-1, PallocChunkPages-1), 0},
+				{1, PageBase(baseChunkIdx+chunkIdxBigJump, 0), 0},
+				{PallocChunkPages - 1, PageBase(baseChunkIdx+chunkIdxBigJump, 1), 0},
+				{1, 0, 0},
+			},
+			after: map[ChunkIdx][]BitRange{
+				baseChunkIdx + sumsPerPhysPage - 1: {{0, PallocChunkPages}},
+				baseChunkIdx + chunkIdxBigJump:     {{0, PallocChunkPages}},
+			},
+		}
+	}
 	for name, v := range tests {
 		v := v
 		t.Run(name, func(t *testing.T) {
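
The baseChunkIdx computation in this hunk rounds down with Go's AND NOT operator (&^) so that the first test chunk's summary lands at a known offset within a physical page. A standalone sketch of the same arithmetic, using assumed example values (a 4 KiB physical page, 8-byte summaries) rather than the real PhysPageSize and PallocSumBytes:

package main

import "fmt"

func main() {
	// Assumed example values: 512 summaries fit in one physical page.
	const physPageSize = 4096
	const pallocSumBytes = 8
	sumsPerPhysPage := uint64(physPageSize / pallocSumBytes) // 512

	// Round an arbitrary chunk index down to a physical-page boundary
	// in the summary array, as the test does with
	// BaseChunkIdx &^ (sumsPerPhysPage - 1).
	baseChunkIdx := uint64(0x7f3) &^ (sumsPerPhysPage - 1)
	fmt.Printf("%#x\n", baseChunkIdx) // 0x600: first summary on its page

	// baseChunkIdx+sumsPerPhysPage-1 then owns the very last summary
	// slot on that page, which is the boundary the test exercises.
	fmt.Printf("%#x\n", baseChunkIdx+sumsPerPhysPage-1) // 0x7ff
}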

src/runtime/mpallocbits.go

@@ -202,17 +202,11 @@ func (b *pallocBits) summarize() pallocSum {
 // If find fails to find any free space, it returns an index of ^uint(0) and
 // the new searchIdx should be ignored.
 //
-// The returned searchIdx is always the index of the first free page found
-// in this bitmap during the search, except if npages == 1, in which
-// case it will be the index just after the first free page, because the
-// index returned as the first result is assumed to be allocated and so
-// represents a minor optimization for that case.
+// Note that if npages == 1, the two returned values will always be identical.
 func (b *pallocBits) find(npages uintptr, searchIdx uint) (uint, uint) {
 	if npages == 1 {
 		addr := b.find1(searchIdx)
-		// Return a searchIdx of addr + 1 since we assume addr will be
-		// allocated.
-		return addr, addr + 1
+		return addr, addr
 	} else if npages <= 64 {
 		return b.findSmallN(npages, searchIdx)
 	}
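
To see why the removed addr+1 hint could break the invariant, consider a toy version of the single-page search (hypothetical code, not the runtime's find1): when the only free page is the last one in a chunk's bitmap, addr+1 indexes one past the bitmap, and a search address derived from it lands in the next chunk, whose summary inUse may not cover.

package main

import "fmt"

const chunkPages = 512 // stand-in for pallocChunkPages

// find1 is a toy single-page search over a free bitmap: it returns the
// index of the first free page at or after searchIdx, or ^uint(0) if
// there is none.
func find1(free *[chunkPages]bool, searchIdx uint) uint {
	for i := searchIdx; i < chunkPages; i++ {
		if free[i] {
			return i
		}
	}
	return ^uint(0)
}

func main() {
	var free [chunkPages]bool
	free[chunkPages-1] = true // only the final page is free

	addr := find1(&free, 0)
	fmt.Println(addr) // 511: a valid page index
	// Before this commit, the search hint was addr+1 == 512, one past
	// the bitmap; a searchAddr built from it points into the next
	// chunk, which may be unmapped. Returning (addr, addr) keeps the
	// hint inside memory that inUse covers.
	fmt.Println(addr + 1) // 512
}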