
runtime: clarify address space limit constants and comments

Now that we support the full non-contiguous virtual address space of
amd64 hardware, some of the comments and constants related to this are
out of date.

First, this renames memLimitBits to heapAddrBits, since 1<<memLimitBits
is no longer the limit of the address space, and rewrites the comment
to focus first on hardware limits (which span OSes) and then on kernel
limits.

Second, this eliminates the memLimit constant because there's no
longer a meaningful "highest possible heap pointer value" on amd64.

Updates #23862.

Change-Id: I44b32033d2deb6b69248fb8dda14fc0e65c47f11
Reviewed-on: https://go-review.googlesource.com/95498
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
Austin Clements 2018-02-20 11:59:02 -05:00
parent ed1959c6e6
commit ea8d7a370d
5 changed files with 41 additions and 30 deletions
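The change below keeps the arena index sized by 1<<heapAddrBits / heapArenaBytes. Here is a small, standalone sketch of that arithmetic: heapAddrBits = 48 is taken from the diff, while the 64 MiB heapArenaBytes value and the 8-byte pointer size are illustrative assumptions, not something this change specifies.

package main

import "fmt"

// Illustrative constants: heapAddrBits mirrors the new constant in
// malloc.go below; heapArenaBytes (64 MiB) is an assumed value that is
// defined elsewhere in the runtime, not in this diff.
const (
	heapAddrBits   = 48
	heapArenaBytes = 64 << 20
)

func main() {
	// One arena-index entry per possible arena in a 1<<heapAddrBits
	// address space.
	entries := (uint64(1) << heapAddrBits) / heapArenaBytes
	// Rough size of the index itself, assuming 8-byte *heapArena pointers.
	fmt.Printf("arena index: %d entries, %d MiB of pointers\n",
		entries, entries*8/(1<<20))
}

At 48 bits the single-level index stays small enough to reserve up front; as the rewritten comment notes, going much beyond 48 bits would force a two-level arena index.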


@@ -11,7 +11,7 @@ import "unsafe"
 const (
 	// addrBits is the number of bits needed to represent a virtual address.
 	//
-	// See memLimitBits for a table of address space sizes on
+	// See heapAddrBits for a table of address space sizes on
 	// various architectures. 48 bits is enough for all
 	// architectures except s390x.
 	//


@@ -156,12 +156,32 @@ const (
 	// plan9 | 4KB | 3
 	_NumStackOrders = 4 - sys.PtrSize/4*sys.GoosWindows - 1*sys.GoosPlan9
 
-	// memLimitBits is the maximum number of bits in a heap address.
+	// heapAddrBits is the number of bits in a heap address. On
+	// amd64, addresses are sign-extended beyond heapAddrBits. On
+	// other arches, they are zero-extended.
 	//
-	// On 64-bit platforms, we limit this to 48 bits because that
-	// is the maximum supported by Linux across all 64-bit
-	// architectures, with the exception of s390x. Based on
-	// processor.h:
+	// On 64-bit platforms, we limit this to 48 bits based on a
+	// combination of hardware and OS limitations.
+	//
+	// amd64 hardware limits addresses to 48 bits, sign-extended
+	// to 64 bits. Addresses where the top 16 bits are not either
+	// all 0 or all 1 are "non-canonical" and invalid. Because of
+	// these "negative" addresses, we offset addresses by 1<<47
+	// (arenaBaseOffset) on amd64 before computing indexes into
+	// the heap arenas index. In 2017, amd64 hardware added
+	// support for 57 bit addresses; however, currently only Linux
+	// supports this extension and the kernel will never choose an
+	// address above 1<<47 unless mmap is called with a hint
+	// address above 1<<47 (which we never do).
+	//
+	// arm64 hardware (as of ARMv8) limits user addresses to 48
+	// bits, in the range [0, 1<<48).
+	//
+	// ppc64, mips64, and s390x support arbitrary 64 bit addresses
+	// in hardware. However, since Go only supports Linux on
+	// these, we lean on OS limits. Based on Linux's processor.h,
+	// the user address space is limited as follows on 64-bit
+	// architectures:
 	//
 	// Architecture  Name          Maximum Value (exclusive)
 	// ---------------------------------------------------------------------
@@ -171,15 +191,12 @@ const (
 	// mips64{,le}   TASK_SIZE64   0x00010000000000 (40 bit addresses)
 	// s390x         TASK_SIZE     1<<64 (64 bit addresses)
 	//
-	// These values may increase over time. In particular, ppc64
-	// and mips64 support arbitrary 64-bit addresses in hardware,
-	// but Linux imposes the above limits. amd64 has hardware
-	// support for 57 bit addresses as of 2017 (56 bits for user
-	// space), but Linux only uses addresses above 1<<47 for
-	// mappings that explicitly pass a high hint address.
-	//
-	// s390x supports full 64-bit addresses, but the allocator
-	// will panic in the unlikely event we exceed 48 bits.
+	// These limits may increase over time, but are currently at
+	// most 48 bits except on s390x. On all architectures, Linux
+	// starts placing mmap'd regions at addresses that are
+	// significantly below 48 bits, so even if it's possible to
+	// exceed Go's 48 bit limit, it's extremely unlikely in
+	// practice.
 	//
 	// On 32-bit platforms, we accept the full 32-bit address
 	// space because doing so is cheap.
@@ -187,22 +204,17 @@ const (
 	// we further limit it to 31 bits.
 	//
 	// The size of the arena index is proportional to
-	// 1<<memLimitBits, so it's important that this not be too
+	// 1<<heapAddrBits, so it's important that this not be too
 	// large. 48 bits is about the threshold; above that we would
 	// need to go to a two level arena index.
-	memLimitBits = _64bit*48 + (1-_64bit)*(32-(sys.GoarchMips+sys.GoarchMipsle))
+	heapAddrBits = _64bit*48 + (1-_64bit)*(32-(sys.GoarchMips+sys.GoarchMipsle))
 
-	// memLimit is one past the highest possible heap pointer value.
-	//
-	// This is also the maximum heap pointer value.
-	memLimit = 1 << memLimitBits
-
 	// maxAlloc is the maximum size of an allocation. On 64-bit,
-	// it's theoretically possible to allocate memLimit bytes. On
-	// 32-bit, however, this is one less than memLimit because the
+	// it's theoretically possible to allocate 1<<heapAddrBits bytes. On
+	// 32-bit, however, this is one less than 1<<32 because the
 	// number of bytes in the address space doesn't actually fit
 	// in a uintptr.
-	maxAlloc = memLimit - (1-_64bit)*1
+	maxAlloc = (1 << heapAddrBits) - (1-_64bit)*1
 
 	// heapArenaBytes is the size of a heap arena. The heap
 	// consists of mappings of size heapArenaBytes, aligned to
@@ -312,8 +324,7 @@ func mallocinit() {
 	}
 
 	// Map the arena index. Most of this will never be written to,
 	// so we don't account it.
-	mheap_.arenas = (*[memLimit / heapArenaBytes]*heapArena)(persistentalloc(unsafe.Sizeof(*mheap_.arenas), sys.PtrSize, nil))
+	mheap_.arenas = (*[(1 << heapAddrBits) / heapArenaBytes]*heapArena)(persistentalloc(unsafe.Sizeof(*mheap_.arenas), sys.PtrSize, nil))
 	if mheap_.arenas == nil {
 		throw("failed to allocate arena index")
 	}
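The new comment's point about sign-extended amd64 addresses can be made concrete with a short sketch. heapAddrBits and the 1<<47 offset (arenaBaseOffset) are taken from the comment above; heapArenaBytes (64 MiB), the arenaIndex helper, and the example addresses are illustrative assumptions, not the runtime's actual code.

package main

import "fmt"

// heapAddrBits and the 1<<47 offset follow the comment in the diff
// above; heapArenaBytes (64 MiB) is an assumed value.
const (
	heapAddrBits    = 48
	heapArenaBytes  = 64 << 20
	arenaBaseOffset = uint64(1) << 47 // applied on amd64 only
)

// arenaIndex is a hypothetical helper showing the calculation the
// comment describes: offset the address, then divide by the arena
// size. For sign-extended "negative" addresses the addition wraps
// modulo 2^64, folding the canonical address space onto [0, 1<<48).
func arenaIndex(p uint64) uint64 {
	return (p + arenaBaseOffset) / heapArenaBytes
}

func main() {
	addrs := []uint64{
		0x00007f0000000000, // a typical "positive" user-space address
		0xffff800000000000, // the lowest canonical "negative" address
	}
	for _, p := range addrs {
		fmt.Printf("addr %#x -> arena index %d\n", p, arenaIndex(p))
	}
}

On the other architectures, where addresses are zero-extended, no such offset is needed and the index is simply the address divided by the arena size.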


@@ -147,7 +147,7 @@ type heapBits struct {
 
 // Make the compiler check that heapBits.arena is large enough to hold
 // the maximum arena index.
-var _ = heapBits{arena: memLimit / heapArenaBytes}
+var _ = heapBits{arena: (1<<heapAddrBits)/heapArenaBytes - 1}
 
 // markBits provides access to the mark bit for an object in the heap.
 // bytep points to the byte holding the mark bit.
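The one-line change above relies on a compile-time check: assigning an untyped constant to the arena field fails to compile if the value overflows the field's type, so the declaration only builds while the maximum arena index still fits. A minimal sketch of the pattern, with an assumed uint32 field width and the same illustrative constants as before:

package main

import "fmt"

// Illustrative constants; heapArenaBytes (64 MiB) and the uint32 field
// width are assumptions, not taken from this diff.
const (
	heapAddrBits   = 48
	heapArenaBytes = 64 << 20
)

// bits stands in for heapBits with only the field that matters here.
type bits struct {
	arena uint32 // arena index; must be able to hold the maximum index
}

// If the maximum arena index ever stops fitting in uint32, this
// declaration becomes a constant-overflow compile error, which is the
// property the runtime's check asserts.
var _ = bits{arena: (1<<heapAddrBits)/heapArenaBytes - 1}

func main() {
	fmt.Println("max arena index:", (1<<heapAddrBits)/heapArenaBytes-1)
}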


@@ -112,7 +112,7 @@ type mheap struct {
 	//
 	// This structure is fully mapped by mallocinit, so it's safe
 	// to probe any index.
-	arenas *[memLimit / heapArenaBytes]*heapArena
+	arenas *[(1 << heapAddrBits) / heapArenaBytes]*heapArena
 
 	// heapArenaAlloc is pre-reserved space for allocating heapArena
 	// objects. This is only used on 32-bit, where we pre-reserve


@@ -144,7 +144,7 @@ var stackpoolmu mutex
 // Global pool of large stack spans.
 var stackLarge struct {
 	lock mutex
-	free [memLimitBits - pageShift]mSpanList // free lists by log_2(s.npages)
+	free [heapAddrBits - pageShift]mSpanList // free lists by log_2(s.npages)
 }
 
 func stackinit() {
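For the stackLarge free lists above, the array bound heapAddrBits - pageShift is the number of possible log2 span sizes, from one page up to just under the full heap address space. A quick sketch of that count, assuming an 8 KiB runtime page size (pageShift = 13), which is not stated in this diff:

package main

import "fmt"

// heapAddrBits comes from this change; pageShift = 13 (8 KiB pages) is
// an assumption for illustration.
const (
	heapAddrBits = 48
	pageShift    = 13
)

func main() {
	// One free list per possible log2(s.npages) for large stack spans.
	fmt.Println("large-stack free lists:", heapAddrBits-pageShift)
}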