commit 51ae88ee2f
Currently large sysReserve calls on some OSes don't actually reserve the memory, but just check that it can be reserved. This was important when we called sysReserve to "reserve" many gigabytes for the heap up front, but now that we map memory in small increments as we need it, this complication is no longer necessary.

This has one curious side benefit: currently, on Linux, allocations that are large enough to be rejected by mmap wind up freezing the application for a long time before it panics. This happens because sysReserve doesn't reserve the memory, so sysMap calls mmap_fixed, which calls mmap, which fails because the mapping is too large. However, mmap_fixed doesn't inspect *why* mmap fails, so it falls back to probing every page in the desired region individually with mincore before performing an (otherwise dangerous) MAP_FIXED mapping, which will also fail. This takes a long time for a large region. Now this logic is gone, so the mmap failure leads to an immediate panic.

Updates #10460.

Change-Id: I8efe88c611871cdb14f99fadd09db83e0161ca2e
Reviewed-on: https://go-review.googlesource.com/85888
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
128 lines · 3.9 KiB · Go
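The freeze described in the commit message can be felt from user space. The following Linux-only sketch (an illustration using golang.org/x/sys/unix, not the runtime's actual code) shows why per-page mincore probing is slow: it costs one system call per page, so a multi-gigabyte region takes noticeable wall-clock time before the inevitable failure.

package main

import (
	"fmt"
	"time"

	"golang.org/x/sys/unix"
)

func main() {
	const pageSize = 4096
	const pages = 1 << 18 // 1 GiB of address space; the old fallback could probe far more

	mem, err := unix.Mmap(-1, 0, pages*pageSize, unix.PROT_READ|unix.PROT_WRITE, unix.MAP_ANON|unix.MAP_PRIVATE)
	if err != nil {
		panic(err)
	}
	defer unix.Munmap(mem)

	vec := make([]byte, 1) // residency result for a single page
	start := time.Now()
	for i := 0; i < pages; i++ {
		// One mincore syscall per page, as the old mmap_fixed fallback did.
		unix.Mincore(mem[i*pageSize:(i+1)*pageSize], vec)
	}
	fmt.Printf("probed %d pages in %v\n", pages, time.Since(start))
}
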
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"unsafe"
)

const (
	_MEM_COMMIT   = 0x1000
	_MEM_RESERVE  = 0x2000
	_MEM_DECOMMIT = 0x4000
	_MEM_RELEASE  = 0x8000

	_PAGE_READWRITE = 0x0004
	_PAGE_NOACCESS  = 0x0001

	_ERROR_NOT_ENOUGH_MEMORY = 8
	_ERROR_COMMITMENT_LIMIT  = 1455
)

// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//go:nosplit
func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {
	mSysStatInc(sysStat, n)
	return unsafe.Pointer(stdcall4(_VirtualAlloc, 0, n, _MEM_COMMIT|_MEM_RESERVE, _PAGE_READWRITE))
}

func sysUnused(v unsafe.Pointer, n uintptr) {
	r := stdcall3(_VirtualFree, uintptr(v), n, _MEM_DECOMMIT)
	if r != 0 {
		return
	}

	// Decommit failed. Usual reason is that we've merged memory from two different
	// VirtualAlloc calls, and Windows will only let each VirtualFree handle pages from
	// a single VirtualAlloc. It is okay to specify a subset of the pages from a single alloc,
	// just not pages from multiple allocs. This is a rare case, arising only when we're
	// trying to give memory back to the operating system, which happens on a time
	// scale of minutes. It doesn't have to be terribly fast. Instead of extra bookkeeping
	// on all our VirtualAlloc calls, try freeing successively smaller pieces until
	// we manage to free something, and then repeat. This ends up being O(n log n)
	// in the worst case, but that's fast enough.
	for n > 0 {
		small := n
		for small >= 4096 && stdcall3(_VirtualFree, uintptr(v), small, _MEM_DECOMMIT) == 0 {
			small /= 2
			small &^= 4096 - 1 // round down to a page boundary
		}
		if small < 4096 {
			print("runtime: VirtualFree of ", small, " bytes failed with errno=", getlasterror(), "\n")
			throw("runtime: failed to decommit pages")
		}
		v = add(v, small)
		n -= small
	}
}

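// The halving strategy above is easier to see in isolation. decommitAll is a
// hypothetical sketch, not runtime code: freeRange stands in for VirtualFree
// and is assumed to fail on any range that spans two original allocations.
func decommitAll(v, n uintptr, freeRange func(v, n uintptr) bool) {
	for n > 0 {
		small := n
		// Halve the piece, keeping it page-aligned, until a free succeeds.
		for small >= 4096 && !freeRange(v, small) {
			small /= 2
			small &^= 4096 - 1
		}
		if small < 4096 {
			panic("failed to decommit pages")
		}
		// Advance past the freed prefix and retry on the remainder.
		v += small
		n -= small
	}
}
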
func sysUsed(v unsafe.Pointer, n uintptr) {
	r := stdcall4(_VirtualAlloc, uintptr(v), n, _MEM_COMMIT, _PAGE_READWRITE)
	if r == uintptr(v) {
		return
	}

	// Commit failed. See SysUnused.
	for n > 0 {
		small := n
		for small >= 4096 && stdcall4(_VirtualAlloc, uintptr(v), small, _MEM_COMMIT, _PAGE_READWRITE) == 0 {
			small /= 2
			small &^= 4096 - 1
		}
		if small < 4096 {
			print("runtime: VirtualAlloc of ", small, " bytes failed with errno=", getlasterror(), "\n")
			throw("runtime: failed to commit pages")
		}
		v = add(v, small)
		n -= small
	}
}

// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//go:nosplit
func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) {
	mSysStatDec(sysStat, n)
	r := stdcall3(_VirtualFree, uintptr(v), 0, _MEM_RELEASE)
	if r == 0 {
		print("runtime: VirtualFree of ", n, " bytes failed with errno=", getlasterror(), "\n")
		throw("runtime: failed to release pages")
	}
}

func sysFault(v unsafe.Pointer, n uintptr) {
	// SysUnused makes the memory inaccessible and prevents its reuse.
	sysUnused(v, n)
}

func sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer {
	// v is just a hint.
	// First try at v.
	// This will fail if any of [v, v+n) is already reserved.
	v = unsafe.Pointer(stdcall4(_VirtualAlloc, uintptr(v), n, _MEM_RESERVE, _PAGE_READWRITE))
	if v != nil {
		return v
	}

	// Next let the kernel choose the address.
	return unsafe.Pointer(stdcall4(_VirtualAlloc, 0, n, _MEM_RESERVE, _PAGE_READWRITE))
}

func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) {
	mSysStatInc(sysStat, n)
	p := stdcall4(_VirtualAlloc, uintptr(v), n, _MEM_COMMIT, _PAGE_READWRITE)
	if p != uintptr(v) {
		errno := getlasterror()
		print("runtime: VirtualAlloc of ", n, " bytes failed with errno=", errno, "\n")
		switch errno {
		case _ERROR_NOT_ENOUGH_MEMORY, _ERROR_COMMITMENT_LIMIT:
			throw("out of memory")
		default:
			throw("runtime: cannot map pages in arena address space")
		}
	}
}
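
The reserve-then-commit dance that sysReserve and sysMap perform can be reproduced from ordinary user code. The following Windows-only sketch (a demo built on the syscall package, not runtime code; constant names here are local stand-ins) calls VirtualAlloc with the same MEM_RESERVE and MEM_COMMIT flags defined above:

package main

import (
	"fmt"
	"syscall"
	"unsafe"
)

const (
	memCommit     = 0x1000
	memReserve    = 0x2000
	memRelease    = 0x8000
	pageReadwrite = 0x0004
)

var (
	kernel32     = syscall.NewLazyDLL("kernel32.dll")
	virtualAlloc = kernel32.NewProc("VirtualAlloc")
	virtualFree  = kernel32.NewProc("VirtualFree")
)

func main() {
	const size = 1 << 20 // 1 MiB

	// Reserve address space only: nothing is charged against the
	// commit limit yet, mirroring sysReserve.
	addr, _, callErr := virtualAlloc.Call(0, size, memReserve, pageReadwrite)
	if addr == 0 {
		panic(callErr)
	}

	// Commit the reserved range so it can be read and written,
	// mirroring sysMap.
	committed, _, callErr := virtualAlloc.Call(addr, size, memCommit, pageReadwrite)
	if committed != addr {
		panic(callErr)
	}

	*(*byte)(unsafe.Pointer(addr)) = 42 // touch the first committed byte
	fmt.Printf("reserved and committed %d bytes at %#x\n", size, addr)

	virtualFree.Call(addr, 0, memRelease) // release reservation and commitment
}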