2016-03-01 15:57:46 -07:00
|
|
|
// Copyright 2010 The Go Authors. All rights reserved.
|
2014-11-21 11:39:01 -07:00
|
|
|
// Use of this source code is governed by a BSD-style
|
|
|
|
// license that can be found in the LICENSE file.
|
|
|
|
|
|
|
|
package runtime
|
|
|
|
|
|
|
|
import "unsafe"
|
|
|
|
|
2015-02-26 14:41:33 -07:00
|
|
|
// memDebug enables the consistency checks in memCheck. It is a
// compile-time constant so the checks cost nothing when disabled.
const memDebug = false

// bloc is the current program break: the address that the next
// successful sbrk call will return.
var bloc uintptr

// memlock protects bloc and the free list rooted at memFreelist.
var memlock mutex
|
|
|
|
|
2015-02-26 14:41:33 -07:00
|
|
|
// memHdr is the header stored at the start of every free block,
// linking it into the free list and recording its size.
type memHdr struct {
	next memHdrPtr
	size uintptr
}

// memFreelist is the head of the free-block list, sorted in
// ascending address order so adjacent blocks can be coalesced.
var memFreelist memHdrPtr // sorted in ascending order
|
|
|
|
|
|
|
|
// memHdrPtr is a *memHdr represented as a uintptr, so free-list
// links are not treated as pointers by the garbage collector.
type memHdrPtr uintptr

// ptr converts the stored address back to a *memHdr (nil when zero).
func (p memHdrPtr) ptr() *memHdr { return (*memHdr)(unsafe.Pointer(p)) }

// set stores x's address into p.
func (p *memHdrPtr) set(x *memHdr) { *p = memHdrPtr(unsafe.Pointer(x)) }
|
2015-02-26 14:41:33 -07:00
|
|
|
|
|
|
|
// memAlloc returns n bytes (rounded up to a page multiple) of zeroed
// memory, taken from the first free-list block large enough to hold
// the request, or from sbrk when none is. Returns nil only if sbrk
// fails. Callers (sysAlloc, sysReserve) hold memlock.
func memAlloc(n uintptr) unsafe.Pointer {
	n = memRound(n)
	var prevp *memHdr
	for p := memFreelist.ptr(); p != nil; p = p.next.ptr() {
		if p.size >= n {
			if p.size == n {
				// Exact fit: unlink the whole block from the list.
				if prevp != nil {
					prevp.next = p.next
				} else {
					memFreelist = p.next
				}
			} else {
				// Split: shrink the free block and carve the
				// allocation off its tail, so the block's header
				// and list links stay in place.
				p.size -= n
				p = (*memHdr)(add(unsafe.Pointer(p), p.size))
			}
			// Free blocks are zero except for their header (see
			// memFree); clear the header so the returned memory
			// is entirely zeroed.
			*p = memHdr{}
			return unsafe.Pointer(p)
		}
		prevp = p
	}
	return sbrk(n)
}
|
|
|
|
|
|
|
|
// memFree returns the n bytes at ap to the free list, zeroing the
// memory and coalescing with adjacent free blocks so the list stays
// sorted by address with no two blocks touching. Callers (sysFree)
// hold memlock.
func memFree(ap unsafe.Pointer, n uintptr) {
	n = memRound(n)
	// Keep free memory zeroed so memAlloc can hand it out without
	// clearing, and so memCheck can verify it.
	memclrNoHeapPointers(ap, n)
	bp := (*memHdr)(ap)
	bp.size = n
	bpn := uintptr(ap)
	// Empty list: the freed block becomes the whole list.
	if memFreelist == 0 {
		bp.next = 0
		memFreelist.set(bp)
		return
	}
	p := memFreelist.ptr()
	// Freed block belongs before the current head.
	if bpn < uintptr(unsafe.Pointer(p)) {
		memFreelist.set(bp)
		if bpn+bp.size == uintptr(unsafe.Pointer(p)) {
			// Freed block abuts the old head: merge and clear the
			// old head's now-interior header.
			bp.size += p.size
			bp.next = p.next
			*p = memHdr{}
		} else {
			bp.next.set(p)
		}
		return
	}
	// Find the block p after which bp should be inserted.
	for ; p.next != 0; p = p.next.ptr() {
		if bpn > uintptr(unsafe.Pointer(p)) && bpn < uintptr(unsafe.Pointer(p.next)) {
			break
		}
	}
	// Coalesce forward with p.next if they touch.
	if bpn+bp.size == uintptr(unsafe.Pointer(p.next)) {
		bp.size += p.next.ptr().size
		bp.next = p.next.ptr().next
		*p.next.ptr() = memHdr{}
	} else {
		bp.next = p.next
	}
	// Coalesce backward with p if they touch; otherwise link bp in.
	if uintptr(unsafe.Pointer(p))+p.size == bpn {
		p.size += bp.size
		p.next = bp.next
		*bp = memHdr{}
	} else {
		p.next.set(bp)
	}
}
|
|
|
|
|
|
|
|
func memCheck() {
|
|
|
|
if memDebug == false {
|
|
|
|
return
|
|
|
|
}
|
2015-11-18 08:28:59 -07:00
|
|
|
for p := memFreelist.ptr(); p != nil && p.next != 0; p = p.next.ptr() {
|
2015-02-26 14:41:33 -07:00
|
|
|
if uintptr(unsafe.Pointer(p)) == uintptr(unsafe.Pointer(p.next)) {
|
|
|
|
print("runtime: ", unsafe.Pointer(p), " == ", unsafe.Pointer(p.next), "\n")
|
|
|
|
throw("mem: infinite loop")
|
|
|
|
}
|
|
|
|
if uintptr(unsafe.Pointer(p)) > uintptr(unsafe.Pointer(p.next)) {
|
|
|
|
print("runtime: ", unsafe.Pointer(p), " > ", unsafe.Pointer(p.next), "\n")
|
|
|
|
throw("mem: unordered list")
|
|
|
|
}
|
|
|
|
if uintptr(unsafe.Pointer(p))+p.size > uintptr(unsafe.Pointer(p.next)) {
|
|
|
|
print("runtime: ", unsafe.Pointer(p), "+", p.size, " > ", unsafe.Pointer(p.next), "\n")
|
|
|
|
throw("mem: overlapping blocks")
|
|
|
|
}
|
|
|
|
for b := add(unsafe.Pointer(p), unsafe.Sizeof(memHdr{})); uintptr(b) < uintptr(unsafe.Pointer(p))+p.size; b = add(b, 1) {
|
|
|
|
if *(*byte)(b) != 0 {
|
|
|
|
print("runtime: value at addr ", b, " with offset ", uintptr(b)-uintptr(unsafe.Pointer(p)), " in block ", p, " of size ", p.size, " is not zero\n")
|
|
|
|
throw("mem: uninitialised memory")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-01-30 04:36:12 -07:00
|
|
|
func memRound(p uintptr) uintptr {
|
|
|
|
return (p + _PAGESIZE - 1) &^ (_PAGESIZE - 1)
|
|
|
|
}
|
2014-11-21 11:39:01 -07:00
|
|
|
|
|
|
|
// initBloc sets the initial program break to the first page-aligned
// address past the end of the loaded binary image.
func initBloc() {
	bloc = memRound(firstmoduledata.end)
}
|
|
|
|
|
|
|
|
func sbrk(n uintptr) unsafe.Pointer {
|
|
|
|
// Plan 9 sbrk from /sys/src/libc/9sys/sbrk.c
|
2015-01-30 04:36:12 -07:00
|
|
|
bl := bloc
|
|
|
|
n = memRound(n)
|
2014-11-21 11:39:01 -07:00
|
|
|
if brk_(unsafe.Pointer(bl+n)) < 0 {
|
|
|
|
return nil
|
|
|
|
}
|
2015-01-30 04:36:12 -07:00
|
|
|
bloc += n
|
2014-11-21 11:39:01 -07:00
|
|
|
return unsafe.Pointer(bl)
|
|
|
|
}
|
|
|
|
|
2015-04-16 15:32:18 -06:00
|
|
|
func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {
|
2015-02-26 14:41:33 -07:00
|
|
|
lock(&memlock)
|
|
|
|
p := memAlloc(n)
|
|
|
|
memCheck()
|
|
|
|
unlock(&memlock)
|
2014-11-21 11:39:01 -07:00
|
|
|
if p != nil {
|
2015-04-16 15:32:18 -06:00
|
|
|
mSysStatInc(sysStat, n)
|
2014-11-21 11:39:01 -07:00
|
|
|
}
|
|
|
|
return p
|
|
|
|
}
|
|
|
|
|
2015-04-16 15:32:18 -06:00
|
|
|
// sysFree returns the n bytes at v to the free list and debits them
// from sysStat.
func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) {
	mSysStatDec(sysStat, n)
	lock(&memlock)
	memFree(v, n)
	memCheck()
	unlock(&memlock)
}
|
|
|
|
|
|
|
|
// sysUnused is a no-op here: this allocator has no mechanism for
// hinting to the OS that a range is temporarily unused.
func sysUnused(v unsafe.Pointer, n uintptr) {
}
|
|
|
|
|
|
|
|
// sysUsed is a no-op, matching sysUnused above.
func sysUsed(v unsafe.Pointer, n uintptr) {
}
|
|
|
|
|
runtime: remove non-reserved heap logic
Currently large sysReserve calls on some OSes don't actually reserve
the memory, but just check that it can be reserved. This was important
when we called sysReserve to "reserve" many gigabytes for the heap up
front, but now that we map memory in small increments as we need it,
this complication is no longer necessary.
This has one curious side benefit: currently, on Linux, allocations
that are large enough to be rejected by mmap wind up freezing the
application for a long time before it panics. This happens because
sysReserve doesn't reserve the memory, so sysMap calls mmap_fixed,
which calls mmap, which fails because the mapping is too large.
However, mmap_fixed doesn't inspect *why* mmap fails, so it falls back
to probing every page in the desired region individually with mincore
before performing an (otherwise dangerous) MAP_FIXED mapping, which
will also fail. This takes a long time for a large region. Now this
logic is gone, so the mmap failure leads to an immediate panic.
Updates #10460.
Change-Id: I8efe88c611871cdb14f99fadd09db83e0161ca2e
Reviewed-on: https://go-review.googlesource.com/85888
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
2017-12-30 17:35:46 -07:00
|
|
|
// sysMap only updates statistics: the memory itself was already
// allocated by sysReserve.
func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) {
	// sysReserve has already allocated all heap memory,
	// but has not adjusted stats.
	mSysStatInc(sysStat, n)
}
|
|
|
|
|
|
|
|
// sysFault is a no-op: there is no way here to make a range of
// memory fault on access.
func sysFault(v unsafe.Pointer, n uintptr) {
}
|
|
|
|
|
runtime: remove non-reserved heap logic
Currently large sysReserve calls on some OSes don't actually reserve
the memory, but just check that it can be reserved. This was important
when we called sysReserve to "reserve" many gigabytes for the heap up
front, but now that we map memory in small increments as we need it,
this complication is no longer necessary.
This has one curious side benefit: currently, on Linux, allocations
that are large enough to be rejected by mmap wind up freezing the
application for a long time before it panics. This happens because
sysReserve doesn't reserve the memory, so sysMap calls mmap_fixed,
which calls mmap, which fails because the mapping is too large.
However, mmap_fixed doesn't inspect *why* mmap fails, so it falls back
to probing every page in the desired region individually with mincore
before performing an (otherwise dangerous) MAP_FIXED mapping, which
will also fail. This takes a long time for a large region. Now this
logic is gone, so the mmap failure leads to an immediate panic.
Updates #10460.
Change-Id: I8efe88c611871cdb14f99fadd09db83e0161ca2e
Reviewed-on: https://go-review.googlesource.com/85888
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
2017-12-30 17:35:46 -07:00
|
|
|
func sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer {
|
2015-02-26 14:41:33 -07:00
|
|
|
lock(&memlock)
|
|
|
|
p := memAlloc(n)
|
|
|
|
memCheck()
|
|
|
|
unlock(&memlock)
|
|
|
|
return p
|
2014-11-21 11:39:01 -07:00
|
|
|
}
|