mirror of https://github.com/golang/go
runtime: generalize {alloc,free}Stack to {alloc,free}Manual
We're going to start using manually-managed spans for GC workbufs, so
rename the allocate/free methods and pass in a pointer to the stats to
use instead of using the stack stats directly.

For #19325.

Change-Id: I37df0147ae5a8e1f3cb37d59c8e57a1fcc6f2980
Reviewed-on: https://go-review.googlesource.com/38576
Run-TryBot: Austin Clements <austin@google.com>
Reviewed-by: Rick Hudson <rlh@golang.org>
parent ab9db51e1c
commit 407c56ae9f
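
The mechanical heart of the change is the calling convention: rather than
allocStack and freeStack updating memstats.stacks_inuse directly, the new
allocManual and freeManual charge whatever in-use counter the caller passes.
A minimal standalone sketch of that pattern (hypothetical names throughout;
span, allocManual, freeManual, and pageShift here stand in for the runtime's
mspan, the mheap methods, and _PageShift, and are not the runtime's real
definitions):

    package main

    import "fmt"

    // span stands in for the runtime's mspan.
    type span struct{ npages uintptr }

    // pageShift plays the role of _PageShift; 13 (8 KiB pages) is assumed.
    const pageShift = 13

    // allocManual charges the span's bytes to the caller-supplied counter.
    func allocManual(npages uintptr, stat *uint64) *span {
        s := &span{npages: npages}
        *stat += uint64(s.npages << pageShift)
        return s
    }

    // freeManual must receive the same counter used at allocation time.
    func freeManual(s *span, stat *uint64) {
        *stat -= uint64(s.npages << pageShift)
    }

    func main() {
        // Each subsystem owns its accounting: stacks and, per the commit
        // message, soon GC workbufs can share one allocator while keeping
        // separate in-use statistics.
        var stacksInuse, workbufInuse uint64

        stk := allocManual(4, &stacksInuse)
        wb := allocManual(2, &workbufInuse)
        fmt.Println(stacksInuse, workbufInuse) // 32768 16384

        freeManual(stk, &stacksInuse)
        freeManual(wb, &workbufInuse)
        fmt.Println(stacksInuse, workbufInuse) // 0 0
    }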
src/runtime/HACKING.md
@@ -238,11 +238,11 @@ go:notinheap
 `go:notinheap` applies to type declarations. It indicates that a type
-must never be heap allocated. Specifically, pointers to this type must
-always fail the `runtime.inheap` check. The type may be used for
-global variables, for stack variables, or for objects in unmanaged
-memory (e.g., allocated with `sysAlloc`, `persistentalloc`, or
-`fixalloc`). Specifically:
+must never be allocated from the GC'd heap. Specifically, pointers to
+this type must always fail the `runtime.inheap` check. The type may be
+used for global variables, for stack variables, or for objects in
+unmanaged memory (e.g., allocated with `sysAlloc`, `persistentalloc`,
+`fixalloc`, or from a manually-managed span). Specifically:
 
 1. `new(T)`, `make([]T)`, `append([]T, ...)` and implicit heap
    allocation of T are disallowed. (Though implicit allocations are
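To make the documented rule concrete, here is a sketch of how the pragma
reads in runtime-internal source. The type is hypothetical, and the compiler
honors `//go:notinheap` only for the runtime itself, so this is illustrative
rather than something user code can compile:

    // manualBuf is a hypothetical runtime-internal type that must never
    // be allocated from the GC'd heap; pointers to it always fail
    // runtime.inheap, so stores of such pointers need no write barriers.
    //
    //go:notinheap
    type manualBuf struct {
        next *manualBuf
        n    int
    }

    // Disallowed by rule 1 above: the compiler rejects heap allocation.
    //	b := new(manualBuf)       // compile error for notinheap types
    //	s := make([]manualBuf, 4) // likewise
    //
    // Allowed: a global, a stack variable, or placement in unmanaged
    // memory from sysAlloc, persistentalloc, fixalloc, or a
    // manually-managed span.
    var globalBuf manualBuf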
src/runtime/mheap.go
@@ -664,11 +664,19 @@ func (h *mheap) alloc(npage uintptr, sizeclass int32, large bool, needzero bool)
 	return s
 }
 
-func (h *mheap) allocStack(npage uintptr) *mspan {
-	_g_ := getg()
-	if _g_ != _g_.m.g0 {
-		throw("mheap_allocstack not on g0 stack")
-	}
+// allocManual allocates a manually-managed span of npage pages and
+// adds the bytes used to *stat, which should be a memstats in-use
+// field. allocManual returns nil if allocation fails.
+//
+// The memory backing the returned span may not be zeroed if
+// span.needzero is set.
+//
+// allocManual must be called on the system stack to prevent stack
+// growth. Since this is used by the stack allocator, stack growth
+// during allocManual would self-deadlock.
+//
+//go:systemstack
+func (h *mheap) allocManual(npage uintptr, stat *uint64) *mspan {
 	lock(&h.lock)
 	s := h.allocSpanLocked(npage)
 	if s != nil {
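The "may not be zeroed" clause in the new comment is worth dwelling on:
manually-managed spans are recycled without clearing, so a caller that needs
zeroed memory must check for staleness and clear the span itself. A
standalone sketch of that contract with hypothetical names (the runtime
tracks this via the mspan.needzero field):

    package main

    import "fmt"

    type span struct {
        mem      []byte
        needzero bool
    }

    var freeList []*span

    func allocManual(nbytes int) *span {
        if n := len(freeList); n > 0 { // reuse a previously freed span
            s := freeList[n-1]
            freeList = freeList[:n-1]
            return s // contents are stale; s.needzero is true
        }
        return &span{mem: make([]byte, nbytes)} // fresh memory is zeroed
    }

    func freeManual(s *span) {
        s.needzero = true // contents are garbage for the next user
        freeList = append(freeList, s)
    }

    func main() {
        s := allocManual(8)
        s.mem[0] = 42
        freeManual(s)

        t := allocManual(8) // recycles s, including its old contents
        if t.needzero {
            for i := range t.mem {
                t.mem[i] = 0
            }
        }
        fmt.Println(t.mem[0]) // 0, even though the span was recycled
    }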
@@ -679,10 +687,10 @@ func (h *mheap) allocStack(npage uintptr) *mspan {
 		s.nelems = 0
 		s.elemsize = 0
 		s.limit = s.base() + s.npages<<_PageShift
-		memstats.stacks_inuse += uint64(s.npages << _PageShift)
+		*stat += uint64(s.npages << _PageShift)
 	}
 
-	// This unlock acts as a release barrier. See mHeap_Alloc_m.
+	// This unlock acts as a release barrier. See mheap.alloc_m.
 	unlock(&h.lock)
 
 	return s
@@ -880,14 +888,21 @@ func (h *mheap) freeSpan(s *mspan, acct int32) {
 	})
 }
 
-func (h *mheap) freeStack(s *mspan) {
-	_g_ := getg()
-	if _g_ != _g_.m.g0 {
-		throw("mheap_freestack not on g0 stack")
-	}
+// freeManual frees a manually-managed span returned by allocManual.
+// stat must be the same as the stat passed to the allocManual that
+// allocated s.
+//
+// This must only be called when gcphase == _GCoff. See mSpanState for
+// an explanation.
+//
+// freeManual must be called on the system stack to prevent stack
+// growth, just like allocManual.
+//
+//go:systemstack
+func (h *mheap) freeManual(s *mspan, stat *uint64) {
 	s.needzero = 1
 	lock(&h.lock)
-	memstats.stacks_inuse -= uint64(s.npages << _PageShift)
+	*stat -= uint64(s.npages << _PageShift)
 	h.freeSpanLocked(s, true, true, 0)
 	unlock(&h.lock)
 }
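freeManual's requirement that stat match the allocating call is load-bearing:
the byte count is added to one counter at allocation and subtracted at free,
so passing a different counter skews both. A tiny demonstration under the
same assumptions as the sketches above (pageShift = 13 standing in for
_PageShift):

    package main

    import "fmt"

    const pageShift = 13 // assumed 8 KiB pages

    func main() {
        var stacksInuse, workbufInuse uint64
        bytes := uint64(4 << pageShift) // a 4-page span

        stacksInuse += bytes  // allocManual(..., &stacksInuse)
        workbufInuse -= bytes // bug: freeManual(..., &workbufInuse)

        // stacksInuse still claims 32768 bytes in use, and workbufInuse
        // has underflowed to a huge value: both statistics are now wrong.
        fmt.Println(stacksInuse, workbufInuse)
    }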
src/runtime/stack.go
@@ -186,7 +186,7 @@ func stackpoolalloc(order uint8) gclinkptr {
 	s := list.first
 	if s == nil {
 		// no free stacks. Allocate another span worth.
-		s = mheap_.allocStack(_StackCacheSize >> _PageShift)
+		s = mheap_.allocManual(_StackCacheSize>>_PageShift, &memstats.stacks_inuse)
 		if s == nil {
 			throw("out of memory")
 		}
@@ -248,7 +248,7 @@ func stackpoolfree(x gclinkptr, order uint8) {
 		// By not freeing, we prevent step #4 until GC is done.
 		stackpool[order].remove(s)
 		s.manualFreeList = 0
-		mheap_.freeStack(s)
+		mheap_.freeManual(s, &memstats.stacks_inuse)
 	}
 }
@@ -390,7 +390,7 @@ func stackalloc(n uint32) stack {
 
 		if s == nil {
 			// Allocate a new stack from the heap.
-			s = mheap_.allocStack(npage)
+			s = mheap_.allocManual(npage, &memstats.stacks_inuse)
 			if s == nil {
 				throw("out of memory")
 			}
@@ -472,7 +472,7 @@ func stackfree(stk stack) {
 		if gcphase == _GCoff {
 			// Free the stack immediately if we're
 			// sweeping.
-			mheap_.freeStack(s)
+			mheap_.freeManual(s, &memstats.stacks_inuse)
 		} else {
 			// If the GC is running, we can't return a
 			// stack span to the heap because it could be
@@ -1166,7 +1166,7 @@ func freeStackSpans() {
 			if s.allocCount == 0 {
 				list.remove(s)
 				s.manualFreeList = 0
-				mheap_.freeStack(s)
+				mheap_.freeManual(s, &memstats.stacks_inuse)
 			}
 			s = next
 		}
@@ -1180,7 +1180,7 @@ func freeStackSpans() {
 		for s := stackLarge.free[i].first; s != nil; {
 			next := s.next
 			stackLarge.free[i].remove(s)
-			mheap_.freeStack(s)
+			mheap_.freeManual(s, &memstats.stacks_inuse)
 			s = next
 		}
 	}