diff --git a/src/runtime/HACKING.md b/src/runtime/HACKING.md
index ea7c5c128d8..883559c690d 100644
--- a/src/runtime/HACKING.md
+++ b/src/runtime/HACKING.md
@@ -238,11 +238,11 @@ go:notinheap
 ------------
 
 `go:notinheap` applies to type declarations. It indicates that a type
-must never be heap allocated. Specifically, pointers to this type must
-always fail the `runtime.inheap` check. The type may be used for
-global variables, for stack variables, or for objects in unmanaged
-memory (e.g., allocated with `sysAlloc`, `persistentalloc`, or
-`fixalloc`). Specifically:
+must never be allocated from the GC'd heap. Specifically, pointers to
+this type must always fail the `runtime.inheap` check. The type may be
+used for global variables, for stack variables, or for objects in
+unmanaged memory (e.g., allocated with `sysAlloc`, `persistentalloc`,
+`fixalloc`, or from a manually-managed span). Specifically:
 
 1. `new(T)`, `make([]T)`, `append([]T, ...)` and implicit heap
    allocation of T are disallowed. (Though implicit allocations are
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 80349a97318..76e56828b62 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -664,11 +664,19 @@ func (h *mheap) alloc(npage uintptr, sizeclass int32, large bool, needzero bool)
 	return s
 }
 
-func (h *mheap) allocStack(npage uintptr) *mspan {
-	_g_ := getg()
-	if _g_ != _g_.m.g0 {
-		throw("mheap_allocstack not on g0 stack")
-	}
+// allocManual allocates a manually-managed span of npage pages and
+// adds the bytes used to *stat, which should be a memstats in-use
+// field. allocManual returns nil if allocation fails.
+//
+// The memory backing the returned span may not be zeroed if
+// span.needzero is set.
+//
+// allocManual must be called on the system stack to prevent stack
+// growth. Since this is used by the stack allocator, stack growth
+// during allocManual would self-deadlock.
+//
+//go:systemstack
+func (h *mheap) allocManual(npage uintptr, stat *uint64) *mspan {
 	lock(&h.lock)
 	s := h.allocSpanLocked(npage)
 	if s != nil {
@@ -679,10 +687,10 @@ func (h *mheap) allocStack(npage uintptr) *mspan {
 		s.nelems = 0
 		s.elemsize = 0
 		s.limit = s.base() + s.npages<<_PageShift
-		memstats.stacks_inuse += uint64(s.npages << _PageShift)
+		*stat += uint64(s.npages << _PageShift)
 	}
 
-	// This unlock acts as a release barrier. See mHeap_Alloc_m.
+	// This unlock acts as a release barrier. See mheap.alloc_m.
 	unlock(&h.lock)
 
 	return s
@@ -880,14 +888,21 @@ func (h *mheap) freeSpan(s *mspan, acct int32) {
 	})
 }
 
-func (h *mheap) freeStack(s *mspan) {
-	_g_ := getg()
-	if _g_ != _g_.m.g0 {
-		throw("mheap_freestack not on g0 stack")
-	}
+// freeManual frees a manually-managed span returned by allocManual.
+// stat must be the same as the stat passed to the allocManual that
+// allocated s.
+//
+// This must only be called when gcphase == _GCoff. See mSpanState for
+// an explanation.
+//
+// freeManual must be called on the system stack to prevent stack
+// growth, just like allocManual.
+//
+//go:systemstack
+func (h *mheap) freeManual(s *mspan, stat *uint64) {
 	s.needzero = 1
 	lock(&h.lock)
-	memstats.stacks_inuse -= uint64(s.npages << _PageShift)
+	*stat -= uint64(s.npages << _PageShift)
 	h.freeSpanLocked(s, true, true, 0)
 	unlock(&h.lock)
 }
diff --git a/src/runtime/stack.go b/src/runtime/stack.go
index 9e00edde61b..562427a6a21 100644
--- a/src/runtime/stack.go
+++ b/src/runtime/stack.go
@@ -186,7 +186,7 @@ func stackpoolalloc(order uint8) gclinkptr {
 	s := list.first
 	if s == nil {
 		// no free stacks. Allocate another span worth.
-		s = mheap_.allocStack(_StackCacheSize >> _PageShift)
+		s = mheap_.allocManual(_StackCacheSize>>_PageShift, &memstats.stacks_inuse)
 		if s == nil {
 			throw("out of memory")
 		}
@@ -248,7 +248,7 @@ func stackpoolfree(x gclinkptr, order uint8) {
 		// By not freeing, we prevent step #4 until GC is done.
 		stackpool[order].remove(s)
 		s.manualFreeList = 0
-		mheap_.freeStack(s)
+		mheap_.freeManual(s, &memstats.stacks_inuse)
 	}
 }
 
@@ -390,7 +390,7 @@ func stackalloc(n uint32) stack {
 
 		if s == nil {
 			// Allocate a new stack from the heap.
-			s = mheap_.allocStack(npage)
+			s = mheap_.allocManual(npage, &memstats.stacks_inuse)
 			if s == nil {
 				throw("out of memory")
 			}
@@ -472,7 +472,7 @@ func stackfree(stk stack) {
 		if gcphase == _GCoff {
 			// Free the stack immediately if we're
 			// sweeping.
-			mheap_.freeStack(s)
+			mheap_.freeManual(s, &memstats.stacks_inuse)
 		} else {
 			// If the GC is running, we can't return a
 			// stack span to the heap because it could be
@@ -1166,7 +1166,7 @@ func freeStackSpans() {
 			if s.allocCount == 0 {
 				list.remove(s)
 				s.manualFreeList = 0
-				mheap_.freeStack(s)
+				mheap_.freeManual(s, &memstats.stacks_inuse)
 			}
 			s = next
 		}
@@ -1180,7 +1180,7 @@
 	for s := stackLarge.free[i].first; s != nil; {
 		next := s.next
 		stackLarge.free[i].remove(s)
-		mheap_.freeStack(s)
+		mheap_.freeManual(s, &memstats.stacks_inuse)
 		s = next
 	}
 }
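
To make the amended `go:notinheap` rules concrete, here is an illustrative sketch. The type and names are hypothetical, and the pragma is only honored when compiling the runtime itself (ordinary packages reject it), so treat this as a reading aid rather than a buildable program:

```go
// offHeapNode is a hypothetical notinheap type: it may live in
// globals, on stacks, or in unmanaged memory, but never in the
// GC'd heap, so pointers to it always fail runtime.inheap.
//
//go:notinheap
type offHeapNode struct {
	next *offHeapNode // pointer writes to notinheap types need no write barrier
	val  uintptr
}

// Allowed: globals of a notinheap type.
var offHeapRoot offHeapNode

func offHeapStackUse() {
	var n offHeapNode // allowed: stack allocation
	n.val = 1
	_ = n
}

// Disallowed (compile errors), per rule 1 above:
//
//	_ = new(offHeapNode)
//	_ = make([]offHeapNode, 8)
//	ns = append(ns, offHeapNode{})
```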
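The mheap.go hunks are the heart of the patch: the in-use counter becomes a caller-supplied `*uint64` instead of the hard-coded `memstats.stacks_inuse`, so one span allocator can back stacks and any other manually-managed memory. Below is a minimal, self-contained model of that accounting contract only; none of it is the runtime's real code, and `span`, `pageShift`, and the 8 KiB page size are stand-ins for illustration:

```go
package main

import "fmt"

const pageShift = 13 // assumption: 8 KiB pages, as on most platforms

// span stands in for mspan; only the page count matters here.
type span struct {
	npages uintptr
}

// allocManual models the new mheap.allocManual contract: the bytes
// used are charged to *stat, a counter the caller chooses, rather
// than to a fixed memstats field.
func allocManual(npages uintptr, stat *uint64) *span {
	*stat += uint64(npages << pageShift)
	return &span{npages: npages}
}

// freeManual models mheap.freeManual: stat must be the same counter
// that was passed to the allocManual that allocated s.
func freeManual(s *span, stat *uint64) {
	*stat -= uint64(s.npages << pageShift)
}

func main() {
	var stacksInuse uint64 // plays the role of memstats.stacks_inuse
	s := allocManual(4, &stacksInuse)
	fmt.Println(stacksInuse) // 32768: 4 pages charged on allocation
	freeManual(s, &stacksInuse)
	fmt.Println(stacksInuse) // 0: the same counter is credited on free
}
```

Note also that the hand-rolled `getg() != g0` checks disappear in favor of `//go:systemstack`, which the runtime's HACKING.md describes as checked dynamically by a special function prologue, so the system-stack requirement is still enforced, just declaratively.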