From 8ebc58452af3a586a3da1f68725bc83c78d4b073 Mon Sep 17 00:00:00 2001
From: Michael Anthony Knyszek
Date: Wed, 29 Jul 2020 20:25:05 +0000
Subject: [PATCH] runtime: delineate which memstats are system stats with a type

This change modifies the type of several mstats fields to be a new
type: sysMemStat. The new type has the same representation the fields
had before.

The purpose of this change is to make it very clear which stats may be
used in various functions for accounting (usually the platform-specific
sys* functions, but there are others). Currently there's an implicit
understanding that the *uint64 value passed to these functions is some
kind of statistic whose value is atomically managed. That understanding
isn't inherently problematic, but we're about to change how some stats
(currently updated via mSysStatInc and mSysStatDec) work, so we want to
make the requirements around "sysStat" explicit.

This change also removes mSysStatInc and mSysStatDec in favor of a
method on sysMemStat. Note that those two functions were originally
written the way they were because atomic 64-bit adds required a valid G
on ARM, but this hasn't been the case for a very long time (since
golang.org/cl/14204, and even before then it was unclear whether
mutexes still required a valid G). Today we implement 64-bit adds on
ARM with a spinlock table.

Change-Id: I4e9b37cf14afc2ae20cf736e874eb0064af086d7
Reviewed-on: https://go-review.googlesource.com/c/go/+/246971
Run-TryBot: Michael Knyszek
TryBot-Result: Go Bot
Trust: Michael Knyszek
Reviewed-by: Michael Pratt
---
 src/runtime/export_test.go |  4 +-
 src/runtime/heapdump.go    | 14 +++----
 src/runtime/malloc.go      | 10 ++---
 src/runtime/mem_aix.go     | 12 +++---
 src/runtime/mem_bsd.go     | 12 +++---
 src/runtime/mem_darwin.go  | 12 +++---
 src/runtime/mem_js.go      | 10 ++---
 src/runtime/mem_linux.go   | 12 +++---
 src/runtime/mem_plan9.go   | 12 +++---
 src/runtime/mem_windows.go | 12 +++---
 src/runtime/mfixalloc.go   |  4 +-
 src/runtime/mgcscavenge.go |  4 +-
 src/runtime/mheap.go       | 28 +++++++------
 src/runtime/mpagealloc.go  |  4 +-
 src/runtime/mranges.go     |  4 +-
 src/runtime/mstats.go      | 82 ++++++++++++++------------------
 src/runtime/os_darwin.go   |  3 +-
 17 files changed, 109 insertions(+), 130 deletions(-)

diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index 47cbc286f6..cb753ee819 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -820,7 +820,7 @@ type AddrRanges struct {
 // Add.
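The hunks that follow replace `*uint64` stat parameters with `*sysMemStat` and route every update through its methods. Below is a minimal, self-contained sketch of the shape the commit message describes; the real type lives in src/runtime/mstats.go, is backed by runtime/internal/atomic, and its methods are marked //go:nosplit, whereas this stand-in uses sync/atomic.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// sysMemStat mirrors the shape of the type this patch introduces: a 64-bit
// counter that must only be read and updated atomically. (Sketch only; the
// runtime version lives in src/runtime/mstats.go.)
type sysMemStat uint64

// load atomically reads the current value (as the heapdump.go hunk below does).
func (s *sysMemStat) load() uint64 {
	return atomic.LoadUint64((*uint64)(s))
}

// add atomically adds n, which may be negative. The runtime version also
// throws if the counter over- or underflows.
func (s *sysMemStat) add(n int64) {
	if s == nil {
		return
	}
	atomic.AddUint64((*uint64)(s), uint64(n))
}

func main() {
	var heapSys sysMemStat
	heapSys.add(1 << 20)        // e.g. sysMap charging a mapped megabyte
	heapSys.add(-(4 << 10))     // e.g. sysFree returning 4 KiB
	fmt.Println(heapSys.load()) // 1044480
}
```

The type exists purely to mark which counters carry the "atomically managed system stat" contract; its representation is still a plain uint64.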
func NewAddrRanges() AddrRanges { r := addrRanges{} - r.init(new(uint64)) + r.init(new(sysMemStat)) return AddrRanges{r, true} } @@ -844,7 +844,7 @@ func MakeAddrRanges(a ...AddrRange) AddrRanges { return AddrRanges{addrRanges{ ranges: ranges, totalBytes: total, - sysStat: new(uint64), + sysStat: new(sysMemStat), }, false} } diff --git a/src/runtime/heapdump.go b/src/runtime/heapdump.go index 4c35309211..495ecc5164 100644 --- a/src/runtime/heapdump.go +++ b/src/runtime/heapdump.go @@ -548,20 +548,20 @@ func dumpmemstats() { dumpint(memstats.nmalloc) dumpint(memstats.nfree) dumpint(memstats.heap_alloc) - dumpint(memstats.heap_sys) + dumpint(memstats.heap_sys.load()) dumpint(memstats.heap_idle) dumpint(memstats.heap_inuse) dumpint(memstats.heap_released) dumpint(memstats.heap_objects) dumpint(memstats.stacks_inuse) - dumpint(memstats.stacks_sys) + dumpint(memstats.stacks_sys.load()) dumpint(memstats.mspan_inuse) - dumpint(memstats.mspan_sys) + dumpint(memstats.mspan_sys.load()) dumpint(memstats.mcache_inuse) - dumpint(memstats.mcache_sys) - dumpint(memstats.buckhash_sys) - dumpint(memstats.gc_sys) - dumpint(memstats.other_sys) + dumpint(memstats.mcache_sys.load()) + dumpint(memstats.buckhash_sys.load()) + dumpint(memstats.gc_sys.load()) + dumpint(memstats.other_sys.load()) dumpint(memstats.next_gc) dumpint(memstats.last_gc_unix) dumpint(memstats.pause_total_ns) diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go index 0f48d7f68e..27d678d917 100644 --- a/src/runtime/malloc.go +++ b/src/runtime/malloc.go @@ -1313,7 +1313,7 @@ var persistentChunks *notInHeap // The returned memory will be zeroed. // // Consider marking persistentalloc'd types go:notinheap. -func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer { +func persistentalloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer { var p *notInHeap systemstack(func() { p = persistentalloc1(size, align, sysStat) @@ -1324,7 +1324,7 @@ func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer { // Must run on system stack because stack growth can (re)invoke it. // See issue 9174. //go:systemstack -func persistentalloc1(size, align uintptr, sysStat *uint64) *notInHeap { +func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap { const ( maxBlock = 64 << 10 // VM reservation granularity is 64K on windows ) @@ -1383,8 +1383,8 @@ func persistentalloc1(size, align uintptr, sysStat *uint64) *notInHeap { } if sysStat != &memstats.other_sys { - mSysStatInc(sysStat, size) - mSysStatDec(&memstats.other_sys, size) + sysStat.add(int64(size)) + memstats.other_sys.add(-int64(size)) } return p } @@ -1425,7 +1425,7 @@ func (l *linearAlloc) init(base, size uintptr) { l.end = base + size } -func (l *linearAlloc) alloc(size, align uintptr, sysStat *uint64) unsafe.Pointer { +func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer { p := alignUp(l.next, align) if p+size > l.end { return nil diff --git a/src/runtime/mem_aix.go b/src/runtime/mem_aix.go index 7e145b072a..957aa4dcc2 100644 --- a/src/runtime/mem_aix.go +++ b/src/runtime/mem_aix.go @@ -11,7 +11,7 @@ import ( // Don't split the stack as this method may be invoked without a valid G, which // prevents us from allocating more stack. 
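One subtlety in the malloc.go hunk above: persistentalloc chunks are charged to memstats.other_sys when they are first mapped, so when a caller passes a more specific stat, the bytes are re-attributed by adding to that stat and subtracting the same amount from other_sys, leaving the total unchanged. A minimal sketch of that invariant, with plain atomic int64 counters standing in for the sysMemStat fields (the names here are illustrative, not the runtime's):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// Illustrative stand-ins for memstats fields; in the patch they are sysMemStat.
var otherSys, mspanSys int64

// chargePersistent mimics the tail of persistentalloc1: the chunk's bytes were
// already counted in otherSys when the chunk was mapped, so a more specific
// stat takes ownership of size bytes and otherSys gives them up.
func chargePersistent(size int64, stat *int64) {
	if stat != &otherSys {
		atomic.AddInt64(stat, size)
		atomic.AddInt64(&otherSys, -size)
	}
}

func main() {
	// Pretend a 64 KiB persistent chunk was mapped and charged to otherSys.
	atomic.AddInt64(&otherSys, 64<<10)

	// 4 KiB is carved out of it on behalf of a more specific stat.
	chargePersistent(4<<10, &mspanSys)

	total := atomic.LoadInt64(&otherSys) + atomic.LoadInt64(&mspanSys)
	fmt.Println(otherSys, mspanSys, total) // 61440 4096 65536: the sum is unchanged
}
```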
//go:nosplit -func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer { +func sysAlloc(n uintptr, sysStat *sysMemStat) unsafe.Pointer { p, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0) if err != 0 { if err == _EACCES { @@ -24,7 +24,7 @@ func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer { } return nil } - mSysStatInc(sysStat, n) + sysStat.add(int64(n)) return p } @@ -41,8 +41,8 @@ func sysHugePage(v unsafe.Pointer, n uintptr) { // Don't split the stack as this function may be invoked without a valid G, // which prevents us from allocating more stack. //go:nosplit -func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) { - mSysStatDec(sysStat, n) +func sysFree(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) { + sysStat.add(-int64(n)) munmap(v, n) } @@ -59,8 +59,8 @@ func sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer { return p } -func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) { - mSysStatInc(sysStat, n) +func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) { + sysStat.add(int64(n)) // AIX does not allow mapping a range that is already mapped. // So, call mprotect to change permissions. diff --git a/src/runtime/mem_bsd.go b/src/runtime/mem_bsd.go index 4d860e7bd3..bc672019fb 100644 --- a/src/runtime/mem_bsd.go +++ b/src/runtime/mem_bsd.go @@ -13,12 +13,12 @@ import ( // Don't split the stack as this function may be invoked without a valid G, // which prevents us from allocating more stack. //go:nosplit -func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer { +func sysAlloc(n uintptr, sysStat *sysMemStat) unsafe.Pointer { v, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0) if err != 0 { return nil } - mSysStatInc(sysStat, n) + sysStat.add(int64(n)) return v } @@ -35,8 +35,8 @@ func sysHugePage(v unsafe.Pointer, n uintptr) { // Don't split the stack as this function may be invoked without a valid G, // which prevents us from allocating more stack. //go:nosplit -func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) { - mSysStatDec(sysStat, n) +func sysFree(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) { + sysStat.add(-int64(n)) munmap(v, n) } @@ -65,8 +65,8 @@ func sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer { const _sunosEAGAIN = 11 const _ENOMEM = 12 -func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) { - mSysStatInc(sysStat, n) +func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) { + sysStat.add(int64(n)) p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0) if err == _ENOMEM || ((GOOS == "solaris" || GOOS == "illumos") && err == _sunosEAGAIN) { diff --git a/src/runtime/mem_darwin.go b/src/runtime/mem_darwin.go index 3b5d565b0f..7fccd2bb8e 100644 --- a/src/runtime/mem_darwin.go +++ b/src/runtime/mem_darwin.go @@ -11,12 +11,12 @@ import ( // Don't split the stack as this function may be invoked without a valid G, // which prevents us from allocating more stack. //go:nosplit -func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer { +func sysAlloc(n uintptr, sysStat *sysMemStat) unsafe.Pointer { v, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0) if err != 0 { return nil } - mSysStatInc(sysStat, n) + sysStat.add(int64(n)) return v } @@ -39,8 +39,8 @@ func sysHugePage(v unsafe.Pointer, n uintptr) { // Don't split the stack as this function may be invoked without a valid G, // which prevents us from allocating more stack. 
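The mem_aix.go, mem_bsd.go and mem_darwin.go hunks above all follow the same contract: the sys* call that makes memory count against the process (sysAlloc for a fresh anonymous mapping, sysMap when a reservation is committed) adds n to the caller-supplied stat, sysFree subtracts the same n before unmapping, and the stat is only touched once the mapping call has succeeded. A hedged sketch of that contract against a stubbed-out mapping layer (osMap and osUnmap are invented stand-ins, not runtime functions):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// mapped stands in for the OS view: how many bytes the "kernel" thinks
// this process has mapped.
var mapped int64

func osMap(n int64) bool { mapped += n; return true } // stub for mmap
func osUnmap(n int64)    { mapped -= n }              // stub for munmap

// sysAlloc mirrors the mem_*.go shape: try to map, and only on success
// charge the caller-supplied stat.
func sysAlloc(n int64, sysStat *int64) bool {
	if !osMap(n) {
		return false
	}
	atomic.AddInt64(sysStat, n)
	return true
}

// sysFree is the inverse: give the bytes back to the stat, then unmap.
func sysFree(n int64, sysStat *int64) {
	atomic.AddInt64(sysStat, -n)
	osUnmap(n)
}

func main() {
	var heapSys int64 // stand-in for memstats.heap_sys
	sysAlloc(1<<20, &heapSys)
	sysFree(256<<10, &heapSys)
	fmt.Println(heapSys, mapped) // 786432 786432: stat and mapping stay in sync
}
```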
//go:nosplit -func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) { - mSysStatDec(sysStat, n) +func sysFree(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) { + sysStat.add(-int64(n)) munmap(v, n) } @@ -58,8 +58,8 @@ func sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer { const _ENOMEM = 12 -func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) { - mSysStatInc(sysStat, n) +func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) { + sysStat.add(int64(n)) p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0) if err == _ENOMEM { diff --git a/src/runtime/mem_js.go b/src/runtime/mem_js.go index 092b3d4fa2..957ed36ffa 100644 --- a/src/runtime/mem_js.go +++ b/src/runtime/mem_js.go @@ -13,7 +13,7 @@ import ( // Don't split the stack as this function may be invoked without a valid G, // which prevents us from allocating more stack. //go:nosplit -func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer { +func sysAlloc(n uintptr, sysStat *sysMemStat) unsafe.Pointer { p := sysReserve(nil, n) sysMap(p, n, sysStat) return p @@ -31,8 +31,8 @@ func sysHugePage(v unsafe.Pointer, n uintptr) { // Don't split the stack as this function may be invoked without a valid G, // which prevents us from allocating more stack. //go:nosplit -func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) { - mSysStatDec(sysStat, n) +func sysFree(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) { + sysStat.add(-int64(n)) } func sysFault(v unsafe.Pointer, n uintptr) { @@ -80,6 +80,6 @@ func growMemory(pages int32) int32 // This allows the front-end to replace the old DataView object with a new one. func resetMemoryDataView() -func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) { - mSysStatInc(sysStat, n) +func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) { + sysStat.add(int64(n)) } diff --git a/src/runtime/mem_linux.go b/src/runtime/mem_linux.go index 59b0bca970..3436851091 100644 --- a/src/runtime/mem_linux.go +++ b/src/runtime/mem_linux.go @@ -17,7 +17,7 @@ const ( // Don't split the stack as this method may be invoked without a valid G, which // prevents us from allocating more stack. //go:nosplit -func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer { +func sysAlloc(n uintptr, sysStat *sysMemStat) unsafe.Pointer { p, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0) if err != 0 { if err == _EACCES { @@ -30,7 +30,7 @@ func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer { } return nil } - mSysStatInc(sysStat, n) + sysStat.add(int64(n)) return p } @@ -144,8 +144,8 @@ func sysHugePage(v unsafe.Pointer, n uintptr) { // Don't split the stack as this function may be invoked without a valid G, // which prevents us from allocating more stack. 
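On js/wasm (the mem_js.go hunk above) there is no separate commit step, so sysAlloc is simply sysReserve followed by sysMap, and the stat is charged only in sysMap; sysFree does nothing but give the bytes back to the stat. A toy sketch of where in that reserve/map lifecycle the accounting happens (the linear-memory model here is heavily simplified):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// brk is a toy linear memory break, standing in for the wasm memory that
// mem_js.go grows; addresses are just offsets.
var brk int64

// sysReserve only grows the linear memory; it does not touch any stat.
func sysReserve(n int64) int64 {
	p := brk
	brk += n
	return p
}

// sysMap is where the bytes start counting against the caller's stat,
// matching the mem_js.go hunk where sysMap only does sysStat.add(int64(n)).
func sysMap(n int64, sysStat *int64) {
	atomic.AddInt64(sysStat, n)
}

// sysAlloc is reserve+map, as in mem_js.go.
func sysAlloc(n int64, sysStat *int64) int64 {
	p := sysReserve(n)
	sysMap(n, sysStat)
	return p
}

func main() {
	var heapSys int64
	p := sysAlloc(64<<10, &heapSys)
	fmt.Println(p, heapSys) // 0 65536
}
```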
//go:nosplit -func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) { - mSysStatDec(sysStat, n) +func sysFree(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) { + sysStat.add(-int64(n)) munmap(v, n) } @@ -161,8 +161,8 @@ func sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer { return p } -func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) { - mSysStatInc(sysStat, n) +func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) { + sysStat.add(int64(n)) p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0) if err == _ENOMEM { diff --git a/src/runtime/mem_plan9.go b/src/runtime/mem_plan9.go index 4fea851cdd..53d8e6dffa 100644 --- a/src/runtime/mem_plan9.go +++ b/src/runtime/mem_plan9.go @@ -140,19 +140,19 @@ func sbrk(n uintptr) unsafe.Pointer { return unsafe.Pointer(bl) } -func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer { +func sysAlloc(n uintptr, sysStat *sysMemStat) unsafe.Pointer { lock(&memlock) p := memAlloc(n) memCheck() unlock(&memlock) if p != nil { - mSysStatInc(sysStat, n) + sysStat.add(int64(n)) } return p } -func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) { - mSysStatDec(sysStat, n) +func sysFree(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) { + sysStat.add(-int64(n)) lock(&memlock) if uintptr(v)+n == bloc { // Address range being freed is at the end of memory, @@ -176,10 +176,10 @@ func sysUsed(v unsafe.Pointer, n uintptr) { func sysHugePage(v unsafe.Pointer, n uintptr) { } -func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) { +func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) { // sysReserve has already allocated all heap memory, // but has not adjusted stats. - mSysStatInc(sysStat, n) + sysStat.add(int64(n)) } func sysFault(v unsafe.Pointer, n uintptr) { diff --git a/src/runtime/mem_windows.go b/src/runtime/mem_windows.go index 165062ec27..3a805b9767 100644 --- a/src/runtime/mem_windows.go +++ b/src/runtime/mem_windows.go @@ -24,8 +24,8 @@ const ( // Don't split the stack as this function may be invoked without a valid G, // which prevents us from allocating more stack. //go:nosplit -func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer { - mSysStatInc(sysStat, n) +func sysAlloc(n uintptr, sysStat *sysMemStat) unsafe.Pointer { + sysStat.add(int64(n)) return unsafe.Pointer(stdcall4(_VirtualAlloc, 0, n, _MEM_COMMIT|_MEM_RESERVE, _PAGE_READWRITE)) } @@ -97,8 +97,8 @@ func sysHugePage(v unsafe.Pointer, n uintptr) { // Don't split the stack as this function may be invoked without a valid G, // which prevents us from allocating more stack. 
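Plan 9 (the mem_plan9.go hunk above) manages memory with an sbrk-style break and a free list guarded by memlock, so the stat is charged only after memAlloc actually returns memory. A small sketch of that "lock, try to allocate, account only on success" shape, with a fake 1 MiB break limit (the cap and names are invented for the example):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

var (
	memLock sync.Mutex       // stand-in for the runtime's memlock
	bloc    int64            // current break; everything below it is "allocated"
	blocMax int64 = 1 << 20  // pretend the OS refuses to grow past 1 MiB
)

// memAlloc bumps the break if there is room, like a very reduced sbrk.
func memAlloc(n int64) (int64, bool) {
	if bloc+n > blocMax {
		return 0, false // sbrk failed
	}
	p := bloc
	bloc += n
	return p, true
}

// sysAlloc mirrors mem_plan9.go: allocate under the lock, then charge the
// stat only if memory was actually handed back.
func sysAlloc(n int64, sysStat *int64) (int64, bool) {
	memLock.Lock()
	p, ok := memAlloc(n)
	memLock.Unlock()
	if ok {
		atomic.AddInt64(sysStat, n)
	}
	return p, ok
}

func main() {
	var heapSys int64
	_, ok1 := sysAlloc(768<<10, &heapSys)
	_, ok2 := sysAlloc(512<<10, &heapSys) // exceeds the 1 MiB cap, fails
	fmt.Println(ok1, ok2, heapSys)        // true false 786432
}
```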
//go:nosplit -func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) { - mSysStatDec(sysStat, n) +func sysFree(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) { + sysStat.add(-int64(n)) r := stdcall3(_VirtualFree, uintptr(v), 0, _MEM_RELEASE) if r == 0 { print("runtime: VirtualFree of ", n, " bytes failed with errno=", getlasterror(), "\n") @@ -124,6 +124,6 @@ func sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer { return unsafe.Pointer(stdcall4(_VirtualAlloc, 0, n, _MEM_RESERVE, _PAGE_READWRITE)) } -func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) { - mSysStatInc(sysStat, n) +func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) { + sysStat.add(int64(n)) } diff --git a/src/runtime/mfixalloc.go b/src/runtime/mfixalloc.go index f9dd6ca474..293c16b38b 100644 --- a/src/runtime/mfixalloc.go +++ b/src/runtime/mfixalloc.go @@ -32,7 +32,7 @@ type fixalloc struct { chunk uintptr // use uintptr instead of unsafe.Pointer to avoid write barriers nchunk uint32 inuse uintptr // in-use bytes now - stat *uint64 + stat *sysMemStat zero bool // zero allocations } @@ -49,7 +49,7 @@ type mlink struct { // Initialize f to allocate objects of the given size, // using the allocator to obtain chunks of memory. -func (f *fixalloc) init(size uintptr, first func(arg, p unsafe.Pointer), arg unsafe.Pointer, stat *uint64) { +func (f *fixalloc) init(size uintptr, first func(arg, p unsafe.Pointer), arg unsafe.Pointer, stat *sysMemStat) { f.size = size f.first = first f.arg = arg diff --git a/src/runtime/mgcscavenge.go b/src/runtime/mgcscavenge.go index 6328b295ca..8b1a0be353 100644 --- a/src/runtime/mgcscavenge.go +++ b/src/runtime/mgcscavenge.go @@ -100,7 +100,7 @@ const ( // heapRetained returns an estimate of the current heap RSS. func heapRetained() uint64 { - return atomic.Load64(&memstats.heap_sys) - atomic.Load64(&memstats.heap_released) + return memstats.heap_sys.load() - atomic.Load64(&memstats.heap_released) } // gcPaceScavenger updates the scavenger's pacing, particularly @@ -711,7 +711,7 @@ func (p *pageAlloc) scavengeRangeLocked(ci chunkIdx, base, npages uint) uintptr // Update global accounting only when not in test, otherwise // the runtime's accounting will be wrong. - mSysStatInc(&memstats.heap_released, uintptr(npages)*pageSize) + atomic.Xadd64(&memstats.heap_released, int64(npages)*pageSize) return addr } diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go index df659e222b..27c1bfbcf1 100644 --- a/src/runtime/mheap.go +++ b/src/runtime/mheap.go @@ -1222,22 +1222,22 @@ HaveSpan: // sysUsed all the pages that are actually available // in the span since some of them might be scavenged. sysUsed(unsafe.Pointer(base), nbytes) - mSysStatDec(&memstats.heap_released, scav) + atomic.Xadd64(&memstats.heap_released, -int64(scav)) } // Update stats. switch typ { case spanAllocHeap: - mSysStatInc(&memstats.heap_inuse, nbytes) + atomic.Xadd64(&memstats.heap_inuse, int64(nbytes)) case spanAllocStack: - mSysStatInc(&memstats.stacks_inuse, nbytes) + atomic.Xadd64(&memstats.stacks_inuse, int64(nbytes)) case spanAllocPtrScalarBits, spanAllocWorkBuf: - mSysStatInc(&memstats.gc_sys, nbytes) + memstats.gc_sys.add(int64(nbytes)) } if typ.manual() { // Manually managed memory doesn't count toward heap_sys. - mSysStatDec(&memstats.heap_sys, nbytes) + memstats.heap_sys.add(-int64(nbytes)) } - mSysStatDec(&memstats.heap_idle, nbytes) + atomic.Xadd64(&memstats.heap_idle, -int64(nbytes)) // Publish the span in various locations. 
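The mheap.go hunk above shows that the patch deliberately leaves some counters as plain uint64s: heap_inuse, stacks_inuse, heap_idle and heap_released keep being updated with atomic.Xadd64, and only the memory-obtained-from-the-OS stats (heap_sys, gc_sys, and so on) become sysMemStat. allocSpan therefore mixes the two styles and, for manually managed spans, moves the bytes out of heap_sys. A simplified sketch of that bookkeeping switch (the field names are stand-ins, and the ptr/scalar-bits span type is omitted):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type spanAllocType int

const (
	spanAllocHeap spanAllocType = iota
	spanAllocStack
	spanAllocWorkBuf
)

// manual reports whether the span type is manually managed (not part of the
// Go heap proper), mirroring spanAllocType.manual in the runtime.
func (t spanAllocType) manual() bool { return t != spanAllocHeap }

// Stand-ins for memstats fields. In the patch, heapSys/gcSys are sysMemStat
// while heapInuse/stacksInuse/heapIdle stay plain uint64s behind atomic.Xadd64.
var heapSys, gcSys, heapInuse, stacksInuse, heapIdle int64

// accountAlloc mirrors the stats block of mheap.allocSpan for nbytes of pages
// taken out of the page allocator (a sketch, not the runtime's code).
func accountAlloc(nbytes int64, typ spanAllocType) {
	switch typ {
	case spanAllocHeap:
		atomic.AddInt64(&heapInuse, nbytes)
	case spanAllocStack:
		atomic.AddInt64(&stacksInuse, nbytes)
	case spanAllocWorkBuf:
		atomic.AddInt64(&gcSys, nbytes)
	}
	if typ.manual() {
		// Manually managed memory doesn't count toward heap_sys.
		atomic.AddInt64(&heapSys, -nbytes)
	}
	atomic.AddInt64(&heapIdle, -nbytes)
}

func main() {
	// Pretend 1 MiB was mapped for the heap and is currently idle.
	heapSys, heapIdle = 1<<20, 1<<20

	accountAlloc(64<<10, spanAllocHeap)  // an ordinary heap span
	accountAlloc(32<<10, spanAllocStack) // a stack span: manual memory

	fmt.Println(heapSys, heapInuse, stacksInuse, heapIdle)
	// 1015808 65536 32768 950272
}
```

freeSpanLocked, in the next hunk, applies exactly the inverse updates ("Mirrors the code in allocSpan").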
@@ -1314,8 +1314,8 @@ func (h *mheap) grow(npage uintptr) bool { // The allocation is always aligned to the heap arena // size which is always > physPageSize, so its safe to // just add directly to heap_released. - mSysStatInc(&memstats.heap_released, asize) - mSysStatInc(&memstats.heap_idle, asize) + atomic.Xadd64(&memstats.heap_released, int64(asize)) + atomic.Xadd64(&memstats.heap_idle, int64(asize)) // Recalculate nBase. // We know this won't overflow, because sysAlloc returned @@ -1400,18 +1400,20 @@ func (h *mheap) freeSpanLocked(s *mspan, typ spanAllocType) { // Update stats. // // Mirrors the code in allocSpan. + nbytes := s.npages * pageSize switch typ { case spanAllocHeap: - mSysStatDec(&memstats.heap_inuse, s.npages*pageSize) + atomic.Xadd64(&memstats.heap_inuse, -int64(nbytes)) case spanAllocStack: - mSysStatDec(&memstats.stacks_inuse, s.npages*pageSize) + atomic.Xadd64(&memstats.stacks_inuse, -int64(nbytes)) case spanAllocPtrScalarBits, spanAllocWorkBuf: - mSysStatDec(&memstats.gc_sys, s.npages*pageSize) + memstats.gc_sys.add(-int64(nbytes)) } if typ.manual() { - mSysStatInc(&memstats.heap_sys, s.npages*pageSize) + // Manually managed memory doesn't count toward heap_sys, so add it back. + memstats.heap_sys.add(int64(nbytes)) } - mSysStatInc(&memstats.heap_idle, s.npages*pageSize) + atomic.Xadd64(&memstats.heap_idle, int64(nbytes)) // Mark the space as free. h.pages.free(s.base(), s.npages) diff --git a/src/runtime/mpagealloc.go b/src/runtime/mpagealloc.go index 560babed03..2af1c97e0b 100644 --- a/src/runtime/mpagealloc.go +++ b/src/runtime/mpagealloc.go @@ -293,13 +293,13 @@ type pageAlloc struct { // sysStat is the runtime memstat to update when new system // memory is committed by the pageAlloc for allocation metadata. - sysStat *uint64 + sysStat *sysMemStat // Whether or not this struct is being used in tests. test bool } -func (p *pageAlloc) init(mheapLock *mutex, sysStat *uint64) { +func (p *pageAlloc) init(mheapLock *mutex, sysStat *sysMemStat) { if levelLogPages[0] > logMaxPackedValue { // We can't represent 1< 0 && int64(val) < n) || (n < 0 && int64(val)+n < n) { + print("runtime: val=", val, " n=", n, "\n") + throw("sysMemStat overflow") } } diff --git a/src/runtime/os_darwin.go b/src/runtime/os_darwin.go index 394bd6fb0f..3f5bb7cf96 100644 --- a/src/runtime/os_darwin.go +++ b/src/runtime/os_darwin.go @@ -198,7 +198,6 @@ func newosproc(mp *m) { exit(1) } mp.g0.stack.hi = stacksize // for mstart - //mSysStatInc(&memstats.stacks_sys, stacksize) //TODO: do this? // Tell the pthread library we won't join with this thread. if pthread_attr_setdetachstate(&attr, _PTHREAD_CREATE_DETACHED) != 0 { @@ -247,7 +246,7 @@ func newosproc0(stacksize uintptr, fn uintptr) { exit(1) } g0.stack.hi = stacksize // for mstart - mSysStatInc(&memstats.stacks_sys, stacksize) + memstats.stacks_sys.add(int64(stacksize)) // Tell the pthread library we won't join with this thread. if pthread_attr_setdetachstate(&attr, _PTHREAD_CREATE_DETACHED) != 0 {
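The overflow check that comes with the new add method (the `throw("sysMemStat overflow")` fragment above) reads a little cryptically: val is the counter's value after the atomic add and n is the signed delta, so a positive n that leaves int64(val) below n means the counter wrapped, and a negative n that leaves the signed view of the counter negative means more bytes were subtracted than were ever added. A small demonstration of the predicate outside the runtime (add here returns an error where the runtime throws):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// statOverflowed reproduces the predicate from the new sysMemStat.add:
// val is the counter's value after the atomic add, n is the delta.
func statOverflowed(val uint64, n int64) bool {
	return (n > 0 && int64(val) < n) || (n < 0 && int64(val)+n < n)
}

// add is a non-throwing stand-in for sysMemStat.add.
func add(s *uint64, n int64) error {
	val := atomic.AddUint64(s, uint64(n))
	if statOverflowed(val, n) {
		return fmt.Errorf("sysMemStat overflow: val=%d n=%d", val, n)
	}
	return nil
}

func main() {
	var stat uint64
	fmt.Println(add(&stat, 4096))  // <nil>
	fmt.Println(add(&stat, -8192)) // error: released more than was ever accounted
}
```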