diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 48b3f5364aa..e35a8331fab 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -351,6 +351,34 @@ func (s *mspan) layout() (size, n, total uintptr) {
 	return
 }
 
+func (s *mspan) scavenge() uintptr {
+	start := s.base()
+	end := start + s.npages<<_PageShift
+	if physPageSize > _PageSize {
+		// We can only release pages in
+		// physPageSize blocks, so round start
+		// and end in. (Otherwise, madvise
+		// will round them *out* and release
+		// more memory than we want.)
+		start = (start + physPageSize - 1) &^ (physPageSize - 1)
+		end &^= physPageSize - 1
+		if end <= start {
+			// start and end don't span a
+			// whole physical page.
+			return 0
+		}
+	}
+	len := end - start
+	released := len - (s.npreleased << _PageShift)
+	if physPageSize > _PageSize && released == 0 {
+		return 0
+	}
+	memstats.heap_released += uint64(released)
+	s.npreleased = len >> _PageShift
+	sysUnused(unsafe.Pointer(start), len)
+	return released
+}
+
 // recordspan adds a newly allocated span to h.allspans.
 //
 // This only happens the first time a span is allocated from
@@ -1087,35 +1115,12 @@ func (h *mheap) busyList(npages uintptr) *mSpanList {
 
 func scavengeTreapNode(t *treapNode, now, limit uint64) uintptr {
 	s := t.spanKey
-	var sumreleased uintptr
 	if (now-uint64(s.unusedsince)) > limit && s.npreleased != s.npages {
-		start := s.base()
-		end := start + s.npages<<_PageShift
-		if physPageSize > _PageSize {
-			// We can only release pages in
-			// physPageSize blocks, so round start
-			// and end in. (Otherwise, madvise
-			// will round them *out* and release
-			// more memory than we want.)
-			start = (start + physPageSize - 1) &^ (physPageSize - 1)
-			end &^= physPageSize - 1
-			if end <= start {
-				// start and end don't span a
-				// whole physical page.
-				return sumreleased
-			}
+		if released := s.scavenge(); released != 0 {
+			return released
 		}
-		len := end - start
-		released := len - (s.npreleased << _PageShift)
-		if physPageSize > _PageSize && released == 0 {
-			return sumreleased
-		}
-		memstats.heap_released += uint64(released)
-		sumreleased += released
-		s.npreleased = len >> _PageShift
-		sysUnused(unsafe.Pointer(start), len)
 	}
-	return sumreleased
+	return 0
 }
 
 func scavengelist(list *mSpanList, now, limit uint64) uintptr {
@@ -1128,32 +1133,7 @@ func scavengelist(list *mSpanList, now, limit uint64) uintptr {
 		if (now-uint64(s.unusedsince)) <= limit || s.npreleased == s.npages {
 			continue
 		}
-		start := s.base()
-		end := start + s.npages<<_PageShift
-		if physPageSize > _PageSize {
-			// We can only release pages in
-			// physPageSize blocks, so round start
-			// and end in. (Otherwise, madvise
-			// will round them *out* and release
-			// more memory than we want.)
-			start = (start + physPageSize - 1) &^ (physPageSize - 1)
-			end &^= physPageSize - 1
-			if end <= start {
-				// start and end don't span a
-				// whole physical page.
-				continue
-			}
-		}
-		len := end - start
-
-		released := len - (s.npreleased << _PageShift)
-		if physPageSize > _PageSize && released == 0 {
-			continue
-		}
-		memstats.heap_released += uint64(released)
-		sumreleased += released
-		s.npreleased = len >> _PageShift
-		sysUnused(unsafe.Pointer(start), len)
+		sumreleased += s.scavenge()
 	}
 	return sumreleased
 }
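
Note (not part of the patch): below is a standalone sketch of the inward rounding that the new (*mspan).scavenge method centralizes. When the OS physical page size exceeds the runtime's logical page size (_PageSize), the span bounds must be rounded *in* to physPageSize boundaries before sysUnused/madvise, because the kernel would otherwise round them *out* and release neighboring memory. The 8 KiB logical / 16 KiB physical page sizes and the roundedBounds helper are illustrative assumptions, not names from the runtime.

package main

import "fmt"

const pageSize = 8 << 10 // stand-in for the runtime's _PageSize

// roundedBounds mirrors the rounding in (*mspan).scavenge: round start up
// and end down to physPageSize boundaries so the release never touches
// memory outside the span. ok is false when no whole physical page fits.
func roundedBounds(start, npages, physPageSize uintptr) (lo, hi uintptr, ok bool) {
	end := start + npages*pageSize
	lo = (start + physPageSize - 1) &^ (physPageSize - 1) // round start up
	hi = end &^ (physPageSize - 1)                        // round end down
	return lo, hi, hi > lo
}

func main() {
	const physPageSize = 16 << 10 // hypothetical 16 KiB physical pages

	// A 3-page (24 KiB) span starting mid-physical-page: only the one
	// 16 KiB physical page it fully covers can be released.
	if lo, hi, ok := roundedBounds(0x10000+pageSize, 3, physPageSize); ok {
		fmt.Printf("release [%#x, %#x), %d bytes\n", lo, hi, hi-lo)
	}

	// An 8 KiB span can never cover a whole 16 KiB physical page; this
	// is the end <= start early return in scavenge.
	if _, _, ok := roundedBounds(0x10000+pageSize, 1, physPageSize); !ok {
		fmt.Println("1-page span: nothing to release")
	}
}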
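
A further detail the helper keeps in one place: released subtracts the pages already returned to the OS (s.npreleased << _PageShift) from the rounded length, so heap_released is not double-counted when a partially scavenged span is visited again, and the physPageSize > _PageSize && released == 0 check skips the sysUnused call entirely when there is nothing new to release.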