From 70d7e96c7850d141ae48e99f02a397aed1bc474b Mon Sep 17 00:00:00 2001
From: Austin Clements
Date: Mon, 8 Oct 2018 13:51:10 -0400
Subject: [PATCH] runtime: avoid tracking spans with no objects with mcentral

Lazy mcache flushing (golang.org/cl/134783) made it so that moving a
span from an mcache to an mcentral was sometimes responsible for
sweeping the span. However, it did a "preserving" sweep, which meant
it retained ownership, even if the sweeper swept all objects in the
span. As a result, we could put a completely unused span back in the
mcentral.

Fix this by first taking back ownership of the span into the mcentral
and moving it to the right mcentral list, and then doing a
non-preserving sweep. The non-preserving sweep will move the span to
the heap if it sweeps all objects.

Change-Id: I244b1893b44b8c00264f0928ac9239449775f617
Reviewed-on: https://go-review.googlesource.com/c/140597
Run-TryBot: Austin Clements
TryBot-Result: Gobot Gobot
Reviewed-by: Michael Knyszek
---
 src/runtime/mcentral.go | 35 ++++++++++++++++++-----------------
 1 file changed, 18 insertions(+), 17 deletions(-)

diff --git a/src/runtime/mcentral.go b/src/runtime/mcentral.go
index baede31405..d94b95792c 100644
--- a/src/runtime/mcentral.go
+++ b/src/runtime/mcentral.go
@@ -153,16 +153,6 @@ func (c *mcentral) uncacheSpan(s *mspan) {
 		throw("uncaching span but s.allocCount == 0")
 	}
 
-	cap := int32((s.npages << _PageShift) / s.elemsize)
-	n := cap - int32(s.allocCount)
-
-	// cacheSpan updated alloc assuming all objects on s were
-	// going to be allocated. Adjust for any that weren't. We must
-	// do this before potentially sweeping the span.
-	if n > 0 {
-		atomic.Xadd64(&c.nmalloc, -int64(n))
-	}
-
 	sg := mheap_.sweepgen
 	stale := s.sweepgen == sg+1
 	if stale {
@@ -170,18 +160,23 @@ func (c *mcentral) uncacheSpan(s *mspan) {
 		// responsibility to sweep it.
 		//
 		// Set sweepgen to indicate it's not cached but needs
-		// sweeping. sweep will set s.sweepgen to indicate s
-		// is swept.
-		s.sweepgen = sg - 1
-		s.sweep(true)
-		// sweep may have freed objects, so recompute n.
-		n = cap - int32(s.allocCount)
+		// sweeping and can't be allocated from. sweep will
+		// set s.sweepgen to indicate s is swept.
+		atomic.Store(&s.sweepgen, sg-1)
 	} else {
 		// Indicate that s is no longer cached.
-		s.sweepgen = sg
+		atomic.Store(&s.sweepgen, sg)
 	}
 
+	cap := int32((s.npages << _PageShift) / s.elemsize)
+	n := cap - int32(s.allocCount)
 	if n > 0 {
+		// cacheSpan updated alloc assuming all objects on s
+		// were going to be allocated. Adjust for any that
+		// weren't. We must do this before potentially
+		// sweeping the span.
+		atomic.Xadd64(&c.nmalloc, -int64(n))
+
 		lock(&c.lock)
 		c.empty.remove(s)
 		c.nonempty.insert(s)
@@ -197,6 +192,12 @@ func (c *mcentral) uncacheSpan(s *mspan) {
 		}
 		unlock(&c.lock)
 	}
+
+	if stale {
+		// Now that s is in the right mcentral list, we can
+		// sweep it.
+		s.sweep(false)
+	}
 }
 
 // freeSpan updates c and s after sweeping s.
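
To see why the ordering matters without reading the runtime's internals,
here is a minimal standalone sketch. The types and functions below (span,
central, heap, sweep) are simplified stand-ins invented for illustration,
not the real mspan/mcentral/mheap API; they model only the behavior this
patch fixes: put the span back on the mcentral's list first, then run a
non-preserving sweep that may hand a fully free span to the heap.

package main

import "fmt"

// span and central are simplified stand-ins for the runtime's mspan
// and mcentral; the fields here are invented for this sketch.
type span struct {
	allocCount int // live objects in the span
	nelems     int // total object slots in the span
}

type central struct {
	nonempty []*span // spans believed to have free slots
}

type heap struct {
	free []*span // fully free spans returned to the heap
}

// sweep models a non-preserving sweep: it frees dead objects and, if
// none remain live, takes the span off the mcentral list and returns
// it to the heap. The caller must not touch s afterwards.
func sweep(s *span, c *central, h *heap, live int) {
	s.allocCount = live
	if s.allocCount == 0 {
		for i, t := range c.nonempty {
			if t == s {
				c.nonempty = append(c.nonempty[:i], c.nonempty[i+1:]...)
				break
			}
		}
		h.free = append(h.free, s)
	}
}

func main() {
	c := &central{}
	h := &heap{}
	s := &span{allocCount: 3, nelems: 8}

	// The fixed ordering: first hand ownership back to the
	// mcentral by putting s on the right list...
	c.nonempty = append(c.nonempty, s)

	// ...then do the non-preserving sweep. If it frees every
	// object, the span migrates to the heap instead of lingering
	// on the mcentral list as a completely unused span.
	sweep(s, c, h, 0) // pretend all 3 objects were dead

	fmt.Printf("mcentral list: %d span(s), heap: %d free span(s)\n",
		len(c.nonempty), len(h.free))
	// Prints: mcentral list: 0 span(s), heap: 1 free span(s)
}

Under the old ordering, the preserving sweep ran first and left
ownership with the caller, so a span whose objects had all been freed
was still inserted into the nonempty list; with the ordering above, the
sweep itself can take the empty span off the list and return it to the
heap.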