
runtime: eliminate mheap.busy* lists

The old whole-page reclaimer was the only thing that used the busy
span lists. Remove them so nothing uses them any more.

Change-Id: I4007dd2be08b9ef41bfdb0c387215c73c392cc4c
Reviewed-on: https://go-review.googlesource.com/c/138960
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
Austin Clements committed on 2018-09-27 11:50:46 -04:00
parent 5333550bdc
commit b00a6d8bfe
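
For background, the busy field being deleted below was an mSpanList, the runtime's intrusive doubly linked list of spans. Here is a minimal, self-contained sketch of that shape, with simplified and partly hypothetical fields (the real types in src/runtime/mheap.go carry many more); it is illustrative only, not the runtime's actual implementation:

package main

import "fmt"

// mspan is a pared-down stand-in for the runtime's span header: real
// spans embed their list links directly, which is what makes
// mSpanList an intrusive list (no separate node allocations).
type mspan struct {
	next   *mspan     // next span in list, or nil
	prev   *mspan     // previous span in list, or nil
	list   *mSpanList // containing list, or nil; lets inList run in O(1)
	npages uintptr    // number of pages in the span
}

// mSpanList is a doubly linked list of spans, the shape of the
// removed mheap.busy field.
type mSpanList struct {
	first *mspan
	last  *mspan
}

func (s *mspan) inList() bool { return s.list != nil }

// insertBack appends s to the tail of the list, the way alloc_m kept
// freshly swept spans at the end.
func (l *mSpanList) insertBack(s *mspan) {
	s.prev = l.last
	if l.last != nil {
		l.last.next = s
	} else {
		l.first = s
	}
	l.last = s
	s.list = l
}

// remove unlinks s in O(1), the way freeSpanLocked did before
// returning a span to the free treaps.
func (l *mSpanList) remove(s *mspan) {
	if s.prev != nil {
		s.prev.next = s.next
	} else {
		l.first = s.next
	}
	if s.next != nil {
		s.next.prev = s.prev
	} else {
		l.last = s.prev
	}
	s.next, s.prev, s.list = nil, nil, nil
}

func main() {
	var busy mSpanList
	a, b := &mspan{npages: 1}, &mspan{npages: 4}
	busy.insertBack(a)
	busy.insertBack(b)
	busy.remove(a)
	fmt.Println(a.inList(), b.inList()) // false true
	fmt.Println(busy.first.npages)      // 4
}

Because each span embeds its own links, insertBack and remove take constant time with no allocation, and a back pointer of this kind is what lets freeSpanLocked guard the unlink with a cheap s.inList() check in the last hunk below.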

src/runtime/mheap.go

@@ -30,12 +30,11 @@ const minPhysPageSize = 4096
 //go:notinheap
 type mheap struct {
 	lock      mutex
-	free      mTreap    // free and non-scavenged spans
-	scav      mTreap    // free and scavenged spans
-	busy      mSpanList // busy list of spans
-	sweepgen  uint32    // sweep generation, see comment in mspan
-	sweepdone uint32    // all spans are swept
-	sweepers  uint32    // number of active sweepone calls
+	free      mTreap // free and non-scavenged spans
+	scav      mTreap // free and scavenged spans
+	sweepgen  uint32 // sweep generation, see comment in mspan
+	sweepdone uint32 // all spans are swept
+	sweepers  uint32 // number of active sweepone calls
 
 	// allspans is a slice of all mspans ever created. Each mspan
 	// appears exactly once.
@@ -676,7 +675,6 @@ func (h *mheap) init() {
 	h.spanalloc.zero = false
 
 	// h->mapcache needs no init
-	h.busy.init()
 	for i := range h.central {
 		h.central[i].mcentral.init(spanClass(i))
 	}
@@ -893,8 +892,6 @@ func (h *mheap) alloc_m(npage uintptr, spanclass spanClass, large bool) *mspan {
 			mheap_.largealloc += uint64(s.elemsize)
 			mheap_.nlargealloc++
 			atomic.Xadd64(&memstats.heap_live, int64(npage<<_PageShift))
-			// Swept spans are at the end of lists.
-			h.busy.insertBack(s)
 		}
 	}
 	// heap_scan and heap_live were updated.
@@ -1199,9 +1196,6 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
 		memstats.heap_idle += uint64(s.npages << _PageShift)
 	}
 	s.state = mSpanFree
-	if s.inList() {
-		h.busy.remove(s)
-	}
 
 	// Stamp newly unused spans. The scavenger will use that
 	// info to potentially give back some pages to the OS.