runtime: allocate GC workbufs from manually-managed spans
Currently the runtime allocates workbufs from persistent memory, which
means they can never be freed. Switch to allocating them from
manually-managed heap spans. This doesn't free them yet, but it puts us
in a position to do so.

For #19325.

Change-Id: I94b2512a2f2bbbb456cd9347761b9412e80d2da9
Reviewed-on: https://go-review.googlesource.com/38581
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
commit 9cc883a466
parent 42c1214762
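The design point here is bookkeeping: persistentalloc hands out memory with no per-allocation record, so nothing can ever be returned, while span-backed allocation leaves each chunk on a list that a later change can walk to free unused memory. A minimal standalone sketch of that difference, using hypothetical names (pool, chunks, freeAll) rather than runtime APIs:

package main

import "fmt"

// pool records every chunk it hands out, so whole chunks can later be
// released. A persistentalloc-style arena has no such record and can
// only grow. This is an illustration, not runtime code.
type pool struct {
	chunks [][]byte
}

func (p *pool) alloc(n int) []byte {
	c := make([]byte, n) // stands in for a manually-managed span
	p.chunks = append(p.chunks, c)
	return c
}

func (p *pool) freeAll() {
	p.chunks = nil // with real spans, memory could be returned here
}

func main() {
	p := &pool{}
	_ = p.alloc(32 << 10)
	fmt.Println(len(p.chunks)) // 1: the allocation is findable
	p.freeAll()
	fmt.Println(len(p.chunks)) // 0: and therefore reclaimable
}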
src/runtime/mgc.go
@@ -795,6 +795,16 @@ var work struct {
 	empty lfstack                  // lock-free list of empty blocks workbuf
 	pad0  [sys.CacheLineSize]uint8 // prevents false-sharing between full/empty and nproc/nwait
 
+	wbufSpans struct {
+		lock mutex
+		// busy is a list of all spans containing workbufs on
+		// one of the workbuf lists.
+		busy mSpanList
+	}
+
+	// Restore 64-bit alignment on 32-bit.
+	_ uint32
+
 	// bytesMarked is the number of bytes marked this cycle. This
 	// includes bytes blackened in scanned objects, noscan objects
 	// that go straight to black, and permagrey objects scanned by
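Two details of the new fields are easy to miss. The busy list is what makes workbuf memory findable again, which is the prerequisite for ever freeing it. And the blank uint32 exists because 64-bit fields updated atomically (such as bytesMarked) must be 8-byte aligned even on 32-bit platforms, where the preceding word-sized fields can leave the next offset at a multiple of 4 only. A standalone sketch of the padding trick, using a hypothetical layout struct rather than the real work struct:

package main

import (
	"fmt"
	"unsafe"
)

// layout is a stand-in for the work struct above: an odd number of
// 32-bit words would leave a following uint64 misaligned on 386-class
// platforms, and 64-bit atomics require 8-byte alignment there. The
// blank uint32 pads the offset back to a multiple of 8.
type layout struct {
	words [3]uint32 // odd count of 32-bit words, like the fields above
	_     uint32    // "Restore 64-bit alignment on 32-bit."
	count uint64    // e.g. bytesMarked, updated with atomic ops
}

func main() {
	var l layout
	fmt.Println(unsafe.Offsetof(l.count)) // 16: a multiple of 8 on 32- and 64-bit alike
}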
src/runtime/mgcwork.go
@@ -12,8 +12,22 @@ import (
 
 const (
 	_WorkbufSize = 2048 // in bytes; larger values result in less contention
+
+	// workbufAlloc is the number of bytes to allocate at a time
+	// for new workbufs. This must be a multiple of pageSize and
+	// should be a multiple of _WorkbufSize.
+	//
+	// Larger values reduce workbuf allocation overhead. Smaller
+	// values reduce heap fragmentation.
+	workbufAlloc = 32 << 10
 )
 
+func init() {
+	if workbufAlloc%pageSize != 0 || workbufAlloc%_WorkbufSize != 0 {
+		throw("bad workbufAlloc")
+	}
+}
+
 // Garbage collector work pool abstraction.
 //
 // This implements a producer/consumer model for pointers to grey
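To make the constants concrete: 32 << 10 is 32768 bytes, so each refill takes 4 pages (assuming the runtime's 8 KB page size at the time of this commit) and yields 16 workbufs, and the init function guards exactly those two divisibility requirements at startup. A quick standalone restatement:

package main

import "fmt"

// Restates the init check above with the constants from the diff;
// pageSize 8192 is the runtime's page size, assumed here.
const (
	pageSize     = 8192
	_WorkbufSize = 2048     // per-workbuf size from the diff
	workbufAlloc = 32 << 10 // 32 KB allocated per refill
)

func main() {
	fmt.Println(workbufAlloc / pageSize)     // 4 pages per allocManual call
	fmt.Println(workbufAlloc / _WorkbufSize) // 16 workbufs carved per span
	fmt.Println(workbufAlloc%pageSize == 0 && workbufAlloc%_WorkbufSize == 0) // true
}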
src/runtime/mgcwork.go
@@ -318,7 +332,29 @@ func getempty() *workbuf {
 		}
 	}
 	if b == nil {
-		b = (*workbuf)(persistentalloc(unsafe.Sizeof(*b), sys.CacheLineSize, &memstats.gc_sys))
+		// Allocate more workbufs.
+		var s *mspan
+		systemstack(func() {
+			s = mheap_.allocManual(workbufAlloc/pageSize, &memstats.gc_sys)
+		})
+		if s == nil {
+			throw("out of memory")
+		}
+		// Record the new span in the busy list.
+		lock(&work.wbufSpans.lock)
+		work.wbufSpans.busy.insert(s)
+		unlock(&work.wbufSpans.lock)
+		// Slice up the span into new workbufs. Return one and
+		// put the rest on the empty list.
+		for i := uintptr(0); i+_WorkbufSize <= workbufAlloc; i += _WorkbufSize {
+			newb := (*workbuf)(unsafe.Pointer(s.base() + i))
+			newb.nobj = 0
+			if i == 0 {
+				b = newb
+			} else {
+				putempty(newb)
+			}
+		}
 	}
 	return b
 }
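The refill path does three separable things: grab a span off the system stack, record it on the busy list, and carve it into _WorkbufSize-sized pieces. The carving step is ordinary pointer arithmetic; here is a userland analogy, a sketch assuming a plain byte slice in place of mheap_.allocManual and a local slice in place of the empty lfstack:

package main

import (
	"fmt"
	"unsafe"
)

// Userland analogy of the carving loop above (not runtime code): one
// large block is sliced into fixed-size buffers, the first is handed
// back to the caller and the rest go to an "empty" pool.
const (
	bufSize   = 2048
	allocSize = 32 << 10
)

type workbuf struct {
	nobj int // number of pointers stored; 0 marks the buffer empty
}

func getempty() (*workbuf, []*workbuf) {
	block := make([]byte, allocSize) // stands in for mheap_.allocManual
	var first *workbuf
	var empties []*workbuf
	for i := 0; i+bufSize <= allocSize; i += bufSize {
		b := (*workbuf)(unsafe.Pointer(&block[i]))
		b.nobj = 0
		if i == 0 {
			first = b // hand one buffer straight back
		} else {
			empties = append(empties, b) // pool the other 15
		}
	}
	return first, empties
}

func main() {
	b, empties := getempty()
	fmt.Println(b.nobj, len(empties)) // 0 15
}

Handing back the i == 0 buffer directly spares the caller an immediate pop from the very list it just refilled; the other fifteen go through putempty for other workers to claim.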