Mirror of https://github.com/golang/go, synced 2024-11-26 10:08:23 -07:00
[dev.typeparams] runtime: simplify defer record allocation
Now that deferred functions are always argumentless and defer records no longer carry arguments, defer records can be fixed size (just the _defer struct). This allows us to simplify the allocation of defer records: specifically, remove the defer size classes and the pools of different-sized defers.

Change-Id: Icc4b16afc23b38262ca9dd1f7369ad40874cf701
Reviewed-on: https://go-review.googlesource.com/c/go/+/326062
Trust: Cherry Mui <cherryyz@google.com>
Run-TryBot: Cherry Mui <cherryyz@google.com>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
This commit is contained in: parent 4468e1cfb9, commit e0e9fb8aff
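The core idea of the change: once a defer record no longer carries a variable-sized argument block, every record is exactly unsafe.Sizeof(_defer{}) bytes, so a single free list can recycle them and the per-size-class machinery (deferclass, totaldefersize, the [5]-element pool arrays) can be deleted. Below is a minimal standalone sketch of that simplification, under hypothetical names (record, alloc, free); it is an analogy, not the runtime code.

package main

import "fmt"

// record stands in for the runtime's _defer: fixed size, recycled
// through an intrusive singly linked free list.
type record struct {
	fn   func()
	link *record // next free record when on the free list
}

var freeList *record

// alloc pops a recycled record or allocates a fresh one. Because every
// record is the same size, no size-class lookup is needed.
func alloc() *record {
	if d := freeList; d != nil {
		freeList = d.link
		d.link = nil
		return d
	}
	return new(record)
}

// free zeroes the record and pushes it back onto the free list.
func free(d *record) {
	*d = record{}
	d.link = freeList
	freeList = d
}

func main() {
	d := alloc()
	d.fn = func() { fmt.Println("deferred work runs") }
	d.fn()
	free(d)
}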
cmd/compile/internal/test/inl_test.go

@@ -42,7 +42,6 @@ func TestIntendedInlining(t *testing.T) {
 		"bucketMask",
 		"bucketShift",
 		"chanbuf",
-		"deferclass",
 		"evacuated",
 		"fastlog2",
 		"fastrand",
@@ -63,7 +62,6 @@ func TestIntendedInlining(t *testing.T) {
 		"subtract1",
 		"subtractb",
 		"tophash",
-		"totaldefersize",
 		"(*bmap).keys",
 		"(*bmap).overflow",
 		"(*waitq).enqueue",
runtime/malloc.go

@@ -420,8 +420,6 @@ func mallocinit() {
 		throw("bad TinySizeClass")
 	}
 
-	testdefersizes()
-
 	if heapArenaBitmapBytes&(heapArenaBitmapBytes-1) != 0 {
 		// heapBits expects modular arithmetic on bitmap
 		// addresses to work.
@@ -1088,15 +1086,6 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 
 	var scanSize uintptr
 	if !noscan {
-		// If allocating a defer+arg block, now that we've picked a malloc size
-		// large enough to hold everything, cut the "asked for" size down to
-		// just the defer header, so that the GC bitmap will record the arg block
-		// as containing nothing at all (as if it were unused space at the end of
-		// a malloc block caused by size rounding).
-		// The defer arg areas are scanned as part of scanstack.
-		if typ == deferType {
-			dataSize = unsafe.Sizeof(_defer{})
-		}
 		heapBitsSetType(uintptr(x), size, dataSize, typ)
 		if dataSize > typ.size {
 			// Array allocation. If there are any
runtime/mgc.go

@@ -1558,19 +1558,17 @@ func clearpools() {
 	sched.sudogcache = nil
 	unlock(&sched.sudoglock)
 
-	// Clear central defer pools.
+	// Clear central defer pool.
 	// Leave per-P pools alone, they have strictly bounded size.
 	lock(&sched.deferlock)
-	for i := range sched.deferpool {
-		// disconnect cached list before dropping it on the floor,
-		// so that a dangling ref to one entry does not pin all of them.
-		var d, dlink *_defer
-		for d = sched.deferpool[i]; d != nil; d = dlink {
-			dlink = d.link
-			d.link = nil
-		}
-		sched.deferpool[i] = nil
-	}
+	// disconnect cached list before dropping it on the floor,
+	// so that a dangling ref to one entry does not pin all of them.
+	var d, dlink *_defer
+	for d = sched.deferpool; d != nil; d = dlink {
+		dlink = d.link
+		d.link = nil
+	}
+	sched.deferpool = nil
 	unlock(&sched.deferlock)
 }
 
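Worth noting in the clearpools hunk above: even with a single central list, it is still dismantled node by node rather than just setting sched.deferpool = nil. Severing each link matters for the GC, as the diff's own comment about dangling references says. A small illustrative fragment (hypothetical node type, not runtime code):

type node struct{ link *node }

// drop discards a free list. Clearing each link before the head is
// dropped means that a stale pointer to one node, held elsewhere,
// keeps only that node alive instead of pinning every node after it.
func drop(head *node) {
	var next *node
	for n := head; n != nil; n = next {
		next = n.link
		n.link = nil
	}
}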
runtime/panic.go

@@ -236,7 +236,7 @@ func deferproc(fn func()) {
 	sp := getcallersp()
 	callerpc := getcallerpc()
 
-	d := newdefer(0)
+	d := newdefer()
 	if d._panic != nil {
 		throw("deferproc: d.panic != nil after newdefer")
 	}
@@ -302,106 +302,37 @@ func deferprocStack(d *_defer) {
 	// been set and must not be clobbered.
 }
 
-// Small malloc size classes >= 16 are the multiples of 16: 16, 32, 48, 64, 80, 96, 112, 128, 144, ...
-// Each P holds a pool for defers with small arg sizes.
-// Assign defer allocations to pools by rounding to 16, to match malloc size classes.
-
-const (
-	deferHeaderSize = unsafe.Sizeof(_defer{})
-	minDeferAlloc   = (deferHeaderSize + 15) &^ 15
-	minDeferArgs    = minDeferAlloc - deferHeaderSize
-)
-
-// defer size class for arg size sz
-//go:nosplit
-func deferclass(siz uintptr) uintptr {
-	if siz <= minDeferArgs {
-		return 0
-	}
-	return (siz - minDeferArgs + 15) / 16
-}
-
-// total size of memory block for defer with arg size sz
-func totaldefersize(siz uintptr) uintptr {
-	if siz <= minDeferArgs {
-		return minDeferAlloc
-	}
-	return deferHeaderSize + siz
-}
-
-// Ensure that defer arg sizes that map to the same defer size class
-// also map to the same malloc size class.
-func testdefersizes() {
-	var m [len(p{}.deferpool)]int32
-
-	for i := range m {
-		m[i] = -1
-	}
-	for i := uintptr(0); ; i++ {
-		defersc := deferclass(i)
-		if defersc >= uintptr(len(m)) {
-			break
-		}
-		siz := roundupsize(totaldefersize(i))
-		if m[defersc] < 0 {
-			m[defersc] = int32(siz)
-			continue
-		}
-		if m[defersc] != int32(siz) {
-			print("bad defer size class: i=", i, " siz=", siz, " defersc=", defersc, "\n")
-			throw("bad defer size class")
-		}
-	}
-}
-
-var deferType *_type // type of _defer struct
-
-func init() {
-	var x interface{}
-	x = (*_defer)(nil)
-	deferType = (*(**ptrtype)(unsafe.Pointer(&x))).elem
-}
-
+// Each P holds a pool for defers.
+
 // Allocate a Defer, usually using per-P pool.
 // Each defer must be released with freedefer. The defer is not
 // added to any defer chain yet.
-//
-// This must not grow the stack because there may be a frame without
-// stack map information when this is called.
-//
-//go:nosplit
-func newdefer(siz int32) *_defer {
+func newdefer() *_defer {
 	var d *_defer
-	sc := deferclass(uintptr(siz))
 	gp := getg()
-	if sc < uintptr(len(p{}.deferpool)) {
-		pp := gp.m.p.ptr()
-		if len(pp.deferpool[sc]) == 0 && sched.deferpool[sc] != nil {
-			// Take the slow path on the system stack so
-			// we don't grow newdefer's stack.
-			systemstack(func() {
-				lock(&sched.deferlock)
-				for len(pp.deferpool[sc]) < cap(pp.deferpool[sc])/2 && sched.deferpool[sc] != nil {
-					d := sched.deferpool[sc]
-					sched.deferpool[sc] = d.link
-					d.link = nil
-					pp.deferpool[sc] = append(pp.deferpool[sc], d)
-				}
-				unlock(&sched.deferlock)
-			})
-		}
-		if n := len(pp.deferpool[sc]); n > 0 {
-			d = pp.deferpool[sc][n-1]
-			pp.deferpool[sc][n-1] = nil
-			pp.deferpool[sc] = pp.deferpool[sc][:n-1]
-		}
+	pp := gp.m.p.ptr()
+	if len(pp.deferpool) == 0 && sched.deferpool != nil {
+		// Take the slow path on the system stack so
+		// we don't grow newdefer's stack.
+		systemstack(func() {
+			lock(&sched.deferlock)
+			for len(pp.deferpool) < cap(pp.deferpool)/2 && sched.deferpool != nil {
+				d := sched.deferpool
+				sched.deferpool = d.link
+				d.link = nil
+				pp.deferpool = append(pp.deferpool, d)
+			}
+			unlock(&sched.deferlock)
+		})
+	}
+	if n := len(pp.deferpool); n > 0 {
+		d = pp.deferpool[n-1]
+		pp.deferpool[n-1] = nil
+		pp.deferpool = pp.deferpool[:n-1]
 	}
 	if d == nil {
-		// Allocate new defer+args.
-		systemstack(func() {
-			total := roundupsize(totaldefersize(uintptr(siz)))
-			d = (*_defer)(mallocgc(total, deferType, true))
-		})
+		// Allocate new defer.
+		d = new(_defer)
 	}
 	d.heap = true
 	return d
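The rewritten newdefer keeps the runtime's two-level pool shape, now for a single size: a per-P slice acts as a lock-free cache, and when it runs dry it is refilled to half capacity from a mutex-protected central linked list. The fragment below is a self-contained sketch of that refill-then-pop pattern under illustrative names (twoLevelPool, get, reusing the record type from the first sketch); the real runtime uses sched.deferlock and systemstack rather than a plain sync.Mutex.

import "sync"

// twoLevelPool is the analogue of sched.deferpool plus its lock: a
// central linked list guarded by a mutex. Each caller owns a local
// slice (the analogue of p.deferpool) used as a lock-free cache.
type twoLevelPool struct {
	mu      sync.Mutex
	central *record // linked through record.link
}

// get pops from the local cache, first refilling it to half capacity
// from the central list if it is empty. The unlocked nil check is an
// optimistic hint; the loop rechecks the list under the lock.
func (p *twoLevelPool) get(local *[]*record) *record {
	if len(*local) == 0 && p.central != nil {
		p.mu.Lock()
		for len(*local) < cap(*local)/2 && p.central != nil {
			d := p.central
			p.central = d.link
			d.link = nil
			*local = append(*local, d)
		}
		p.mu.Unlock()
	}
	if n := len(*local); n > 0 {
		d := (*local)[n-1]
		(*local)[n-1] = nil // don't let the backing array retain it
		*local = (*local)[:n-1]
		return d
	}
	return new(record) // both caches empty: allocate fresh
}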
@@ -424,23 +355,19 @@ func freedefer(d *_defer) {
 	if !d.heap {
 		return
 	}
-	sc := deferclass(0)
-	if sc >= uintptr(len(p{}.deferpool)) {
-		return
-	}
 	pp := getg().m.p.ptr()
-	if len(pp.deferpool[sc]) == cap(pp.deferpool[sc]) {
+	if len(pp.deferpool) == cap(pp.deferpool) {
 		// Transfer half of local cache to the central cache.
 		//
 		// Take this slow path on the system stack so
 		// we don't grow freedefer's stack.
 		systemstack(func() {
 			var first, last *_defer
-			for len(pp.deferpool[sc]) > cap(pp.deferpool[sc])/2 {
-				n := len(pp.deferpool[sc])
-				d := pp.deferpool[sc][n-1]
-				pp.deferpool[sc][n-1] = nil
-				pp.deferpool[sc] = pp.deferpool[sc][:n-1]
+			for len(pp.deferpool) > cap(pp.deferpool)/2 {
+				n := len(pp.deferpool)
+				d := pp.deferpool[n-1]
+				pp.deferpool[n-1] = nil
+				pp.deferpool = pp.deferpool[:n-1]
 				if first == nil {
 					first = d
 				} else {
@@ -449,8 +376,8 @@ func freedefer(d *_defer) {
 				last = d
 			}
 			lock(&sched.deferlock)
-			last.link = sched.deferpool[sc]
-			sched.deferpool[sc] = first
+			last.link = sched.deferpool
+			sched.deferpool = first
 			unlock(&sched.deferlock)
 		})
 	}
@@ -469,7 +396,7 @@ func freedefer(d *_defer) {
 	// both of which throw.
 	d.link = nil
 
-	pp.deferpool[sc] = append(pp.deferpool[sc], d)
+	pp.deferpool = append(pp.deferpool, d)
 }
 
 // Separate function so that it can split stack.
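freedefer's pool interaction is the mirror image: the record is appended to the local cache, and when the cache is full, half of it is chained together off-lock and then spliced onto the central list with a single lock acquisition. Continuing the same hypothetical sketch:

// put returns a record to the local cache. When the cache is full,
// half of it is linked into a chain without holding the lock, then
// spliced onto the central list in one locked operation, so the common
// case stays a plain slice append.
func (p *twoLevelPool) put(local *[]*record, d *record) {
	if len(*local) == cap(*local) && cap(*local) > 0 {
		var first, last *record
		for len(*local) > cap(*local)/2 {
			n := len(*local)
			x := (*local)[n-1]
			(*local)[n-1] = nil
			*local = (*local)[:n-1]
			if first == nil {
				first = x
			} else {
				last.link = x
			}
			last = x
		}
		p.mu.Lock()
		last.link = p.central // splice the whole chain at once
		p.central = first
		p.mu.Unlock()
	}
	d.link = nil
	*local = append(*local, d)
}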
@@ -720,7 +647,7 @@ func addOneOpenDeferFrame(gp *g, pc uintptr, sp unsafe.Pointer) {
 			throw("missing deferreturn")
 		}
 
-		d1 := newdefer(0)
+		d1 := newdefer()
 		d1.openDefer = true
 		d1._panic = nil
 		// These are the pc/sp to set after we've
runtime/proc.go

@@ -4784,9 +4784,7 @@ func (pp *p) init(id int32) {
 	pp.id = id
 	pp.status = _Pgcstop
 	pp.sudogcache = pp.sudogbuf[:0]
-	for i := range pp.deferpool {
-		pp.deferpool[i] = pp.deferpoolbuf[i][:0]
-	}
+	pp.deferpool = pp.deferpoolbuf[:0]
 	pp.wbBuf.reset()
 	if pp.mcache == nil {
 		if id == 0 {
@@ -4864,12 +4862,10 @@ func (pp *p) destroy() {
 		pp.sudogbuf[i] = nil
 	}
 	pp.sudogcache = pp.sudogbuf[:0]
-	for i := range pp.deferpool {
-		for j := range pp.deferpoolbuf[i] {
-			pp.deferpoolbuf[i][j] = nil
-		}
-		pp.deferpool[i] = pp.deferpoolbuf[i][:0]
-	}
+	for j := range pp.deferpoolbuf {
+		pp.deferpoolbuf[j] = nil
+	}
+	pp.deferpool = pp.deferpoolbuf[:0]
 	systemstack(func() {
 		for i := 0; i < pp.mspancache.len; i++ {
 			// Safe to call since the world is stopped.
runtime/runtime2.go

@@ -613,8 +613,8 @@ type p struct {
 	pcache      pageCache
 	raceprocctx uintptr
 
-	deferpool    [5][]*_defer // pool of available defer structs of different sizes (see panic.go)
-	deferpoolbuf [5][32]*_defer
+	deferpool    []*_defer // pool of available defer structs (see panic.go)
+	deferpoolbuf [32]*_defer
 
 	// Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
 	goidcache    uint64
@@ -801,9 +801,9 @@ type schedt struct {
 	sudoglock  mutex
 	sudogcache *sudog
 
-	// Central pool of available defer structs of different sizes.
+	// Central pool of available defer structs.
 	deferlock mutex
-	deferpool [5]*_defer
+	deferpool *_defer
 
 	// freem is the list of m's waiting to be freed when their
 	// m.exited is set. Linked through m.freelink.
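For completeness, a hypothetical driver for the sketch from the panic.go hunks, with the local slice playing the role of the fixed [32]*_defer buffer each P embeds (p.deferpoolbuf above):

func main() {
	var pool twoLevelPool
	local := make([]*record, 0, 32) // stands in for p.deferpool backed by p.deferpoolbuf
	for i := 0; i < 100; i++ {
		d := pool.get(&local)
		pool.put(&local, d)
	}
}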