1
0
mirror of https://github.com/golang/go synced 2024-11-12 04:50:21 -07:00

[dev.garbage] runtime: fix allocfreetrace

We broke tracing of freed objects in GODEBUG=allocfreetrace=1 mode
when we removed the sweep over the mark bitmap. Fix it by
re-introducing the sweep over the bitmap specifically if we're in
allocfreetrace mode. This doesn't have to be even remotely efficient,
since the overhead of allocfreetrace is huge anyway, so we can keep
the code for this down to just a few lines.

Change-Id: I9e176b3b04c73608a0ea3068d5d0cd30760ebd40
Reviewed-on: https://go-review.googlesource.com/22592
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
This commit is contained in:
Austin Clements 2016-04-28 15:49:39 -04:00
parent 38f674687a
commit 6d11490539
2 changed files with 15 additions and 17 deletions

View File

@ -743,23 +743,6 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
gcmarknewobject(uintptr(x), size, scanSize)
}
// The object x is about to be reused but tracefree and msanfree
// need to be informed.
// TODO:(rlh) It is quite possible that this object is being allocated
// out of a fresh span and that there is no preceding call to
// tracealloc with this object. If this is an issue then initialization
// of the fresh span needs to leave some crumbs around that can be used to
// avoid these calls. Furthermore these crumbs are likely the same as
// those needed to determine if the object needs to be zeroed.
// In the case of msanfree it does not make sense to call msanfree
// followed by msanmalloc. msanfree indicates that the bytes are not
// initialized but msanmalloc is about to indicate that they are.
// It makes no difference whether msanmalloc has been called on these
// bytes or not.
if debug.allocfreetrace != 0 {
tracefree(unsafe.Pointer(x), size)
}
if raceenabled {
racemalloc(x, size)
}

View File

@ -251,6 +251,21 @@ func (s *mspan) sweep(preserve bool) bool {
}
}
if debug.allocfreetrace != 0 {
// Find all newly freed objects. This doesn't have to
// be efficient; allocfreetrace has massive overhead.
mbits := s.markBitsForBase()
abits := s.allocBitsForIndex(0)
for i := uintptr(0); i < s.nelems; i++ {
if !mbits.isMarked() && (abits.index < s.freeindex || abits.isMarked()) {
x := s.base() + i*s.elemsize
tracefree(unsafe.Pointer(x), size)
}
mbits.advance()
abits.advance()
}
}
// Count the number of free objects in this span.
nfree = s.countFree()
if cl == 0 && nfree != 0 {