go/src/runtime/mfinal.go
commit 2c990f4596 by Keith Randall, 2017-08-14 20:11:21 +00:00

runtime: add a use of runtime.KeepAlive's argument

This makes sure that its argument is marked live on entry.
We need its arg to be live so defers of KeepAlive get
scanned correctly by the GC.

Fixes #21402

Change-Id: I906813e433d0e9726ca46483723303338da5b4d7
Reviewed-on: https://go-review.googlesource.com/55150
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Ian Lance Taylor <iant@golang.org>

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: finalizers and block profiling.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// finblock is an array of finalizers to be executed. finblocks are
// arranged in a linked list for the finalizer queue.
//
// finblock is allocated from non-GC'd memory, so any heap pointers
// must be specially handled. GC currently assumes that the finalizer
// queue does not grow during marking (but it can shrink).
//
//go:notinheap
type finblock struct {
	alllink *finblock
	next    *finblock
	cnt     uint32
	_       int32
	fin     [(_FinBlockSize - 2*sys.PtrSize - 2*4) / unsafe.Sizeof(finalizer{})]finalizer
}

var finlock mutex  // protects the following variables
var fing *g        // goroutine that runs finalizers
var finq *finblock // list of finalizers that are to be executed
var finc *finblock // cache of free blocks
var finptrmask [_FinBlockSize / sys.PtrSize / 8]byte
var fingwait bool
var fingwake bool
var allfin *finblock // list of all blocks

// NOTE: Layout known to queuefinalizer.
type finalizer struct {
	fn   *funcval       // function to call (may be a heap pointer)
	arg  unsafe.Pointer // ptr to object (may be a heap pointer)
	nret uintptr        // bytes of return values from fn
	fint *_type         // type of first argument of fn
	ot   *ptrtype       // type of ptr to object (may be a heap pointer)
}

var finalizer1 = [...]byte{
	// Each Finalizer is 5 words, ptr ptr INT ptr ptr (INT = uintptr here)
	// Each byte describes 8 words.
	// Need 8 Finalizers described by 5 bytes before pattern repeats:
	//	ptr ptr INT ptr ptr
	//	ptr ptr INT ptr ptr
	//	ptr ptr INT ptr ptr
	//	ptr ptr INT ptr ptr
	//	ptr ptr INT ptr ptr
	//	ptr ptr INT ptr ptr
	//	ptr ptr INT ptr ptr
	//	ptr ptr INT ptr ptr
	// aka
	//
	//	ptr ptr INT ptr ptr ptr ptr INT
	//	ptr ptr ptr ptr INT ptr ptr ptr
	//	ptr INT ptr ptr ptr ptr INT ptr
	//	ptr ptr ptr INT ptr ptr ptr ptr
	//	INT ptr ptr ptr ptr INT ptr ptr
	//
	// Assumptions about Finalizer layout checked below.
	1<<0 | 1<<1 | 0<<2 | 1<<3 | 1<<4 | 1<<5 | 1<<6 | 0<<7,
	1<<0 | 1<<1 | 1<<2 | 1<<3 | 0<<4 | 1<<5 | 1<<6 | 1<<7,
	1<<0 | 0<<1 | 1<<2 | 1<<3 | 1<<4 | 1<<5 | 0<<6 | 1<<7,
	1<<0 | 1<<1 | 1<<2 | 0<<3 | 1<<4 | 1<<5 | 1<<6 | 1<<7,
	0<<0 | 1<<1 | 1<<2 | 1<<3 | 1<<4 | 0<<5 | 1<<6 | 1<<7,
}
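
// The five mask bytes above can be derived mechanically: the per-finalizer
// pointer pattern is five words long, each mask byte covers eight words, and
// lcm(5, 8) = 40 words, so the pattern repeats after 5 bytes (8 finalizers).
// A minimal standalone sketch (not part of the runtime; hypothetical
// package main) that prints the same five bytes:
//
//	package main
//
//	import "fmt"
//
//	func main() {
//		// Pointer layout of one finalizer, word by word: fn arg nret fint ot.
//		pattern := []byte{1, 1, 0, 1, 1}
//		for i := 0; i < 5; i++ { // 5 bytes cover 40 words = 8 finalizers
//			var b byte
//			for j := 0; j < 8; j++ {
//				b |= pattern[(i*8+j)%5] << uint(j)
//			}
//			fmt.Printf("%08b\n", b)
//		}
//	}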

func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot *ptrtype) {
	if gcphase != _GCoff {
		// Currently we assume that the finalizer queue won't
		// grow during marking so we don't have to rescan it
		// during mark termination. If we ever need to lift
		// this assumption, we can do it by adding the
		// necessary barriers to queuefinalizer (which it may
		// have automatically).
		throw("queuefinalizer during GC")
	}
	lock(&finlock)
	if finq == nil || finq.cnt == uint32(len(finq.fin)) {
		if finc == nil {
			finc = (*finblock)(persistentalloc(_FinBlockSize, 0, &memstats.gc_sys))
			finc.alllink = allfin
			allfin = finc
			if finptrmask[0] == 0 {
				// Build pointer mask for Finalizer array in block.
				// Check assumptions made in finalizer1 array above.
				if (unsafe.Sizeof(finalizer{}) != 5*sys.PtrSize ||
					unsafe.Offsetof(finalizer{}.fn) != 0 ||
					unsafe.Offsetof(finalizer{}.arg) != sys.PtrSize ||
					unsafe.Offsetof(finalizer{}.nret) != 2*sys.PtrSize ||
					unsafe.Offsetof(finalizer{}.fint) != 3*sys.PtrSize ||
					unsafe.Offsetof(finalizer{}.ot) != 4*sys.PtrSize) {
					throw("finalizer out of sync")
				}
				for i := range finptrmask {
					finptrmask[i] = finalizer1[i%len(finalizer1)]
				}
			}
		}
		block := finc
		finc = block.next
		block.next = finq
		finq = block
	}
	f := &finq.fin[finq.cnt]
	atomic.Xadd(&finq.cnt, +1) // Sync with markroots
	f.fn = fn
	f.nret = nret
	f.fint = fint
	f.ot = ot
	f.arg = p
	fingwake = true
	unlock(&finlock)
}

//go:nowritebarrier
func iterate_finq(callback func(*funcval, unsafe.Pointer, uintptr, *_type, *ptrtype)) {
	for fb := allfin; fb != nil; fb = fb.alllink {
		for i := uint32(0); i < fb.cnt; i++ {
			f := &fb.fin[i]
			callback(f.fn, f.arg, f.nret, f.fint, f.ot)
		}
	}
}

func wakefing() *g {
	var res *g
	lock(&finlock)
	if fingwait && fingwake {
		fingwait = false
		fingwake = false
		res = fing
	}
	unlock(&finlock)
	return res
}

var (
	fingCreate  uint32
	fingRunning bool
)

func createfing() {
	// start the finalizer goroutine exactly once
	if fingCreate == 0 && atomic.Cas(&fingCreate, 0, 1) {
		go runfinq()
	}
}
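
// Outside the runtime, the same start-exactly-once idiom can be written with
// sync/atomic (or, more simply, sync.Once). A minimal sketch under those
// assumptions, with hypothetical names started and worker; note that user
// code needs an atomic load where the runtime can get away with a plain read:
//
//	var started uint32
//
//	func startWorkerOnce() {
//		// The load is a cheap fast path; the CAS makes the decision race-free,
//		// so worker is started by exactly one caller.
//		if atomic.LoadUint32(&started) == 0 && atomic.CompareAndSwapUint32(&started, 0, 1) {
//			go worker()
//		}
//	}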

// This is the goroutine that runs all of the finalizers
func runfinq() {
	var (
		frame    unsafe.Pointer
		framecap uintptr
	)

	for {
		lock(&finlock)
		fb := finq
		finq = nil
		if fb == nil {
			gp := getg()
			fing = gp
			fingwait = true
			goparkunlock(&finlock, "finalizer wait", traceEvGoBlock, 1)
			continue
		}
		unlock(&finlock)
		if raceenabled {
			racefingo()
		}
		for fb != nil {
			for i := fb.cnt; i > 0; i-- {
				f := &fb.fin[i-1]

				framesz := unsafe.Sizeof((interface{})(nil)) + f.nret
				if framecap < framesz {
					// The frame does not contain pointers interesting for GC,
					// all not yet finalized objects are stored in finq.
					// If we do not mark it as FlagNoScan,
					// the last finalized object is not collected.
					frame = mallocgc(framesz, nil, true)
					framecap = framesz
				}

				if f.fint == nil {
					throw("missing type in runfinq")
				}
				// frame is effectively uninitialized
				// memory. That means we have to clear
				// it before writing to it to avoid
				// confusing the write barrier.
				*(*[2]uintptr)(frame) = [2]uintptr{}
				switch f.fint.kind & kindMask {
				case kindPtr:
					// direct use of pointer
					*(*unsafe.Pointer)(frame) = f.arg
				case kindInterface:
					ityp := (*interfacetype)(unsafe.Pointer(f.fint))
					// set up with empty interface
					(*eface)(frame)._type = &f.ot.typ
					(*eface)(frame).data = f.arg
					if len(ityp.mhdr) != 0 {
						// convert to interface with methods
						// this conversion is guaranteed to succeed - we checked in SetFinalizer
						*(*iface)(frame) = assertE2I(ityp, *(*eface)(frame))
					}
				default:
					throw("bad kind in runfinq")
				}
				fingRunning = true
				reflectcall(nil, unsafe.Pointer(f.fn), frame, uint32(framesz), uint32(framesz))
				fingRunning = false

				// Drop finalizer queue heap references
				// before hiding them from markroot.
				// This also ensures these will be
				// clear if we reuse the finalizer.
				f.fn = nil
				f.arg = nil
				f.ot = nil
				atomic.Store(&fb.cnt, i-1)
			}
			next := fb.next
			lock(&finlock)
			fb.next = finc
			finc = fb
			unlock(&finlock)
			fb = next
		}
	}
}

// SetFinalizer sets the finalizer associated with obj to the provided
// finalizer function. When the garbage collector finds an unreachable block
// with an associated finalizer, it clears the association and runs
// finalizer(obj) in a separate goroutine. This makes obj reachable again,
// but now without an associated finalizer. Assuming that SetFinalizer
// is not called again, the next time the garbage collector sees
// that obj is unreachable, it will free obj.
//
// SetFinalizer(obj, nil) clears any finalizer associated with obj.
//
// The argument obj must be a pointer to an object allocated by calling
// new, by taking the address of a composite literal, or by taking the
// address of a local variable.
// The argument finalizer must be a function that takes a single argument
// to which obj's type can be assigned, and can have arbitrary ignored return
// values. If either of these is not true, SetFinalizer may abort the
// program.
//
// Finalizers are run in dependency order: if A points at B, both have
// finalizers, and they are otherwise unreachable, only the finalizer
// for A runs; once A is freed, the finalizer for B can run.
// If a cyclic structure includes a block with a finalizer, that
// cycle is not guaranteed to be garbage collected and the finalizer
// is not guaranteed to run, because there is no ordering that
// respects the dependencies.
//
// The finalizer for obj is scheduled to run at some arbitrary time after
// obj becomes unreachable.
// There is no guarantee that finalizers will run before a program exits,
// so typically they are useful only for releasing non-memory resources
// associated with an object during a long-running program.
// For example, an os.File object could use a finalizer to close the
// associated operating system file descriptor when a program discards
// an os.File without calling Close, but it would be a mistake
// to depend on a finalizer to flush an in-memory I/O buffer such as a
// bufio.Writer, because the buffer would not be flushed at program exit.
//
// It is not guaranteed that a finalizer will run if the size of *obj is
// zero bytes.
//
// It is not guaranteed that a finalizer will run for objects allocated
// in initializers for package-level variables. Such objects may be
// linker-allocated, not heap-allocated.
//
// A finalizer may run as soon as an object becomes unreachable.
// In order to use finalizers correctly, the program must ensure that
// the object is reachable until it is no longer required.
// Objects stored in global variables, or that can be found by tracing
// pointers from a global variable, are reachable. For other objects,
// pass the object to a call of the KeepAlive function to mark the
// last point in the function where the object must be reachable.
//
// For example, if p points to a struct that contains a file descriptor d,
// and p has a finalizer that closes that file descriptor, and if the last
// use of p in a function is a call to syscall.Write(p.d, buf, size), then
// p may be unreachable as soon as the program enters syscall.Write. The
// finalizer may run at that moment, closing p.d, causing syscall.Write
// to fail because it is writing to a closed file descriptor (or, worse,
// to an entirely different file descriptor opened by a different goroutine).
// To avoid this problem, call runtime.KeepAlive(p) after the call to
// syscall.Write.
//
// A single goroutine runs all finalizers for a program, sequentially.
// If a finalizer must run for a long time, it should do so by starting
// a new goroutine.
func SetFinalizer(obj interface{}, finalizer interface{}) {
	if debug.sbrk != 0 {
		// debug.sbrk never frees memory, so no finalizers run
		// (and we don't have the data structures to record them).
		return
	}
	e := efaceOf(&obj)
	etyp := e._type
	if etyp == nil {
		throw("runtime.SetFinalizer: first argument is nil")
	}
	if etyp.kind&kindMask != kindPtr {
		throw("runtime.SetFinalizer: first argument is " + etyp.string() + ", not pointer")
	}
	ot := (*ptrtype)(unsafe.Pointer(etyp))
	if ot.elem == nil {
		throw("nil elem type!")
	}

	// find the containing object
	_, base, _ := findObject(e.data)

	if base == nil {
		// 0-length objects are okay.
		if e.data == unsafe.Pointer(&zerobase) {
			return
		}

		// Global initializers might be linker-allocated.
		//	var Foo = &Object{}
		//	func main() {
		//		runtime.SetFinalizer(Foo, nil)
		//	}
		// The relevant segments are: noptrdata, data, bss, noptrbss.
		// We cannot assume they are in any order or even contiguous,
		// due to external linking.
		for datap := &firstmoduledata; datap != nil; datap = datap.next {
			if datap.noptrdata <= uintptr(e.data) && uintptr(e.data) < datap.enoptrdata ||
				datap.data <= uintptr(e.data) && uintptr(e.data) < datap.edata ||
				datap.bss <= uintptr(e.data) && uintptr(e.data) < datap.ebss ||
				datap.noptrbss <= uintptr(e.data) && uintptr(e.data) < datap.enoptrbss {
				return
			}
		}
		throw("runtime.SetFinalizer: pointer not in allocated block")
	}

	if e.data != base {
		// As an implementation detail we allow setting finalizers for an inner byte
		// of an object if it could come from tiny alloc (see mallocgc for details).
		if ot.elem == nil || ot.elem.kind&kindNoPointers == 0 || ot.elem.size >= maxTinySize {
			throw("runtime.SetFinalizer: pointer not at beginning of allocated block")
		}
	}

	f := efaceOf(&finalizer)
	ftyp := f._type
	if ftyp == nil {
		// switch to system stack and remove finalizer
		systemstack(func() {
			removefinalizer(e.data)
		})
		return
	}

	if ftyp.kind&kindMask != kindFunc {
		throw("runtime.SetFinalizer: second argument is " + ftyp.string() + ", not a function")
	}
	ft := (*functype)(unsafe.Pointer(ftyp))
	if ft.dotdotdot() {
		throw("runtime.SetFinalizer: cannot pass " + etyp.string() + " to finalizer " + ftyp.string() + " because dotdotdot")
	}
	if ft.inCount != 1 {
		throw("runtime.SetFinalizer: cannot pass " + etyp.string() + " to finalizer " + ftyp.string())
	}
	fint := ft.in()[0]
	switch {
	case fint == etyp:
		// ok - same type
		goto okarg
	case fint.kind&kindMask == kindPtr:
		if (fint.uncommon() == nil || etyp.uncommon() == nil) && (*ptrtype)(unsafe.Pointer(fint)).elem == ot.elem {
			// ok - not same type, but both pointers,
			// one or the other is unnamed, and same element type, so assignable.
			goto okarg
		}
	case fint.kind&kindMask == kindInterface:
		ityp := (*interfacetype)(unsafe.Pointer(fint))
		if len(ityp.mhdr) == 0 {
			// ok - satisfies empty interface
			goto okarg
		}
		if _, ok := assertE2I2(ityp, *efaceOf(&obj)); ok {
			goto okarg
		}
	}
	throw("runtime.SetFinalizer: cannot pass " + etyp.string() + " to finalizer " + ftyp.string())
okarg:
	// compute size needed for return parameters
	nret := uintptr(0)
	for _, t := range ft.out() {
		nret = round(nret, uintptr(t.align)) + uintptr(t.size)
	}
	nret = round(nret, sys.PtrSize)

	// make sure we have a finalizer goroutine
	createfing()

	systemstack(func() {
		if !addfinalizer(e.data, (*funcval)(f.data), nret, fint, ot) {
			throw("runtime.SetFinalizer: finalizer already set")
		}
	})
}
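
// A minimal usage sketch (hypothetical names; not part of this file): wrap a
// file descriptor so it is eventually closed even if the owner forgets to,
// mirroring the os.File behavior described in the doc comment above.
//
//	type fd struct{ d int }
//
//	func open(path string) (*fd, error) {
//		d, err := syscall.Open(path, syscall.O_RDONLY, 0)
//		if err != nil {
//			return nil, err
//		}
//		f := &fd{d}
//		// The finalizer's parameter type *fd matches the pointer passed in,
//		// satisfying the assignability check performed by SetFinalizer.
//		runtime.SetFinalizer(f, func(f *fd) { syscall.Close(f.d) })
//		return f, nil
//	}
//
// An explicit Close method should still be provided and preferred; the
// finalizer is only a backstop, since there is no guarantee it runs before
// the program exits.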

// Look up pointer v in heap. Return the span containing the object,
// the start of the object, and the size of the object. If the object
// does not exist, return nil, nil, 0.
func findObject(v unsafe.Pointer) (s *mspan, x unsafe.Pointer, n uintptr) {
	c := gomcache()
	c.local_nlookup++
	if sys.PtrSize == 4 && c.local_nlookup >= 1<<30 {
		// purge cache stats to prevent overflow
		lock(&mheap_.lock)
		purgecachedstats(c)
		unlock(&mheap_.lock)
	}

	// find span
	arena_start := mheap_.arena_start
	arena_used := mheap_.arena_used
	if uintptr(v) < arena_start || uintptr(v) >= arena_used {
		return
	}
	p := uintptr(v) >> pageShift
	q := p - arena_start>>pageShift
	s = mheap_.spans[q]
	if s == nil {
		return
	}
	x = unsafe.Pointer(s.base())

	if uintptr(v) < uintptr(x) || uintptr(v) >= uintptr(unsafe.Pointer(s.limit)) || s.state != mSpanInUse {
		s = nil
		x = nil
		return
	}

	n = s.elemsize
	if s.spanclass.sizeclass() != 0 {
		x = add(x, (uintptr(v)-uintptr(x))/n*n)
	}
	return
}

// Mark KeepAlive as noinline so that it is easily detectable as an intrinsic.
//go:noinline

// KeepAlive marks its argument as currently reachable.
// This ensures that the object is not freed, and its finalizer is not run,
// before the point in the program where KeepAlive is called.
//
// A very simplified example showing where KeepAlive is required:
//	type File struct { d int }
//	d, err := syscall.Open("/file/path", syscall.O_RDONLY, 0)
//	// ... do something if err != nil ...
//	p := &File{d}
//	runtime.SetFinalizer(p, func(p *File) { syscall.Close(p.d) })
//	var buf [10]byte
//	n, err := syscall.Read(p.d, buf[:])
//	// Ensure p is not finalized until Read returns.
//	runtime.KeepAlive(p)
//	// No more uses of p after this point.
//
// Without the KeepAlive call, the finalizer could run at the start of
// syscall.Read, closing the file descriptor before syscall.Read makes
// the actual system call.
func KeepAlive(x interface{}) {
	// Introduce a use of x that the compiler can't eliminate.
	// This makes sure x is alive on entry. We need x to be alive
	// on entry for "defer runtime.KeepAlive(x)"; see issue 21402.
	if cgoAlwaysFalse {
		println(x)
	}
}
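
// As the commit message above notes, KeepAlive also works in a defer: the use
// of x on entry keeps the deferred argument live for the whole call. A minimal
// sketch of that pattern (hypothetical process function, reusing the File type
// from the example above):
//
//	func process(p *File) (int, error) {
//		// p stays reachable, and its finalizer cannot run, until process returns.
//		defer runtime.KeepAlive(p)
//		var buf [10]byte
//		return syscall.Read(p.d, buf[:])
//	}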