// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Code to check that pointer writes follow the cgo rules.
// These functions are invoked via the write barrier when debug.cgocheck > 1.
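//
// For illustration only, a hypothetical cgo program that these checks
// are meant to catch when run with GODEBUG=cgocheck=2 (the C variable
// c_slot is an assumption, not something defined by the runtime):
//
//	/*
//	void *c_slot;
//	*/
//	import "C"
//	import "unsafe"
//
//	func main() {
//		x := new(int32)
//		C.c_slot = unsafe.Pointer(x) // Go pointer into non-Go memory: throws.
//	}
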
package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

const cgoWriteBarrierFail = "Go pointer stored into non-Go memory"

// cgoCheckWriteBarrier is called whenever a pointer is stored into memory.
// It throws if the program is storing a Go pointer into non-Go memory.
//
// This is called from the write barrier, so its entire call tree must
// be nosplit.
//
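// Under cgocheck=2 the buffered write barrier is configured with a
// minimum-size buffer, so every barriered pointer store takes the
// barrier's slow path and is checked here, with dst the destination
// slot and src the pointer being stored (a sketch of the call path,
// not a guarantee of the exact mechanism).
//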
//go:nosplit
//go:nowritebarrier
func cgoCheckWriteBarrier(dst *uintptr, src uintptr) {
	if !cgoIsGoPointer(unsafe.Pointer(src)) {
		return
	}
	if cgoIsGoPointer(unsafe.Pointer(dst)) {
		return
	}

	// If we are running on the system stack then dst might be an
	// address on the stack, which is OK.
	g := getg()
	if g == g.m.g0 || g == g.m.gsignal {
		return
	}

	// Allocating memory can write to various mfixalloc structs
	// that look like they are non-Go memory.
	if g.m.mallocing != 0 {
		return
	}

	systemstack(func() {
		println("write of Go pointer", hex(src), "to non-Go memory", hex(uintptr(unsafe.Pointer(dst))))
		throw(cgoWriteBarrierFail)
	})
}

// cgoCheckMemmove is called when moving a block of memory.
// dst and src point off bytes into the value to copy.
// size is the number of bytes to copy.
// It throws if the program is copying a block that contains a Go pointer
// into non-Go memory.
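//
// For illustration (hypothetical user code; cptr is assumed to point
// at C.malloc'ed memory, and small copies may instead be lowered to
// the single-pointer write barrier check above):
//
//	type T struct{ p *int }
//	v := T{p: new(int)}
//	*(*T)(cptr) = v // block copy carries a Go pointer into C memory: throws.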
//go:nosplit
//go:nowritebarrier
func cgoCheckMemmove(typ *_type, dst, src unsafe.Pointer, off, size uintptr) {
	if typ.kind&kindNoPointers != 0 {
		return
	}
	if !cgoIsGoPointer(src) {
		return
	}
	if cgoIsGoPointer(dst) {
		return
	}
	cgoCheckTypedBlock(typ, src, off, size)
}

// cgoCheckSliceCopy is called when copying n elements of a slice from
// src to dst. typ is the element type of the slice.
// It throws if the program is copying slice elements that contain Go pointers
// into non-Go memory.
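//
// For illustration (hypothetical user code; cbuf is assumed to point
// at 64 bytes of C.malloc'ed memory):
//
//	dst := (*[8]*int)(cbuf)[:]
//	src := []*int{new(int)}
//	copy(dst, src) // copies a Go pointer into non-Go memory: throws.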
//go:nosplit
//go:nowritebarrier
func cgoCheckSliceCopy(typ *_type, dst, src slice, n int) {
	if typ.kind&kindNoPointers != 0 {
		return
	}
	if !cgoIsGoPointer(src.array) {
		return
	}
	if cgoIsGoPointer(dst.array) {
		return
	}
	p := src.array
	for i := 0; i < n; i++ {
		cgoCheckTypedBlock(typ, p, 0, typ.size)
		p = add(p, typ.size)
	}
}

// cgoCheckTypedBlock checks the block of memory at src, for up to size bytes,
// and throws if it finds a Go pointer. The type of the memory is typ,
// and src is off bytes into that type.
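//
// For example (illustrative): checking a struct { a uintptr; b *int }
// with off = 8 and size = 8 inspects only the pointer field b.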
//go:nosplit
//go:nowritebarrier
func cgoCheckTypedBlock(typ *_type, src unsafe.Pointer, off, size uintptr) {
	// Anything past typ.ptrdata is not a pointer.
	if typ.ptrdata <= off {
		return
	}
	if ptrdataSize := typ.ptrdata - off; size > ptrdataSize {
		size = ptrdataSize
	}

	if typ.kind&kindGCProg == 0 {
		cgoCheckBits(src, typ.gcdata, off, size)
		return
	}

	// The type has a GC program. Try to find GC bits somewhere else.
	for _, datap := range activeModules() {
		if cgoInRange(src, datap.data, datap.edata) {
			doff := uintptr(src) - datap.data
			cgoCheckBits(add(src, -doff), datap.gcdatamask.bytedata, off+doff, size)
			return
		}
		if cgoInRange(src, datap.bss, datap.ebss) {
			boff := uintptr(src) - datap.bss
			cgoCheckBits(add(src, -boff), datap.gcbssmask.bytedata, off+boff, size)
			return
		}
	}

	s := spanOfUnchecked(uintptr(src))
	if s.state == _MSpanManual {
		// There are no heap bits for values stored on the stack.
		// For a channel receive src might be on the stack of some
		// other goroutine, so we can't unwind the stack even if
		// we wanted to.
		// We can't expand the GC program without extra storage
		// space we can't easily get.
		// Fortunately we have the type information.
		systemstack(func() {
			cgoCheckUsingType(typ, src, off, size)
		})
		return
	}

	// src must be in the regular heap.

	hbits := heapBitsForAddr(uintptr(src))
	for i := uintptr(0); i < off+size; i += sys.PtrSize {
		bits := hbits.bits()
		if i >= off && bits&bitPointer != 0 {
			v := *(*unsafe.Pointer)(add(src, i))
			if cgoIsGoPointer(v) {
				throw(cgoWriteBarrierFail)
			}
		}
		hbits = hbits.next()
	}
}

// cgoCheckBits checks the block of memory at src, for up to size
// bytes, and throws if it finds a Go pointer. The gcbits mark each
// pointer value. The src pointer is off bytes into the gcbits.
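//
// A worked example of the mask arithmetic (illustrative, assuming
// 8-byte pointers): each gcbits byte covers 8 words, so for off = 80
// a whole mask byte (skipMask = 80/8/8 = 1, covering 64 bytes) is
// skipped up front, and the remaining 16 bytes of off are consumed
// word by word inside the loop before any pointer bits are checked.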
//go:nosplit
//go:nowritebarrier
func cgoCheckBits(src unsafe.Pointer, gcbits *byte, off, size uintptr) {
	skipMask := off / sys.PtrSize / 8
	skipBytes := skipMask * sys.PtrSize * 8
	ptrmask := addb(gcbits, skipMask)
	src = add(src, skipBytes)
	off -= skipBytes
	size += off
	var bits uint32
	for i := uintptr(0); i < size; i += sys.PtrSize {
		if i&(sys.PtrSize*8-1) == 0 {
			bits = uint32(*ptrmask)
			ptrmask = addb(ptrmask, 1)
		} else {
			bits >>= 1
		}
		if off > 0 {
			off -= sys.PtrSize
		} else {
			if bits&1 != 0 {
				v := *(*unsafe.Pointer)(add(src, i))
				if cgoIsGoPointer(v) {
					throw(cgoWriteBarrierFail)
				}
			}
		}
	}
}

// cgoCheckUsingType is like cgoCheckTypedBlock, but is a last-ditch
// fallback to look for pointers in src using the type information.
// We only use this when looking at a value on the stack when the type
// uses a GC program, because otherwise it's more efficient to use the
// GC bits. This is called on the system stack.
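//
// For example (illustrative): walking a [4]T array with off = 24 and
// size = 8, where T is 16 bytes, skips the first element entirely,
// consumes the last 8 bytes of off in the second element, and checks
// only that element's final 8 bytes.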
//go:nowritebarrier
//go:systemstack
func cgoCheckUsingType(typ *_type, src unsafe.Pointer, off, size uintptr) {
	if typ.kind&kindNoPointers != 0 {
		return
	}

	// Anything past typ.ptrdata is not a pointer.
	if typ.ptrdata <= off {
		return
	}
	if ptrdataSize := typ.ptrdata - off; size > ptrdataSize {
		size = ptrdataSize
	}

	if typ.kind&kindGCProg == 0 {
		cgoCheckBits(src, typ.gcdata, off, size)
		return
	}
	switch typ.kind & kindMask {
	default:
		throw("can't happen")
	case kindArray:
		at := (*arraytype)(unsafe.Pointer(typ))
		for i := uintptr(0); i < at.len; i++ {
			if off < at.elem.size {
				cgoCheckUsingType(at.elem, src, off, size)
			}
			src = add(src, at.elem.size)
			skipped := off
			if skipped > at.elem.size {
				skipped = at.elem.size
			}
			checked := at.elem.size - skipped
			off -= skipped
			if size <= checked {
				return
			}
			size -= checked
		}
	case kindStruct:
		st := (*structtype)(unsafe.Pointer(typ))
		for _, f := range st.fields {
			if off < f.typ.size {
				cgoCheckUsingType(f.typ, src, off, size)
			}
			src = add(src, f.typ.size)
			skipped := off
			if skipped > f.typ.size {
				skipped = f.typ.size
			}
			checked := f.typ.size - skipped
			off -= skipped
			if size <= checked {
				return
			}
			size -= checked
		}
	}
}