2012-10-07 12:05:32 -06:00
|
|
|
// Copyright 2012 The Go Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style
|
|
|
|
// license that can be found in the LICENSE file.
|
|
|
|
|
|
|
|
// +build race
|
|
|
|
|
|
|
|
// Public race detection API, present iff built with -race.
|
|
|
|
|
|
|
|
package runtime
|
|
|
|
|
|
|
|
import (
|
|
|
|
"unsafe"
|
|
|
|
)
|
|
|
|
|
2012-11-14 05:51:23 -07:00
|
|
|
// RaceRead reports to the race detector a read of the memory at addr.
// No Go body: implemented outside Go (assembly).
func RaceRead(addr unsafe.Pointer)
|
|
|
|
// RaceWrite reports to the race detector a write of the memory at addr.
// No Go body: implemented outside Go (assembly).
func RaceWrite(addr unsafe.Pointer)
|
2013-06-10 12:40:35 -06:00
|
|
|
// RaceReadRange reports to the race detector a read of the len bytes at addr.
// No Go body: implemented outside Go (assembly).
func RaceReadRange(addr unsafe.Pointer, len int)
|
|
|
|
// RaceWriteRange reports to the race detector a write of the len bytes at addr.
// No Go body: implemented outside Go (assembly).
func RaceWriteRange(addr unsafe.Pointer, len int)
|
2012-11-14 05:51:23 -07:00
|
|
|
|
2012-10-07 12:05:32 -06:00
|
|
|
// RaceSemacquire reports a semaphore acquire on s to the race detector.
// No Go body: implemented outside Go (assembly).
func RaceSemacquire(s *uint32)
|
|
|
|
// RaceSemrelease reports a semaphore release on s to the race detector.
// No Go body: implemented outside Go (assembly).
func RaceSemrelease(s *uint32)
|
2014-06-17 00:03:03 -06:00
|
|
|
|
|
|
|
// private interface for the runtime

// raceenabled reports whether this binary was built with -race; since
// this file carries the race build tag it is unconditionally true here.
// The rest of the runtime consults it to decide whether to emit race events.
const raceenabled = true
|
2014-07-16 15:16:19 -06:00
|
|
|
|
2015-02-14 05:54:25 -07:00
|
|
|
// For all functions accepting callerpc and pc,
|
|
|
|
// callerpc is a return PC of the function that calls this function,
|
|
|
|
// pc is start PC of the function that calls this function.
|
2014-07-16 15:16:19 -06:00
|
|
|
func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
|
2014-07-31 10:32:09 -06:00
|
|
|
kind := t.kind & kindMask
|
2014-07-16 15:16:19 -06:00
|
|
|
if kind == kindArray || kind == kindStruct {
|
|
|
|
// for composite objects we have to read every address
|
|
|
|
// because a write might happen to any subobject.
|
2014-09-04 13:53:45 -06:00
|
|
|
racereadrangepc(addr, t.size, callerpc, pc)
|
2014-07-16 15:16:19 -06:00
|
|
|
} else {
|
|
|
|
// for non-composite objects we can read just the start
|
|
|
|
// address, as any write must write the first byte.
|
|
|
|
racereadpc(addr, callerpc, pc)
|
|
|
|
}
|
|
|
|
}
|
2014-09-02 15:13:29 -06:00
|
|
|
|
|
|
|
func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
|
|
|
|
kind := t.kind & kindMask
|
|
|
|
if kind == kindArray || kind == kindStruct {
|
|
|
|
// for composite objects we have to write every address
|
|
|
|
// because a write might happen to any subobject.
|
2014-09-04 13:53:45 -06:00
|
|
|
racewriterangepc(addr, t.size, callerpc, pc)
|
2014-09-02 15:13:29 -06:00
|
|
|
} else {
|
|
|
|
// for non-composite objects we can write just the start
|
|
|
|
// address, as any write must write the first byte.
|
|
|
|
racewritepc(addr, callerpc, pc)
|
|
|
|
}
|
|
|
|
}
|
2014-09-04 13:53:45 -06:00
|
|
|
|
|
|
|
// racereadpc reports to the race detector a read of the memory at addr,
// attributed to the given callpc/pc pair. No Go body: implemented outside Go.
//go:noescape
func racereadpc(addr unsafe.Pointer, callpc, pc uintptr)

// racewritepc reports to the race detector a write of the memory at addr,
// attributed to the given callpc/pc pair. No Go body: implemented outside Go.
//go:noescape
func racewritepc(addr unsafe.Pointer, callpc, pc uintptr)
|
|
|
|
|
|
|
|
// symbolizeContext is the argument block exchanged with the C race runtime
// when it asks Go to symbolize a PC; racesymbolize fills in the out fields.
type symbolizeContext struct {
	pc uintptr // program counter to symbolize (in)
	fn *byte // function name, NUL-terminated (out)
	file *byte // file name, NUL-terminated (out)
	line uintptr // line number (out)
	off uintptr // pc offset from function entry (out)
	res uintptr // set to 1 by racesymbolize on both success and fallback paths (out)
}
|
|
|
|
|
|
|
|
// "??": NUL-terminated placeholder function name used by racesymbolize
// when a PC cannot be resolved.
var qq = [...]byte{'?', '?', 0}

// "-": NUL-terminated placeholder file name used by racesymbolize
// when a PC cannot be resolved.
var dash = [...]byte{'-', 0}
|
|
|
|
|
|
|
|
// Callback from C into Go, runs on g0.
// racesymbolize resolves ctx.pc into function name, file, line and offset
// for race report printing. If the PC is unknown it fills in the "??"/"-"
// placeholders instead. ctx.res is set to 1 on both paths.
func racesymbolize(ctx *symbolizeContext) {
	f := findfunc(ctx.pc)
	if f == nil {
		// Unknown PC: hand back placeholders so the report still prints.
		ctx.fn = &qq[0]
		ctx.file = &dash[0]
		ctx.line = 0
		ctx.off = ctx.pc
		ctx.res = 1
		return
	}

	ctx.fn = cfuncname(f)
	file, line := funcline(f, ctx.pc)
	ctx.line = uintptr(line)
	ctx.file = &bytes(file)[0] // assume NUL-terminated
	ctx.off = ctx.pc - f.entry
	ctx.res = 1
	return
}
|
2015-10-16 15:01:45 -06:00
|
|
|
|
|
|
|
// Race runtime functions called via runtime·racecall.
//
// Each __tsan_xxx below is a dummy byte whose address — bound via
// go:linkname together with the cgo_import_static directives further
// down — is the entry point of the corresponding function in the C race
// runtime; callers pass &__tsan_xxx as the first argument to racecall.

//go:linkname __tsan_init __tsan_init
var __tsan_init byte

//go:linkname __tsan_fini __tsan_fini
var __tsan_fini byte

//go:linkname __tsan_map_shadow __tsan_map_shadow
var __tsan_map_shadow byte

//go:linkname __tsan_finalizer_goroutine __tsan_finalizer_goroutine
var __tsan_finalizer_goroutine byte

//go:linkname __tsan_go_start __tsan_go_start
var __tsan_go_start byte

//go:linkname __tsan_go_end __tsan_go_end
var __tsan_go_end byte

//go:linkname __tsan_malloc __tsan_malloc
var __tsan_malloc byte

//go:linkname __tsan_acquire __tsan_acquire
var __tsan_acquire byte

//go:linkname __tsan_release __tsan_release
var __tsan_release byte

//go:linkname __tsan_release_merge __tsan_release_merge
var __tsan_release_merge byte

//go:linkname __tsan_go_ignore_sync_begin __tsan_go_ignore_sync_begin
var __tsan_go_ignore_sync_begin byte

//go:linkname __tsan_go_ignore_sync_end __tsan_go_ignore_sync_end
var __tsan_go_ignore_sync_end byte
|
|
|
|
|
|
|
|
// Mimic what cmd/cgo would do.
|
|
|
|
//go:cgo_import_static __tsan_init
|
|
|
|
//go:cgo_import_static __tsan_fini
|
|
|
|
//go:cgo_import_static __tsan_map_shadow
|
|
|
|
//go:cgo_import_static __tsan_finalizer_goroutine
|
|
|
|
//go:cgo_import_static __tsan_go_start
|
|
|
|
//go:cgo_import_static __tsan_go_end
|
|
|
|
//go:cgo_import_static __tsan_malloc
|
|
|
|
//go:cgo_import_static __tsan_acquire
|
|
|
|
//go:cgo_import_static __tsan_release
|
|
|
|
//go:cgo_import_static __tsan_release_merge
|
|
|
|
//go:cgo_import_static __tsan_go_ignore_sync_begin
|
|
|
|
//go:cgo_import_static __tsan_go_ignore_sync_end
|
|
|
|
|
|
|
|
// These are called from race_amd64.s.
|
|
|
|
//go:cgo_import_static __tsan_read
|
|
|
|
//go:cgo_import_static __tsan_read_pc
|
|
|
|
//go:cgo_import_static __tsan_read_range
|
|
|
|
//go:cgo_import_static __tsan_write
|
|
|
|
//go:cgo_import_static __tsan_write_pc
|
|
|
|
//go:cgo_import_static __tsan_write_range
|
|
|
|
//go:cgo_import_static __tsan_func_enter
|
|
|
|
//go:cgo_import_static __tsan_func_exit
|
|
|
|
|
|
|
|
//go:cgo_import_static __tsan_go_atomic32_load
|
|
|
|
//go:cgo_import_static __tsan_go_atomic64_load
|
|
|
|
//go:cgo_import_static __tsan_go_atomic32_store
|
|
|
|
//go:cgo_import_static __tsan_go_atomic64_store
|
|
|
|
//go:cgo_import_static __tsan_go_atomic32_exchange
|
|
|
|
//go:cgo_import_static __tsan_go_atomic64_exchange
|
|
|
|
//go:cgo_import_static __tsan_go_atomic32_fetch_add
|
|
|
|
//go:cgo_import_static __tsan_go_atomic64_fetch_add
|
|
|
|
//go:cgo_import_static __tsan_go_atomic32_compare_exchange
|
|
|
|
//go:cgo_import_static __tsan_go_atomic64_compare_exchange
|
|
|
|
|
|
|
|
// start/end of global data (data+bss).
// Set once by raceinit (end is rounded up to a page boundary);
// consulted by isvalidaddr to decide whether an address has shadow.
var racedatastart uintptr
var racedataend uintptr

// start/end of heap for race_amd64.s
// Grown by racemapshadow as the heap arena is extended.
var racearenastart uintptr
var racearenaend uintptr
|
|
|
|
|
|
|
|
// The declarations below have no Go bodies; see the assembly
// (race_amd64.s — TODO confirm for non-amd64 ports).

// racefuncenter/racefuncexit bracket a range access with a virtual call
// frame for the given caller PC (paired in race{read,write}rangepc).
func racefuncenter(uintptr)
func racefuncexit()

// racereadrangepc1 reports a read of (addr, size) attributed to pc.
func racereadrangepc1(uintptr, uintptr, uintptr)

// racewriterangepc1 reports a write of (addr, size) attributed to pc.
func racewriterangepc1(uintptr, uintptr, uintptr)

// racesymbolizethunk is registered with __tsan_init (see raceinit);
// presumably it trampolines C symbolization requests to racesymbolize —
// confirm in the assembly.
func racesymbolizethunk(uintptr)

// racecall allows calling an arbitrary function f from C race runtime
// with up to 4 uintptr arguments.
func racecall(*byte, uintptr, uintptr, uintptr, uintptr)
|
|
|
|
|
|
|
|
// checks if the address has shadow (i.e. heap or data/bss)
|
|
|
|
//go:nosplit
|
|
|
|
func isvalidaddr(addr unsafe.Pointer) bool {
|
|
|
|
return racearenastart <= uintptr(addr) && uintptr(addr) < racearenaend ||
|
|
|
|
racedatastart <= uintptr(addr) && uintptr(addr) < racedataend
|
|
|
|
}
|
|
|
|
|
|
|
|
//go:nosplit

// raceinit initializes the race detector: it starts the C race runtime
// (registering the symbolization thunk), maps shadow memory for the
// global data segment, and records the data-segment bounds. It returns
// the race context for the initial goroutine.
func raceinit() uintptr {
	// cgo is required to initialize libc, which is used by race runtime
	if !iscgo {
		throw("raceinit: race build must use cgo")
	}

	var racectx uintptr
	racecall(&__tsan_init, uintptr(unsafe.Pointer(&racectx)), funcPC(racesymbolizethunk), 0, 0)

	// Round data segment to page boundaries, because it's used in mmap().
	// Compute the smallest [start, end) covering the noptrdata, data,
	// noptrbss and bss sections of the first module.
	start := ^uintptr(0)
	end := uintptr(0)
	if start > firstmoduledata.noptrdata {
		start = firstmoduledata.noptrdata
	}
	if start > firstmoduledata.data {
		start = firstmoduledata.data
	}
	if start > firstmoduledata.noptrbss {
		start = firstmoduledata.noptrbss
	}
	if start > firstmoduledata.bss {
		start = firstmoduledata.bss
	}
	if end < firstmoduledata.enoptrdata {
		end = firstmoduledata.enoptrdata
	}
	if end < firstmoduledata.edata {
		end = firstmoduledata.edata
	}
	if end < firstmoduledata.enoptrbss {
		end = firstmoduledata.enoptrbss
	}
	if end < firstmoduledata.ebss {
		end = firstmoduledata.ebss
	}
	// Only the size is rounded up here; start is used as-is.
	size := round(end-start, _PageSize)
	racecall(&__tsan_map_shadow, start, size, 0, 0)
	racedatastart = start
	racedataend = start + size

	return racectx
}
|
|
|
|
|
|
|
|
//go:nosplit

// racefini finalizes the C race runtime (__tsan_fini), e.g. at exit.
func racefini() {
	racecall(&__tsan_fini, 0, 0, 0, 0)
}
|
|
|
|
|
|
|
|
//go:nosplit
|
|
|
|
func racemapshadow(addr unsafe.Pointer, size uintptr) {
|
|
|
|
if racearenastart == 0 {
|
|
|
|
racearenastart = uintptr(addr)
|
|
|
|
}
|
|
|
|
if racearenaend < uintptr(addr)+size {
|
|
|
|
racearenaend = uintptr(addr) + size
|
|
|
|
}
|
|
|
|
racecall(&__tsan_map_shadow, uintptr(addr), size, 0, 0)
|
|
|
|
}
|
|
|
|
|
|
|
|
//go:nosplit

// racemalloc informs the race runtime of a newly allocated object
// occupying [p, p+sz).
func racemalloc(p unsafe.Pointer, sz uintptr) {
	racecall(&__tsan_malloc, uintptr(p), sz, 0, 0)
}
|
|
|
|
|
|
|
|
//go:nosplit
|
|
|
|
func racegostart(pc uintptr) uintptr {
|
|
|
|
_g_ := getg()
|
|
|
|
var spawng *g
|
|
|
|
if _g_.m.curg != nil {
|
|
|
|
spawng = _g_.m.curg
|
|
|
|
} else {
|
|
|
|
spawng = _g_
|
|
|
|
}
|
|
|
|
|
|
|
|
var racectx uintptr
|
|
|
|
racecall(&__tsan_go_start, spawng.racectx, uintptr(unsafe.Pointer(&racectx)), pc, 0)
|
|
|
|
return racectx
|
|
|
|
}
|
|
|
|
|
|
|
|
//go:nosplit

// racegoend informs the race runtime that the current goroutine is exiting.
func racegoend() {
	racecall(&__tsan_go_end, getg().racectx, 0, 0, 0)
}
|
|
|
|
|
|
|
|
//go:nosplit
|
|
|
|
func racewriterangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
|
|
|
|
_g_ := getg()
|
|
|
|
if _g_ != _g_.m.curg {
|
|
|
|
// The call is coming from manual instrumentation of Go code running on g0/gsignal.
|
|
|
|
// Not interesting.
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if callpc != 0 {
|
|
|
|
racefuncenter(callpc)
|
|
|
|
}
|
|
|
|
racewriterangepc1(uintptr(addr), sz, pc)
|
|
|
|
if callpc != 0 {
|
|
|
|
racefuncexit()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
//go:nosplit
|
|
|
|
func racereadrangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
|
|
|
|
_g_ := getg()
|
|
|
|
if _g_ != _g_.m.curg {
|
|
|
|
// The call is coming from manual instrumentation of Go code running on g0/gsignal.
|
|
|
|
// Not interesting.
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if callpc != 0 {
|
|
|
|
racefuncenter(callpc)
|
|
|
|
}
|
|
|
|
racereadrangepc1(uintptr(addr), sz, pc)
|
|
|
|
if callpc != 0 {
|
|
|
|
racefuncexit()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
//go:nosplit

// raceacquire reports an acquire on addr for the current goroutine
// (synchronizes with prior releases on addr).
func raceacquire(addr unsafe.Pointer) {
	raceacquireg(getg(), addr)
}
|
|
|
|
|
|
|
|
//go:nosplit

// raceacquireg reports an acquire on addr on behalf of gp.
// NOTE(review): the guard reads getg().raceignore — the *current* g, not
// gp — so events are suppressed whenever the executing goroutine has
// race handling disabled; presumably intentional, confirm against callers.
func raceacquireg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_acquire, gp.racectx, uintptr(addr), 0, 0)
}
|
|
|
|
|
|
|
|
//go:nosplit
|
|
|
|
func racerelease(addr unsafe.Pointer) {
|
|
|
|
_g_ := getg()
|
|
|
|
if _g_.raceignore != 0 || !isvalidaddr(addr) {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
racereleaseg(_g_, addr)
|
|
|
|
}
|
|
|
|
|
|
|
|
//go:nosplit

// racereleaseg reports a release on addr on behalf of gp.
// NOTE(review): like raceacquireg, the guard reads getg().raceignore
// (the executing g), not gp.raceignore — presumably intentional; confirm.
func racereleaseg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release, gp.racectx, uintptr(addr), 0, 0)
}
|
|
|
|
|
|
|
|
//go:nosplit

// racereleasemerge reports a release-merge (__tsan_release_merge) on addr
// for the current goroutine.
func racereleasemerge(addr unsafe.Pointer) {
	racereleasemergeg(getg(), addr)
}
|
|
|
|
|
|
|
|
//go:nosplit

// racereleasemergeg reports a release-merge on addr on behalf of gp.
// NOTE(review): the guard reads getg().raceignore (the executing g),
// not gp.raceignore — same pattern as raceacquireg/racereleaseg.
func racereleasemergeg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release_merge, gp.racectx, uintptr(addr), 0, 0)
}
|
|
|
|
|
|
|
|
//go:nosplit

// racefingo marks the current goroutine as the finalizer goroutine
// (__tsan_finalizer_goroutine) in the race runtime.
func racefingo() {
	racecall(&__tsan_finalizer_goroutine, getg().racectx, 0, 0, 0)
}
|
|
|
|
|
|
|
|
//go:nosplit

// RaceAcquire reports to the race detector an acquire operation on addr,
// synchronizing with prior RaceRelease/RaceReleaseMerge calls on addr.
func RaceAcquire(addr unsafe.Pointer) {
	raceacquire(addr)
}
|
|
|
|
|
|
|
|
//go:nosplit

// RaceRelease reports to the race detector a release operation on addr,
// to be observed by later RaceAcquire calls on addr.
func RaceRelease(addr unsafe.Pointer) {
	racerelease(addr)
}
|
|
|
|
|
|
|
|
//go:nosplit

// RaceReleaseMerge reports to the race detector a release-merge operation
// on addr (forwards to __tsan_release_merge via racereleasemerge).
func RaceReleaseMerge(addr unsafe.Pointer) {
	racereleasemerge(addr)
}
|
|
|
|
|
|
|
|
//go:nosplit
|
|
|
|
|
|
|
|
// RaceDisable disables handling of race events in the current goroutine.
|
|
|
|
func RaceDisable() {
|
|
|
|
_g_ := getg()
|
|
|
|
if _g_.raceignore == 0 {
|
|
|
|
racecall(&__tsan_go_ignore_sync_begin, _g_.racectx, 0, 0, 0)
|
|
|
|
}
|
|
|
|
_g_.raceignore++
|
|
|
|
}
|
|
|
|
|
|
|
|
//go:nosplit
|
|
|
|
|
|
|
|
// RaceEnable re-enables handling of race events in the current goroutine.
|
|
|
|
func RaceEnable() {
|
|
|
|
_g_ := getg()
|
|
|
|
_g_.raceignore--
|
|
|
|
if _g_.raceignore == 0 {
|
|
|
|
racecall(&__tsan_go_ignore_sync_end, _g_.racectx, 0, 0, 0)
|
|
|
|
}
|
|
|
|
}
|