mirror of
https://github.com/golang/go
synced 2024-11-20 01:14:40 -07:00
243757576d
* append contents of race1.go to race.go * delete "Implementation of the race detector API." comment from race1.go Updates #12952 Change-Id: Ibdd9c4dc79a63c3bef69eade9525578063c86c1c Reviewed-on: https://go-review.googlesource.com/16023 Run-TryBot: Brad Fitzpatrick <bradfitz@golang.org> Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
398 lines · 9.8 KiB · Go
// Copyright 2012 The Go Authors. All rights reserved.
|
|
// Use of this source code is governed by a BSD-style
|
|
// license that can be found in the LICENSE file.
|
|
|
|
// +build race
|
|
|
|
// Public race detection API, present iff built with -race.
|
|
|
|
package runtime
|
|
|
|
import (
|
|
"unsafe"
|
|
)
|
|
|
|
// The functions below are declared without Go bodies; their
// implementations are provided outside this file.

// RaceRead reports a read of addr to the race detector.
func RaceRead(addr unsafe.Pointer)

// RaceWrite reports a write to addr to the race detector.
func RaceWrite(addr unsafe.Pointer)

// RaceReadRange reports a read of the len bytes starting at addr.
func RaceReadRange(addr unsafe.Pointer, len int)

// RaceWriteRange reports a write to the len bytes starting at addr.
func RaceWriteRange(addr unsafe.Pointer, len int)

// RaceSemacquire models a semaphore acquire on s for the race detector.
func RaceSemacquire(s *uint32)

// RaceSemrelease models a semaphore release on s for the race detector.
func RaceSemrelease(s *uint32)
|
|
|
|
// private interface for the runtime

// raceenabled reports that race instrumentation is compiled in; this
// file is only built under the "race" build tag.
const raceenabled = true
|
|
|
|
// For all functions accepting callerpc and pc,
|
|
// callerpc is a return PC of the function that calls this function,
|
|
// pc is start PC of the function that calls this function.
|
|
func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
|
|
kind := t.kind & kindMask
|
|
if kind == kindArray || kind == kindStruct {
|
|
// for composite objects we have to read every address
|
|
// because a write might happen to any subobject.
|
|
racereadrangepc(addr, t.size, callerpc, pc)
|
|
} else {
|
|
// for non-composite objects we can read just the start
|
|
// address, as any write must write the first byte.
|
|
racereadpc(addr, callerpc, pc)
|
|
}
|
|
}
|
|
|
|
func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
|
|
kind := t.kind & kindMask
|
|
if kind == kindArray || kind == kindStruct {
|
|
// for composite objects we have to write every address
|
|
// because a write might happen to any subobject.
|
|
racewriterangepc(addr, t.size, callerpc, pc)
|
|
} else {
|
|
// for non-composite objects we can write just the start
|
|
// address, as any write must write the first byte.
|
|
racewritepc(addr, callerpc, pc)
|
|
}
|
|
}
|
|
|
|
// racereadpc and racewritepc report a single read/write of addr,
// attributed to call site callpc within the function starting at pc.
// They are declared without Go bodies (implemented outside this file);
// //go:noescape promises that addr does not escape through them.

//go:noescape
func racereadpc(addr unsafe.Pointer, callpc, pc uintptr)

//go:noescape
func racewritepc(addr unsafe.Pointer, callpc, pc uintptr)
|
|
|
|
// symbolizeContext is the record exchanged with the race runtime when it
// asks Go to symbolize a PC (see racesymbolize, which fills the output
// fields from the input pc).
type symbolizeContext struct {
	pc   uintptr // program counter to symbolize (input)
	fn   *byte   // function name, NUL-terminated for the C side (output)
	file *byte   // file name, NUL-terminated for the C side (output)
	line uintptr // line number (output)
	off  uintptr // pc's offset from the function entry (output)
	res  uintptr // set to 1 when symbolization produced a result (output)
}
|
|
|
|
// Placeholder strings ("??" and "-") handed back to the race runtime
// when a PC cannot be symbolized; both carry an explicit NUL terminator
// for the C side.
var qq = [...]byte{'?', '?', 0}

var dash = [...]byte{'-', 0}
|
|
|
|
// Callback from C into Go, runs on g0.
|
|
func racesymbolize(ctx *symbolizeContext) {
|
|
f := findfunc(ctx.pc)
|
|
if f == nil {
|
|
ctx.fn = &qq[0]
|
|
ctx.file = &dash[0]
|
|
ctx.line = 0
|
|
ctx.off = ctx.pc
|
|
ctx.res = 1
|
|
return
|
|
}
|
|
|
|
ctx.fn = cfuncname(f)
|
|
file, line := funcline(f, ctx.pc)
|
|
ctx.line = uintptr(line)
|
|
ctx.file = &bytes(file)[0] // assume NUL-terminated
|
|
ctx.off = ctx.pc - f.entry
|
|
ctx.res = 1
|
|
return
|
|
}
|
|
|
|
// Race runtime functions called via runtime·racecall.
// Each variable below is a placeholder byte whose address is used as the
// function-pointer (*byte) first argument to racecall; //go:linkname
// binds the Go symbol to the like-named C function in the TSan runtime.

//go:linkname __tsan_init __tsan_init
var __tsan_init byte

//go:linkname __tsan_fini __tsan_fini
var __tsan_fini byte

//go:linkname __tsan_map_shadow __tsan_map_shadow
var __tsan_map_shadow byte

//go:linkname __tsan_finalizer_goroutine __tsan_finalizer_goroutine
var __tsan_finalizer_goroutine byte

//go:linkname __tsan_go_start __tsan_go_start
var __tsan_go_start byte

//go:linkname __tsan_go_end __tsan_go_end
var __tsan_go_end byte

//go:linkname __tsan_malloc __tsan_malloc
var __tsan_malloc byte

//go:linkname __tsan_acquire __tsan_acquire
var __tsan_acquire byte

//go:linkname __tsan_release __tsan_release
var __tsan_release byte

//go:linkname __tsan_release_merge __tsan_release_merge
var __tsan_release_merge byte

//go:linkname __tsan_go_ignore_sync_begin __tsan_go_ignore_sync_begin
var __tsan_go_ignore_sync_begin byte

//go:linkname __tsan_go_ignore_sync_end __tsan_go_ignore_sync_end
var __tsan_go_ignore_sync_end byte
|
|
|
|
// Mimic what cmd/cgo would do.
|
|
//go:cgo_import_static __tsan_init
|
|
//go:cgo_import_static __tsan_fini
|
|
//go:cgo_import_static __tsan_map_shadow
|
|
//go:cgo_import_static __tsan_finalizer_goroutine
|
|
//go:cgo_import_static __tsan_go_start
|
|
//go:cgo_import_static __tsan_go_end
|
|
//go:cgo_import_static __tsan_malloc
|
|
//go:cgo_import_static __tsan_acquire
|
|
//go:cgo_import_static __tsan_release
|
|
//go:cgo_import_static __tsan_release_merge
|
|
//go:cgo_import_static __tsan_go_ignore_sync_begin
|
|
//go:cgo_import_static __tsan_go_ignore_sync_end
|
|
|
|
// These are called from race_amd64.s.
|
|
//go:cgo_import_static __tsan_read
|
|
//go:cgo_import_static __tsan_read_pc
|
|
//go:cgo_import_static __tsan_read_range
|
|
//go:cgo_import_static __tsan_write
|
|
//go:cgo_import_static __tsan_write_pc
|
|
//go:cgo_import_static __tsan_write_range
|
|
//go:cgo_import_static __tsan_func_enter
|
|
//go:cgo_import_static __tsan_func_exit
|
|
|
|
//go:cgo_import_static __tsan_go_atomic32_load
|
|
//go:cgo_import_static __tsan_go_atomic64_load
|
|
//go:cgo_import_static __tsan_go_atomic32_store
|
|
//go:cgo_import_static __tsan_go_atomic64_store
|
|
//go:cgo_import_static __tsan_go_atomic32_exchange
|
|
//go:cgo_import_static __tsan_go_atomic64_exchange
|
|
//go:cgo_import_static __tsan_go_atomic32_fetch_add
|
|
//go:cgo_import_static __tsan_go_atomic64_fetch_add
|
|
//go:cgo_import_static __tsan_go_atomic32_compare_exchange
|
|
//go:cgo_import_static __tsan_go_atomic64_compare_exchange
|
|
|
|
// start/end of global data (data+bss).
// Set once by raceinit (page-rounded); consulted by isvalidaddr to
// decide whether an address has race shadow memory.
var racedatastart uintptr

var racedataend uintptr

// start/end of heap for race_amd64.s
// Widened by racemapshadow as the heap grows; also consulted by
// isvalidaddr.
var racearenastart uintptr

var racearenaend uintptr
|
|
|
|
// The declarations below have no Go bodies; they are implemented outside
// this file (assembly — presumably race_amd64.s, see comments above).

// racefuncenter/racefuncexit push and pop a virtual call frame so the
// race runtime attributes the bracketed memory reports to the given
// caller PC (see racereadrangepc/racewriterangepc for the pattern).
func racefuncenter(uintptr)

func racefuncexit()

// Raw range-report entry points; arguments are (addr, size, pc) as used
// by racereadrangepc/racewriterangepc.
func racereadrangepc1(uintptr, uintptr, uintptr)

func racewriterangepc1(uintptr, uintptr, uintptr)

// racesymbolizethunk is registered with __tsan_init in raceinit;
// presumably it forwards symbolization requests to racesymbolize —
// confirm in the assembly.
func racesymbolizethunk(uintptr)

// racecall allows calling an arbitrary function f from C race runtime
// with up to 4 uintptr arguments.
func racecall(*byte, uintptr, uintptr, uintptr, uintptr)
|
|
|
|
// checks if the address has shadow (i.e. heap or data/bss)
|
|
//go:nosplit
|
|
func isvalidaddr(addr unsafe.Pointer) bool {
|
|
return racearenastart <= uintptr(addr) && uintptr(addr) < racearenaend ||
|
|
racedatastart <= uintptr(addr) && uintptr(addr) < racedataend
|
|
}
|
|
|
|
// raceinit initializes the race runtime: it calls __tsan_init, maps race
// shadow for the program's data segments, and returns the race context
// produced by __tsan_init.
//go:nosplit
func raceinit() uintptr {
	// cgo is required to initialize libc, which is used by race runtime
	if !iscgo {
		throw("raceinit: race build must use cgo")
	}

	var racectx uintptr
	// __tsan_init fills racectx and registers racesymbolizethunk as the
	// PC-symbolization callback.
	racecall(&__tsan_init, uintptr(unsafe.Pointer(&racectx)), funcPC(racesymbolizethunk), 0, 0)

	// Round data segment to page boundaries, because it's used in mmap().
	// Compute the union [start, end) of the module's data, noptrdata,
	// bss, and noptrbss segments.
	start := ^uintptr(0) // max uintptr: any segment address is smaller
	end := uintptr(0)
	if start > firstmoduledata.noptrdata {
		start = firstmoduledata.noptrdata
	}
	if start > firstmoduledata.data {
		start = firstmoduledata.data
	}
	if start > firstmoduledata.noptrbss {
		start = firstmoduledata.noptrbss
	}
	if start > firstmoduledata.bss {
		start = firstmoduledata.bss
	}
	if end < firstmoduledata.enoptrdata {
		end = firstmoduledata.enoptrdata
	}
	if end < firstmoduledata.edata {
		end = firstmoduledata.edata
	}
	if end < firstmoduledata.enoptrbss {
		end = firstmoduledata.enoptrbss
	}
	if end < firstmoduledata.ebss {
		end = firstmoduledata.ebss
	}
	// Map shadow for the whole page-rounded data region and record its
	// bounds for isvalidaddr.
	size := round(end-start, _PageSize)
	racecall(&__tsan_map_shadow, start, size, 0, 0)
	racedatastart = start
	racedataend = start + size

	return racectx
}
|
|
|
|
// racefini shuts down the race runtime via __tsan_fini.
//go:nosplit
func racefini() {
	racecall(&__tsan_fini, 0, 0, 0, 0)
}
|
|
|
|
//go:nosplit
|
|
func racemapshadow(addr unsafe.Pointer, size uintptr) {
|
|
if racearenastart == 0 {
|
|
racearenastart = uintptr(addr)
|
|
}
|
|
if racearenaend < uintptr(addr)+size {
|
|
racearenaend = uintptr(addr) + size
|
|
}
|
|
racecall(&__tsan_map_shadow, uintptr(addr), size, 0, 0)
|
|
}
|
|
|
|
// racemalloc informs the race runtime that the sz bytes at p have been
// newly allocated.
//go:nosplit
func racemalloc(p unsafe.Pointer, sz uintptr) {
	racecall(&__tsan_malloc, uintptr(p), sz, 0, 0)
}
|
|
|
|
//go:nosplit
|
|
func racegostart(pc uintptr) uintptr {
|
|
_g_ := getg()
|
|
var spawng *g
|
|
if _g_.m.curg != nil {
|
|
spawng = _g_.m.curg
|
|
} else {
|
|
spawng = _g_
|
|
}
|
|
|
|
var racectx uintptr
|
|
racecall(&__tsan_go_start, spawng.racectx, uintptr(unsafe.Pointer(&racectx)), pc, 0)
|
|
return racectx
|
|
}
|
|
|
|
// racegoend informs the race runtime that the current goroutine is
// exiting.
//go:nosplit
func racegoend() {
	racecall(&__tsan_go_end, getg().racectx, 0, 0, 0)
}
|
|
|
|
//go:nosplit
|
|
func racewriterangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
|
|
_g_ := getg()
|
|
if _g_ != _g_.m.curg {
|
|
// The call is coming from manual instrumentation of Go code running on g0/gsignal.
|
|
// Not interesting.
|
|
return
|
|
}
|
|
if callpc != 0 {
|
|
racefuncenter(callpc)
|
|
}
|
|
racewriterangepc1(uintptr(addr), sz, pc)
|
|
if callpc != 0 {
|
|
racefuncexit()
|
|
}
|
|
}
|
|
|
|
//go:nosplit
|
|
func racereadrangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
|
|
_g_ := getg()
|
|
if _g_ != _g_.m.curg {
|
|
// The call is coming from manual instrumentation of Go code running on g0/gsignal.
|
|
// Not interesting.
|
|
return
|
|
}
|
|
if callpc != 0 {
|
|
racefuncenter(callpc)
|
|
}
|
|
racereadrangepc1(uintptr(addr), sz, pc)
|
|
if callpc != 0 {
|
|
racefuncexit()
|
|
}
|
|
}
|
|
|
|
//go:nosplit
|
|
func raceacquire(addr unsafe.Pointer) {
|
|
raceacquireg(getg(), addr)
|
|
}
|
|
|
|
// raceacquireg records an acquire of addr on behalf of gp via
// __tsan_acquire. It is a no-op while the current goroutine is ignoring
// race events or when addr has no shadow memory.
//go:nosplit
func raceacquireg(gp *g, addr unsafe.Pointer) {
	// Note: the ignore check consults getg(), not gp.
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_acquire, gp.racectx, uintptr(addr), 0, 0)
}
|
|
|
|
//go:nosplit
|
|
func racerelease(addr unsafe.Pointer) {
|
|
_g_ := getg()
|
|
if _g_.raceignore != 0 || !isvalidaddr(addr) {
|
|
return
|
|
}
|
|
racereleaseg(_g_, addr)
|
|
}
|
|
|
|
// racereleaseg records a release of addr on behalf of gp via
// __tsan_release. It is a no-op while the current goroutine (note:
// getg(), not gp) is ignoring race events or when addr has no shadow
// memory.
//go:nosplit
func racereleaseg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release, gp.racectx, uintptr(addr), 0, 0)
}
|
|
|
|
//go:nosplit
|
|
func racereleasemerge(addr unsafe.Pointer) {
|
|
racereleasemergeg(getg(), addr)
|
|
}
|
|
|
|
// racereleasemergeg records a release-merge of addr on behalf of gp via
// __tsan_release_merge. It is a no-op while the current goroutine
// (note: getg(), not gp) is ignoring race events or when addr has no
// shadow memory.
//go:nosplit
func racereleasemergeg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release_merge, gp.racectx, uintptr(addr), 0, 0)
}
|
|
|
|
// racefingo registers the current goroutine with the race runtime as
// the finalizer goroutine (__tsan_finalizer_goroutine).
//go:nosplit
func racefingo() {
	racecall(&__tsan_finalizer_goroutine, getg().racectx, 0, 0, 0)
}
|
|
|
|
//go:nosplit

// RaceAcquire records an acquire of addr for the race detector,
// establishing a happens-before edge from prior releases of addr.
// Equivalent to raceacquire on the current goroutine.
func RaceAcquire(addr unsafe.Pointer) {
	raceacquire(addr)
}
|
|
|
|
//go:nosplit

// RaceRelease records a release of addr for the race detector, the
// counterpart of RaceAcquire. Equivalent to racerelease on the current
// goroutine.
func RaceRelease(addr unsafe.Pointer) {
	racerelease(addr)
}
|
|
|
|
//go:nosplit

// RaceReleaseMerge is like RaceRelease but merges with addr's previous
// release state (__tsan_release_merge). Equivalent to racereleasemerge
// on the current goroutine.
func RaceReleaseMerge(addr unsafe.Pointer) {
	racereleasemerge(addr)
}
|
|
|
|
//go:nosplit
|
|
|
|
// RaceDisable disables handling of race events in the current goroutine.
|
|
func RaceDisable() {
|
|
_g_ := getg()
|
|
if _g_.raceignore == 0 {
|
|
racecall(&__tsan_go_ignore_sync_begin, _g_.racectx, 0, 0, 0)
|
|
}
|
|
_g_.raceignore++
|
|
}
|
|
|
|
//go:nosplit
|
|
|
|
// RaceEnable re-enables handling of race events in the current goroutine.
|
|
func RaceEnable() {
|
|
_g_ := getg()
|
|
_g_.raceignore--
|
|
if _g_.raceignore == 0 {
|
|
racecall(&__tsan_go_ignore_sync_end, _g_.racectx, 0, 0, 0)
|
|
}
|
|
}
|