Mirror of https://github.com/golang/go, synced 2024-11-19 14:34:42 -07:00

Commit 8f81dfe8b4
Currently, we perform write barriers after performing pointer writes. At the moment, it simply doesn't matter what order this happens in, as long as they appear atomic to GC. But both the hybrid barrier and ROC are going to require a pre-write write barrier.

For the hybrid barrier, this is important because the barrier needs to observe both the current value of the slot and the value that will be written to it. (Alternatively, the caller could do the write and pass in the old value, but it seems easier and more useful to just swap the order of the barrier and the write.)

For ROC, this is necessary because, if the pointer write is going to make the pointer reachable to some goroutine that it currently is not visible to, the garbage collector must take some special action before that pointer becomes more broadly visible.

This commit swaps pointer writes around so the write barrier occurs before the pointer write.

The main subtlety here is bulk memory writes. Currently, these copy to the destination first and then use the pointer bitmap of the destination to find the copied pointers and invoke the write barrier. This is necessary because the source may not have a pointer bitmap. To handle these, we pass both the source and the destination to the bulk memory barrier, which uses the pointer bitmap of the destination, but reads the pointer values from the source.

Updates #17503.

Change-Id: I78ecc0c5c94ee81c29019c305b3d232069294a55
Reviewed-on: https://go-review.googlesource.com/31763
Reviewed-by: Rick Hudson <rlh@golang.org>
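To make the ordering concrete, here is a minimal standalone sketch of a pre-write barrier. It is not the runtime's implementation; preWriteBarrier, storePointer, and bulkCopy are hypothetical names. The barrier runs before the store, so it can observe both the slot's current value and the incoming one, and the bulk-copy case takes the pointer layout from the destination while reading the pointer values from the source.

package main

import "fmt"

// preWriteBarrier stands in for the runtime's writebarrierptr_prewrite:
// because it runs before the store, it can see both the slot's current
// value and the value about to be written.
func preWriteBarrier(slot *uintptr, new uintptr) {
	fmt.Printf("barrier: old=%#x new=%#x\n", *slot, new)
}

// storePointer shows the ordering this change establishes:
// barrier first, then the pointer write.
func storePointer(slot *uintptr, new uintptr) {
	preWriteBarrier(slot, new)
	*slot = new
}

// bulkCopy sketches the bulk-memory case: the pointer layout (isPointer)
// comes from the destination's type, but the values passed to the barrier
// are read from the source, since the source may have no pointer bitmap.
func bulkCopy(dst, src []uintptr, isPointer func(int) bool) {
	for i := range dst {
		if isPointer(i) {
			preWriteBarrier(&dst[i], src[i])
		}
	}
	copy(dst, src)
}

func main() {
	var slot uintptr
	storePointer(&slot, 0x1234)

	dst := make([]uintptr, 2)
	src := []uintptr{0xdead, 0xbeef}
	bulkCopy(dst, src, func(i int) bool { return i == 1 })
	fmt.Println(dst)
}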
70 lines
2.9 KiB
Go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// These functions cannot have go:noescape annotations,
// because while ptr does not escape, new does.
// If new is marked as not escaping, the compiler will make incorrect
// escape analysis decisions about the pointer value being stored.
// Instead, these are wrappers around the actual atomics (casp1 and so on)
// that use noescape to convey which arguments do not escape.
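
// (Illustrative aside, not part of this file: the noescape helper used by
// the wrappers below lives elsewhere in the runtime, in runtime/stubs.go.
// It is an identity function written so that escape analysis cannot tell
// that the result aliases the argument; it looks roughly like this.)
//
//	//go:nosplit
//	func noescape(p unsafe.Pointer) unsafe.Pointer {
//		x := uintptr(p)
//		return unsafe.Pointer(x ^ 0)
//	}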

// atomicstorep performs *ptr = new atomically and invokes a write barrier.
//
//go:nosplit
func atomicstorep(ptr unsafe.Pointer, new unsafe.Pointer) {
	writebarrierptr_prewrite((*uintptr)(ptr), uintptr(new))
	atomic.StorepNoWB(noescape(ptr), new)
}

//go:nosplit
func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool {
	// The write barrier is only necessary if the CAS succeeds,
	// but since it needs to happen before the write becomes
	// public, we have to do it conservatively all the time.
	writebarrierptr_prewrite((*uintptr)(unsafe.Pointer(ptr)), uintptr(new))
	return atomic.Casp1((*unsafe.Pointer)(noescape(unsafe.Pointer(ptr))), noescape(old), new)
}

// Like above, but implement in terms of sync/atomic's uintptr operations.
// We cannot just call the runtime routines, because the race detector expects
// to be able to intercept the sync/atomic forms but not the runtime forms.

//go:linkname sync_atomic_StoreUintptr sync/atomic.StoreUintptr
func sync_atomic_StoreUintptr(ptr *uintptr, new uintptr)

//go:linkname sync_atomic_StorePointer sync/atomic.StorePointer
//go:nosplit
func sync_atomic_StorePointer(ptr *unsafe.Pointer, new unsafe.Pointer) {
	writebarrierptr_prewrite((*uintptr)(unsafe.Pointer(ptr)), uintptr(new))
	sync_atomic_StoreUintptr((*uintptr)(unsafe.Pointer(ptr)), uintptr(new))
}

//go:linkname sync_atomic_SwapUintptr sync/atomic.SwapUintptr
func sync_atomic_SwapUintptr(ptr *uintptr, new uintptr) uintptr

//go:linkname sync_atomic_SwapPointer sync/atomic.SwapPointer
//go:nosplit
func sync_atomic_SwapPointer(ptr *unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer {
	writebarrierptr_prewrite((*uintptr)(unsafe.Pointer(ptr)), uintptr(new))
	old := unsafe.Pointer(sync_atomic_SwapUintptr((*uintptr)(noescape(unsafe.Pointer(ptr))), uintptr(new)))
	return old
}

//go:linkname sync_atomic_CompareAndSwapUintptr sync/atomic.CompareAndSwapUintptr
func sync_atomic_CompareAndSwapUintptr(ptr *uintptr, old, new uintptr) bool

//go:linkname sync_atomic_CompareAndSwapPointer sync/atomic.CompareAndSwapPointer
//go:nosplit
func sync_atomic_CompareAndSwapPointer(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool {
	writebarrierptr_prewrite((*uintptr)(unsafe.Pointer(ptr)), uintptr(new))
	return sync_atomic_CompareAndSwapUintptr((*uintptr)(noescape(unsafe.Pointer(ptr))), uintptr(old), uintptr(new))
}
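As a follow-up illustration, ordinary user code reaches these wrappers through the sync/atomic pointer operations that the go:linkname directives above bind to them; as the file's comment notes, the race detector intercepts the sync/atomic forms rather than the runtime forms. The snippet below is plain application code, not part of this file; the config type is a made-up example.

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

type config struct{ name string }

func main() {
	var p unsafe.Pointer // holds a *config

	// StorePointer is backed by sync_atomic_StorePointer above, which
	// issues the pre-write barrier before performing the store.
	atomic.StorePointer(&p, unsafe.Pointer(&config{name: "v1"}))

	// CompareAndSwapPointer likewise goes through its wrapper, which
	// issues the barrier conservatively even if the swap fails.
	old := atomic.LoadPointer(&p)
	swapped := atomic.CompareAndSwapPointer(&p, old, unsafe.Pointer(&config{name: "v2"}))

	fmt.Println("swapped:", swapped, "name:", (*config)(atomic.LoadPointer(&p)).name)
}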