cmd/compile: reorganize write barrier code a bit
This exposes mightBeHeapPointer and mightContainHeapPointer which I plan
to use in future CLs.

Change-Id: Ice4ae3b33127936868fff6cc045d8703d0b1a79a
Reviewed-on: https://go-review.googlesource.com/c/go/+/447776
Reviewed-by: Cherry Mui <cherryyz@google.com>
Reviewed-by: Keith Randall <khr@google.com>
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gopher Robot <gobot@golang.org>
parent 0e42632301
commit 6b5b7b3240
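For context (not part of the CL): this is the sort of source-level store the two new helpers let the compiler prove barrier-free. An illustrative sketch only; T and global are made-up names.

package main

// global lives in the data segment, so its address can never be a heap
// pointer; new(T) returns known-zeroed memory (tracked via ZeroRegion).
var global int

type T struct {
	p *int
}

func main() {
	t := new(T)   // freshly allocated, all words known to be zero
	t.p = &global // a non-heap pointer stored into known-zero memory:
	// the reorganized needwb below can prove no write barrier is needed
}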
src/cmd/compile/internal/ssa/writebarrier.go

@@ -24,10 +24,60 @@ type ZeroRegion struct {
 	mask uint64
 }
 
+// mightBeHeapPointer reports whether v might point to the heap.
+// v must have pointer type.
+func mightBeHeapPointer(v *Value) bool {
+	if IsGlobalAddr(v) {
+		return false
+	}
+	return true
+}
+
+// mightContainHeapPointer reports whether the data currently at addresses
+// [ptr,ptr+size) might contain heap pointers. "currently" means at memory state mem.
+// zeroes contains ZeroRegion data to help make that decision (see computeZeroMap).
+func mightContainHeapPointer(ptr *Value, size int64, mem *Value, zeroes map[ID]ZeroRegion) bool {
+	if IsReadOnlyGlobalAddr(ptr) {
+		// The read-only globals section cannot contain any heap pointers.
+		return false
+	}
+
+	// See if we can prove that the queried memory is all zero.
+
+	// Find base pointer and offset. Hopefully, the base is the result of a new(T).
+	var off int64
+	for ptr.Op == OpOffPtr {
+		off += ptr.AuxInt
+		ptr = ptr.Args[0]
+	}
+
+	ptrSize := ptr.Block.Func.Config.PtrSize
+	if off%ptrSize != 0 || size%ptrSize != 0 {
+		ptr.Fatalf("unaligned pointer write")
+	}
+	if off < 0 || off+size > 64*ptrSize {
+		// memory range goes off end of tracked offsets
+		return true
+	}
+	z := zeroes[mem.ID]
+	if ptr != z.base {
+		// This isn't the object we know about at this memory state.
+		return true
+	}
+	// Mask of bits we're asking about
+	m := (uint64(1)<<(size/ptrSize) - 1) << (off / ptrSize)
+
+	if z.mask&m == m {
+		// All locations are known to be zero, so no heap pointers.
+		return false
+	}
+	return true
+}
+
 // needwb reports whether we need write barrier for store op v.
 // v must be Store/Move/Zero.
 // zeroes provides known zero information (keyed by ID of memory-type values).
-func needwb(v *Value, zeroes map[ID]ZeroRegion, select1 []*Value) bool {
+func needwb(v *Value, zeroes map[ID]ZeroRegion) bool {
 	t, ok := v.Aux.(*types.Type)
 	if !ok {
 		v.Fatalf("store aux is not a type: %s", v.LongString())
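The known-zero test above reduces to bit arithmetic over the 64-bit ZeroRegion mask. A standalone sketch of just that arithmetic; the function name is made up and nothing here is compiler API.

package main

import "fmt"

// zeroMaskCovers reports whether the pointer-sized words in [off, off+size)
// are all marked zero in mask, mirroring the
// m := (uint64(1)<<(size/ptrSize) - 1) << (off / ptrSize) test in
// mightContainHeapPointer. off and size are in bytes and must be multiples
// of ptrSize; only the first 64 words of an object are tracked.
func zeroMaskCovers(mask uint64, off, size, ptrSize int64) bool {
	m := (uint64(1)<<(size/ptrSize) - 1) << (off / ptrSize)
	return mask&m == m
}

func main() {
	const ptrSize = 8
	mask := uint64(0b1111) // words 0..3 of the object are known zero
	fmt.Println(zeroMaskCovers(mask, 8, 16, ptrSize))  // true: asks about words 1..2
	fmt.Println(zeroMaskCovers(mask, 24, 16, ptrSize)) // false: word 4 is unknown
}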
@@ -35,43 +85,30 @@ func needwb(v *Value, zeroes map[ID]ZeroRegion, select1 []*Value) bool {
 	if !t.HasPointers() {
 		return false
 	}
-	if IsStackAddr(v.Args[0]) {
-		return false // write on stack doesn't need write barrier
+	dst := v.Args[0]
+	if IsStackAddr(dst) {
+		return false // writes into the stack don't need write barrier
 	}
-	if v.Op == OpMove && IsReadOnlyGlobalAddr(v.Args[1]) {
-		if mem, ok := IsNewObject(v.Args[0], select1); ok && mem == v.MemoryArg() {
-			// Copying data from readonly memory into a fresh object doesn't need a write barrier.
+	// If we're writing to a place that might have heap pointers, we need
+	// the write barrier.
+	if mightContainHeapPointer(dst, t.Size(), v.MemoryArg(), zeroes) {
+		return true
+	}
+	// Lastly, check if the values we're writing might be heap pointers.
+	// If they aren't, we don't need a write barrier.
+	switch v.Op {
+	case OpStore:
+		if !mightBeHeapPointer(v.Args[1]) {
 			return false
 		}
-	}
-	if v.Op == OpStore && IsGlobalAddr(v.Args[1]) {
-		// Storing pointers to non-heap locations into zeroed memory doesn't need a write barrier.
-		ptr := v.Args[0]
-		var off int64
-		size := v.Aux.(*types.Type).Size()
-		for ptr.Op == OpOffPtr {
-			off += ptr.AuxInt
-			ptr = ptr.Args[0]
+	case OpZero:
+		return false // nil is not a heap pointer
+	case OpMove:
+		if !mightContainHeapPointer(v.Args[1], t.Size(), v.Args[2], zeroes) {
+			return false
 		}
-		ptrSize := v.Block.Func.Config.PtrSize
-		if off%ptrSize != 0 || size%ptrSize != 0 {
-			v.Fatalf("unaligned pointer write")
-		}
-		if off < 0 || off+size > 64*ptrSize {
-			// write goes off end of tracked offsets
-			return true
-		}
-		z := zeroes[v.MemoryArg().ID]
-		if ptr != z.base {
-			return true
-		}
-		for i := off; i < off+size; i += ptrSize {
-			if z.mask>>uint(i/ptrSize)&1 == 0 {
-				return true // not known to be zero
-			}
-		}
-		// All written locations are known to be zero - write barrier not needed.
-		return false
+	default:
+		v.Fatalf("store op unknown: %s", v.LongString())
 	}
 	return true
 }
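The restructured needwb now reads as a short decision sequence. A minimal sketch of that flow with the SSA queries replaced by plain booleans; storeKind and needsBarrier are hypothetical names, not compiler API.

package main

import "fmt"

type storeKind int

const (
	opStore storeKind = iota
	opZero
	opMove
)

// needsBarrier mirrors the new control flow of needwb. (The real code
// first returns false when the stored type has no pointers at all.)
func needsBarrier(kind storeKind, dstOnStack, dstMightHoldHeapPtrs, srcMightBeHeapPtr bool) bool {
	if dstOnStack {
		return false // writes into the stack don't need write barrier
	}
	if dstMightHoldHeapPtrs {
		return true // overwriting a possible heap pointer needs the barrier
	}
	switch kind {
	case opStore, opMove:
		return srcMightBeHeapPtr // barrier only if the source may point to the heap
	case opZero:
		return false // nil is not a heap pointer
	}
	return true
}

func main() {
	// Zeroing a fresh object: no barrier.
	fmt.Println(needsBarrier(opZero, false, false, false)) // false
	// Storing a global's address into known-zero memory: no barrier.
	fmt.Println(needsBarrier(opStore, false, false, false)) // false
	// Storing a possibly-heap pointer over unknown memory: barrier.
	fmt.Println(needsBarrier(opStore, false, true, true)) // true
}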
@@ -122,7 +159,7 @@ func writebarrier(f *Func) {
 	for _, v := range b.Values {
 		switch v.Op {
 		case OpStore, OpMove, OpZero:
-			if needwb(v, zeroes, select1) {
+			if needwb(v, zeroes) {
 				switch v.Op {
 				case OpStore:
 					v.Op = OpStoreWB
@@ -592,7 +629,7 @@ func IsReadOnlyGlobalAddr(v *Value) bool {
 		// Nil pointers are read only. See issue 33438.
 		return true
 	}
-	if v.Op == OpAddr && v.Aux.(*obj.LSym).Type == objabi.SRODATA {
+	if v.Op == OpAddr && v.Aux != nil && v.Aux.(*obj.LSym).Type == objabi.SRODATA {
 		return true
 	}
 	return false
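Why the added v.Aux != nil guard matters: in Go, a single-result type assertion on a nil interface value panics at runtime, so an OpAddr whose Aux was never set would crash the unguarded predicate. A standalone illustration; *int stands in for *obj.LSym.

package main

import "fmt"

func main() {
	var aux any // a nil interface, like v.Aux on an OpAddr with no symbol

	// The guarded form short-circuits safely, as the fixed predicate now does.
	if aux != nil && aux.(*int) != nil {
		fmt.Println("non-nil *int")
	}

	// Without the guard, a single-result assertion on nil panics.
	defer func() { fmt.Println("recovered:", recover()) }()
	_ = aux.(*int) // interface conversion: interface {} is nil, not *int
}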