diff --git a/src/cmd/compile/internal/ssa/check.go b/src/cmd/compile/internal/ssa/check.go index 82d7b7687ba..82aa9f1ce89 100644 --- a/src/cmd/compile/internal/ssa/check.go +++ b/src/cmd/compile/internal/ssa/check.go @@ -310,6 +310,10 @@ func checkFunc(f *Func) { } } + memCheck(f) +} + +func memCheck(f *Func) { // Check that if a tuple has a memory type, it is second. for _, b := range f.Blocks { for _, v := range b.Values { @@ -319,24 +323,122 @@ func checkFunc(f *Func) { } } - // Check that only one memory is live at any point. - // TODO: make this check examine interblock. - if f.scheduled { - for _, b := range f.Blocks { - var mem *Value // the live memory + // Single live memory checks. + // These checks only work if there are no memory copies. + // (Memory copies introduce ambiguity about which mem value is really live. + // probably fixable, but it's easier to avoid the problem.) + // For the same reason, disable this check if some memory ops are unused. + for _, b := range f.Blocks { + for _, v := range b.Values { + if (v.Op == OpCopy || v.Uses == 0) && v.Type.IsMemory() { + return + } + } + if b != f.Entry && len(b.Preds) == 0 { + return + } + } + + // Compute live memory at the end of each block. + lastmem := make([]*Value, f.NumBlocks()) + ss := newSparseSet(f.NumValues()) + for _, b := range f.Blocks { + // Mark overwritten memory values. Those are args of other + // ops that generate memory values. + ss.clear() + for _, v := range b.Values { + if v.Op == OpPhi || !v.Type.IsMemory() { + continue + } + if m := v.MemoryArg(); m != nil { + ss.add(m.ID) + } + } + // There should be at most one remaining unoverwritten memory value. + for _, v := range b.Values { + if !v.Type.IsMemory() { + continue + } + if ss.contains(v.ID) { + continue + } + if lastmem[b.ID] != nil { + f.Fatalf("two live memory values in %s: %s and %s", b, lastmem[b.ID], v) + } + lastmem[b.ID] = v + } + // If there is no remaining memory value, that means there was no memory update. + // Take any memory arg. + if lastmem[b.ID] == nil { for _, v := range b.Values { - if v.Op != OpPhi { - for _, a := range v.Args { - if a.Type.IsMemory() || a.Type.IsTuple() && a.Type.FieldType(1).IsMemory() { - if mem == nil { - mem = a - } else if mem != a { - f.Fatalf("two live mems @ %s: %s and %s", v, mem, a) - } - } + if v.Op == OpPhi { + continue + } + m := v.MemoryArg() + if m == nil { + continue + } + if lastmem[b.ID] != nil && lastmem[b.ID] != m { + f.Fatalf("two live memory values in %s: %s and %s", b, lastmem[b.ID], m) + } + lastmem[b.ID] = m + } + } + } + // Propagate last live memory through storeless blocks. + for { + changed := false + for _, b := range f.Blocks { + if lastmem[b.ID] != nil { + continue + } + for _, e := range b.Preds { + p := e.b + if lastmem[p.ID] != nil { + lastmem[b.ID] = lastmem[p.ID] + changed = true + break + } + } + } + if !changed { + break + } + } + // Check merge points. + for _, b := range f.Blocks { + for _, v := range b.Values { + if v.Op == OpPhi && v.Type.IsMemory() { + for i, a := range v.Args { + if a != lastmem[b.Preds[i].b.ID] { + f.Fatalf("inconsistent memory phi %s %d %s %s", v.LongString(), i, a, lastmem[b.Preds[i].b.ID]) } } - if v.Type.IsMemory() || v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() { + } + } + } + + // Check that only one memory is live at any point. 
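Aside, for readers tracing the new memCheck above: the "propagate last live memory through storeless blocks" step is a plain fixed-point iteration over predecessors. Below is a minimal standalone sketch of that idea using toy types (not the compiler's *Func/*Block/*Value), before the diff continues with the scheduled-blocks check:

```go
// Hypothetical stand-ins for illustration only; the real pass works on ssa.Block/ssa.Value.
package main

import "fmt"

type block struct {
	id    int
	preds []*block
}

// propagate fills in, for every block that defines no memory value itself,
// the last live memory value inherited from a predecessor, iterating to a
// fixed point just like the loop in memCheck.
func propagate(blocks []*block, lastmem map[int]string) {
	for changed := true; changed; {
		changed = false
		for _, b := range blocks {
			if lastmem[b.id] != "" {
				continue
			}
			for _, p := range b.preds {
				if m := lastmem[p.id]; m != "" {
					lastmem[b.id] = m
					changed = true
					break
				}
			}
		}
	}
}

func main() {
	b1 := &block{id: 1}
	b2 := &block{id: 2, preds: []*block{b1}} // storeless block
	b3 := &block{id: 3, preds: []*block{b2}} // storeless block
	lastmem := map[int]string{1: "v7"}       // b1 ends with memory value v7
	propagate([]*block{b1, b2, b3}, lastmem)
	fmt.Println(lastmem[2], lastmem[3]) // both inherit v7
}
```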
+ if f.scheduled { + for _, b := range f.Blocks { + var mem *Value // the current live memory in the block + for _, v := range b.Values { + if v.Op == OpPhi { + if v.Type.IsMemory() { + mem = v + } + continue + } + if mem == nil && len(b.Preds) > 0 { + // If no mem phi, take mem of any predecessor. + mem = lastmem[b.Preds[0].b.ID] + } + for _, a := range v.Args { + if a.Type.IsMemory() && a != mem { + f.Fatalf("two live mems @ %s: %s and %s", v, mem, a) + } + } + if v.Type.IsMemory() { mem = v } } diff --git a/src/cmd/compile/internal/ssa/deadstore.go b/src/cmd/compile/internal/ssa/deadstore.go index de3c6aed742..bac4930e781 100644 --- a/src/cmd/compile/internal/ssa/deadstore.go +++ b/src/cmd/compile/internal/ssa/deadstore.go @@ -34,10 +34,6 @@ func dse(f *Func) { } if v.Type.IsMemory() { stores = append(stores, v) - if v.Op == OpSelect1 { - // Use the args of the tuple-generating op. - v = v.Args[0] - } for _, a := range v.Args { if a.Block == b && a.Type.IsMemory() { storeUse.add(a.ID) diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules index 55433954046..711373a3e09 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules @@ -487,12 +487,12 @@ (AtomicExchange64 ptr val mem) -> (XCHGQ val ptr mem) // Atomic adds. -(AtomicAdd32 ptr val mem) -> (AddTupleFirst32 (XADDLlock val ptr mem) val) -(AtomicAdd64 ptr val mem) -> (AddTupleFirst64 (XADDQlock val ptr mem) val) -(Select0 (AddTupleFirst32 tuple val)) -> (ADDL val (Select0 tuple)) -(Select1 (AddTupleFirst32 tuple _ )) -> (Select1 tuple) -(Select0 (AddTupleFirst64 tuple val)) -> (ADDQ val (Select0 tuple)) -(Select1 (AddTupleFirst64 tuple _ )) -> (Select1 tuple) +(AtomicAdd32 ptr val mem) -> (AddTupleFirst32 val (XADDLlock val ptr mem)) +(AtomicAdd64 ptr val mem) -> (AddTupleFirst64 val (XADDQlock val ptr mem)) +(Select0 (AddTupleFirst32 val tuple)) -> (ADDL val (Select0 tuple)) +(Select1 (AddTupleFirst32 _ tuple)) -> (Select1 tuple) +(Select0 (AddTupleFirst64 val tuple)) -> (ADDQ val (Select0 tuple)) +(Select1 (AddTupleFirst64 _ tuple)) -> (Select1 tuple) // Atomic compare and swap. (AtomicCompareAndSwap32 ptr old new_ mem) -> (CMPXCHGLlock ptr old new_ mem) diff --git a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go index ed77bb00d7d..28131db5f5d 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go @@ -572,8 +572,8 @@ func init() { // Note: arg0 and arg1 are backwards compared to MOVLstore (to facilitate resultInArg0)! {name: "XADDLlock", argLength: 3, reg: gpstorexchg, asm: "XADDL", typ: "(UInt32,Mem)", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, hasSideEffects: true, symEffect: "RdWr"}, {name: "XADDQlock", argLength: 3, reg: gpstorexchg, asm: "XADDQ", typ: "(UInt64,Mem)", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, hasSideEffects: true, symEffect: "RdWr"}, - {name: "AddTupleFirst32", argLength: 2}, // arg0=tuple . Returns . - {name: "AddTupleFirst64", argLength: 2}, // arg0=tuple . Returns . + {name: "AddTupleFirst32", argLength: 2}, // arg1=tuple . Returns . + {name: "AddTupleFirst64", argLength: 2}, // arg1=tuple . Returns . // Compare and swap. // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory. 
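A note on why the AddTupleFirst argument order flips in the rules above: MemoryArg only looks at a value's last argument, and under the new tuple-aware IsMemory it is the (UInt32,Mem)/(UInt64,Mem) tuple that carries the memory, so the tuple has to come last for AddTupleFirst to participate in the store chain like any other op. As Go-level context (not part of this CL): XADDLlock returns the value the location held before the add, while sync/atomic's Add functions return the new value, which is exactly the gap AddTupleFirst + Select0 closes.

```go
// The operation these lowering rules implement, seen from user code.
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var counter uint32
	got := atomic.AddUint32(&counter, 5) // on amd64: XADDLlock yields the old value, then old+5
	fmt.Println(got)                     // 5, the post-add value
}
```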
diff --git a/src/cmd/compile/internal/ssa/gen/S390X.rules b/src/cmd/compile/internal/ssa/gen/S390X.rules index aed4f5cd719..4ae21cd55b5 100644 --- a/src/cmd/compile/internal/ssa/gen/S390X.rules +++ b/src/cmd/compile/internal/ssa/gen/S390X.rules @@ -120,12 +120,12 @@ (AtomicStorePtrNoWB ptr val mem) -> (MOVDatomicstore ptr val mem) // Atomic adds. -(AtomicAdd32 ptr val mem) -> (AddTupleFirst32 (LAA ptr val mem) val) -(AtomicAdd64 ptr val mem) -> (AddTupleFirst64 (LAAG ptr val mem) val) -(Select0 (AddTupleFirst32 tuple val)) -> (ADDW val (Select0 tuple)) -(Select1 (AddTupleFirst32 tuple _ )) -> (Select1 tuple) -(Select0 (AddTupleFirst64 tuple val)) -> (ADD val (Select0 tuple)) -(Select1 (AddTupleFirst64 tuple _ )) -> (Select1 tuple) +(AtomicAdd32 ptr val mem) -> (AddTupleFirst32 val (LAA ptr val mem)) +(AtomicAdd64 ptr val mem) -> (AddTupleFirst64 val (LAAG ptr val mem)) +(Select0 (AddTupleFirst32 val tuple)) -> (ADDW val (Select0 tuple)) +(Select1 (AddTupleFirst32 _ tuple)) -> (Select1 tuple) +(Select0 (AddTupleFirst64 val tuple)) -> (ADD val (Select0 tuple)) +(Select1 (AddTupleFirst64 _ tuple)) -> (Select1 tuple) // Atomic exchanges. (AtomicExchange32 ptr val mem) -> (LoweredAtomicExchange32 ptr val mem) diff --git a/src/cmd/compile/internal/ssa/gen/S390XOps.go b/src/cmd/compile/internal/ssa/gen/S390XOps.go index c3edb9385df..2a08a276d9f 100644 --- a/src/cmd/compile/internal/ssa/gen/S390XOps.go +++ b/src/cmd/compile/internal/ssa/gen/S390XOps.go @@ -467,8 +467,8 @@ func init() { // Returns a tuple of . {name: "LAA", argLength: 3, reg: gpstorelaa, asm: "LAA", typ: "(UInt32,Mem)", aux: "SymOff", faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, {name: "LAAG", argLength: 3, reg: gpstorelaa, asm: "LAAG", typ: "(UInt64,Mem)", aux: "SymOff", faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, - {name: "AddTupleFirst32", argLength: 2}, // arg0=tuple . Returns . - {name: "AddTupleFirst64", argLength: 2}, // arg0=tuple . Returns . + {name: "AddTupleFirst32", argLength: 2}, // arg1=tuple . Returns . + {name: "AddTupleFirst64", argLength: 2}, // arg1=tuple . Returns . // Compare and swap. // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory. diff --git a/src/cmd/compile/internal/ssa/loopreschedchecks.go b/src/cmd/compile/internal/ssa/loopreschedchecks.go index 8ffca82a683..98b6e92e936 100644 --- a/src/cmd/compile/internal/ssa/loopreschedchecks.go +++ b/src/cmd/compile/internal/ssa/loopreschedchecks.go @@ -391,10 +391,6 @@ func findLastMems(f *Func) []*Value { } if v.Type.IsMemory() { stores = append(stores, v) - if v.Op == OpSelect1 { - // Use the arg of the tuple-generating op. 
- v = v.Args[0] - } for _, a := range v.Args { if a.Block == b && a.Type.IsMemory() { storeUse.add(a.ID) diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index ff24103eec2..9a69a310436 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -35902,18 +35902,18 @@ func rewriteValueAMD64_OpAtomicAdd32_0(v *Value) bool { _ = typ // match: (AtomicAdd32 ptr val mem) // cond: - // result: (AddTupleFirst32 (XADDLlock val ptr mem) val) + // result: (AddTupleFirst32 val (XADDLlock val ptr mem)) for { ptr := v.Args[0] val := v.Args[1] mem := v.Args[2] v.reset(OpAMD64AddTupleFirst32) + v.AddArg(val) v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem)) v0.AddArg(val) v0.AddArg(ptr) v0.AddArg(mem) v.AddArg(v0) - v.AddArg(val) return true } } @@ -35924,18 +35924,18 @@ func rewriteValueAMD64_OpAtomicAdd64_0(v *Value) bool { _ = typ // match: (AtomicAdd64 ptr val mem) // cond: - // result: (AddTupleFirst64 (XADDQlock val ptr mem) val) + // result: (AddTupleFirst64 val (XADDQlock val ptr mem)) for { ptr := v.Args[0] val := v.Args[1] mem := v.Args[2] v.reset(OpAMD64AddTupleFirst64) + v.AddArg(val) v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem)) v0.AddArg(val) v0.AddArg(ptr) v0.AddArg(mem) v.AddArg(v0) - v.AddArg(val) return true } } @@ -40216,7 +40216,7 @@ func rewriteValueAMD64_OpRsh8x8_0(v *Value) bool { func rewriteValueAMD64_OpSelect0_0(v *Value) bool { b := v.Block _ = b - // match: (Select0 (AddTupleFirst32 tuple val)) + // match: (Select0 (AddTupleFirst32 val tuple)) // cond: // result: (ADDL val (Select0 tuple)) for { @@ -40225,8 +40225,8 @@ func rewriteValueAMD64_OpSelect0_0(v *Value) bool { if v_0.Op != OpAMD64AddTupleFirst32 { break } - tuple := v_0.Args[0] - val := v_0.Args[1] + val := v_0.Args[0] + tuple := v_0.Args[1] v.reset(OpAMD64ADDL) v.AddArg(val) v0 := b.NewValue0(v.Pos, OpSelect0, t) @@ -40234,7 +40234,7 @@ func rewriteValueAMD64_OpSelect0_0(v *Value) bool { v.AddArg(v0) return true } - // match: (Select0 (AddTupleFirst64 tuple val)) + // match: (Select0 (AddTupleFirst64 val tuple)) // cond: // result: (ADDQ val (Select0 tuple)) for { @@ -40243,8 +40243,8 @@ func rewriteValueAMD64_OpSelect0_0(v *Value) bool { if v_0.Op != OpAMD64AddTupleFirst64 { break } - tuple := v_0.Args[0] - val := v_0.Args[1] + val := v_0.Args[0] + tuple := v_0.Args[1] v.reset(OpAMD64ADDQ) v.AddArg(val) v0 := b.NewValue0(v.Pos, OpSelect0, t) @@ -40255,7 +40255,7 @@ func rewriteValueAMD64_OpSelect0_0(v *Value) bool { return false } func rewriteValueAMD64_OpSelect1_0(v *Value) bool { - // match: (Select1 (AddTupleFirst32 tuple _)) + // match: (Select1 (AddTupleFirst32 _ tuple)) // cond: // result: (Select1 tuple) for { @@ -40263,12 +40263,12 @@ func rewriteValueAMD64_OpSelect1_0(v *Value) bool { if v_0.Op != OpAMD64AddTupleFirst32 { break } - tuple := v_0.Args[0] + tuple := v_0.Args[1] v.reset(OpSelect1) v.AddArg(tuple) return true } - // match: (Select1 (AddTupleFirst64 tuple _)) + // match: (Select1 (AddTupleFirst64 _ tuple)) // cond: // result: (Select1 tuple) for { @@ -40276,7 +40276,7 @@ func rewriteValueAMD64_OpSelect1_0(v *Value) bool { if v_0.Op != OpAMD64AddTupleFirst64 { break } - tuple := v_0.Args[0] + tuple := v_0.Args[1] v.reset(OpSelect1) v.AddArg(tuple) return true diff --git a/src/cmd/compile/internal/ssa/rewriteS390X.go b/src/cmd/compile/internal/ssa/rewriteS390X.go index 1929f5491df..f07ca9a5680 100644 --- 
a/src/cmd/compile/internal/ssa/rewriteS390X.go +++ b/src/cmd/compile/internal/ssa/rewriteS390X.go @@ -898,18 +898,18 @@ func rewriteValueS390X_OpAtomicAdd32_0(v *Value) bool { _ = typ // match: (AtomicAdd32 ptr val mem) // cond: - // result: (AddTupleFirst32 (LAA ptr val mem) val) + // result: (AddTupleFirst32 val (LAA ptr val mem)) for { ptr := v.Args[0] val := v.Args[1] mem := v.Args[2] v.reset(OpS390XAddTupleFirst32) + v.AddArg(val) v0 := b.NewValue0(v.Pos, OpS390XLAA, types.NewTuple(typ.UInt32, types.TypeMem)) v0.AddArg(ptr) v0.AddArg(val) v0.AddArg(mem) v.AddArg(v0) - v.AddArg(val) return true } } @@ -920,18 +920,18 @@ func rewriteValueS390X_OpAtomicAdd64_0(v *Value) bool { _ = typ // match: (AtomicAdd64 ptr val mem) // cond: - // result: (AddTupleFirst64 (LAAG ptr val mem) val) + // result: (AddTupleFirst64 val (LAAG ptr val mem)) for { ptr := v.Args[0] val := v.Args[1] mem := v.Args[2] v.reset(OpS390XAddTupleFirst64) + v.AddArg(val) v0 := b.NewValue0(v.Pos, OpS390XLAAG, types.NewTuple(typ.UInt64, types.TypeMem)) v0.AddArg(ptr) v0.AddArg(val) v0.AddArg(mem) v.AddArg(v0) - v.AddArg(val) return true } } @@ -34159,7 +34159,7 @@ func rewriteValueS390X_OpS390XXORconst_0(v *Value) bool { func rewriteValueS390X_OpSelect0_0(v *Value) bool { b := v.Block _ = b - // match: (Select0 (AddTupleFirst32 tuple val)) + // match: (Select0 (AddTupleFirst32 val tuple)) // cond: // result: (ADDW val (Select0 tuple)) for { @@ -34168,8 +34168,8 @@ func rewriteValueS390X_OpSelect0_0(v *Value) bool { if v_0.Op != OpS390XAddTupleFirst32 { break } - tuple := v_0.Args[0] - val := v_0.Args[1] + val := v_0.Args[0] + tuple := v_0.Args[1] v.reset(OpS390XADDW) v.AddArg(val) v0 := b.NewValue0(v.Pos, OpSelect0, t) @@ -34177,7 +34177,7 @@ func rewriteValueS390X_OpSelect0_0(v *Value) bool { v.AddArg(v0) return true } - // match: (Select0 (AddTupleFirst64 tuple val)) + // match: (Select0 (AddTupleFirst64 val tuple)) // cond: // result: (ADD val (Select0 tuple)) for { @@ -34186,8 +34186,8 @@ func rewriteValueS390X_OpSelect0_0(v *Value) bool { if v_0.Op != OpS390XAddTupleFirst64 { break } - tuple := v_0.Args[0] - val := v_0.Args[1] + val := v_0.Args[0] + tuple := v_0.Args[1] v.reset(OpS390XADD) v.AddArg(val) v0 := b.NewValue0(v.Pos, OpSelect0, t) @@ -34198,7 +34198,7 @@ func rewriteValueS390X_OpSelect0_0(v *Value) bool { return false } func rewriteValueS390X_OpSelect1_0(v *Value) bool { - // match: (Select1 (AddTupleFirst32 tuple _)) + // match: (Select1 (AddTupleFirst32 _ tuple)) // cond: // result: (Select1 tuple) for { @@ -34206,12 +34206,12 @@ func rewriteValueS390X_OpSelect1_0(v *Value) bool { if v_0.Op != OpS390XAddTupleFirst32 { break } - tuple := v_0.Args[0] + tuple := v_0.Args[1] v.reset(OpSelect1) v.AddArg(tuple) return true } - // match: (Select1 (AddTupleFirst64 tuple _)) + // match: (Select1 (AddTupleFirst64 _ tuple)) // cond: // result: (Select1 tuple) for { @@ -34219,7 +34219,7 @@ func rewriteValueS390X_OpSelect1_0(v *Value) bool { if v_0.Op != OpS390XAddTupleFirst64 { break } - tuple := v_0.Args[0] + tuple := v_0.Args[1] v.reset(OpSelect1) v.AddArg(tuple) return true diff --git a/src/cmd/compile/internal/ssa/schedule.go b/src/cmd/compile/internal/ssa/schedule.go index 2e9464eb0dd..c44c243eaca 100644 --- a/src/cmd/compile/internal/ssa/schedule.go +++ b/src/cmd/compile/internal/ssa/schedule.go @@ -132,19 +132,14 @@ func schedule(f *Func) { } } - // TODO: make this logic permanent in types.IsMemory? 
- isMem := func(v *Value) bool { - return v.Type.IsMemory() || v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() - } - for _, b := range f.Blocks { // Find store chain for block. // Store chains for different blocks overwrite each other, so // the calculated store chain is good only for this block. for _, v := range b.Values { - if v.Op != OpPhi && isMem(v) { + if v.Op != OpPhi && v.Type.IsMemory() { for _, w := range v.Args { - if isMem(w) { + if w.Type.IsMemory() { nextMem[w.ID] = v } } @@ -164,7 +159,7 @@ func schedule(f *Func) { uses[w.ID]++ } // Any load must come before the following store. - if !isMem(v) && isMem(w) { + if !v.Type.IsMemory() && w.Type.IsMemory() { // v is a load. s := nextMem[w.ID] if s == nil || s.Block != b { @@ -315,11 +310,7 @@ func storeOrder(values []*Value, sset *sparseSet, storeNumber []int32) []*Value if v.Op == OpInitMem || v.Op == OpPhi { continue } - a := v - if v.Op == OpSelect1 { - a = a.Args[0] - } - sset.add(a.MemoryArg().ID) // record that v's memory arg is used + sset.add(v.MemoryArg().ID) // record that v's memory arg is used } if v.Op == OpNilCheck { hasNilCheck = true @@ -335,7 +326,7 @@ func storeOrder(values []*Value, sset *sparseSet, storeNumber []int32) []*Value for _, v := range stores { if !sset.contains(v.ID) { if last != nil { - f.Fatalf("two stores live simutaneously: %v and %v", v, last) + f.Fatalf("two stores live simultaneously: %v and %v", v, last) } last = v } @@ -362,9 +353,6 @@ func storeOrder(values []*Value, sset *sparseSet, storeNumber []int32) []*Value } break } - if w.Op == OpSelect1 { - w = w.Args[0] - } w = w.MemoryArg() } var stack []*Value diff --git a/src/cmd/compile/internal/ssa/trim.go b/src/cmd/compile/internal/ssa/trim.go index 09e80bdfe5a..04b4fd4d541 100644 --- a/src/cmd/compile/internal/ssa/trim.go +++ b/src/cmd/compile/internal/ssa/trim.go @@ -46,10 +46,24 @@ func trim(f *Func) { v.resetArgs() continue } - // Pad the arguments of the remaining phi-ops, so + // Pad the arguments of the remaining phi-ops so // they match the new predecessor count of `s`. - for len(v.Args) < len(s.Preds) { - v.AddArg(v.Args[0]) + // Since s did not have a Phi op corresponding to + // the phi op in b, the other edges coming into s + // must be loopback edges from s, so v is the right + // argument to v! + args := make([]*Value, len(v.Args)) + copy(args, v.Args) + v.resetArgs() + for x := 0; x < j; x++ { + v.AddArg(v) + } + v.AddArg(args[0]) + for x := j + 1; x < ns; x++ { + v.AddArg(v) + } + for _, a := range args[1:] { + v.AddArg(a) } } b.Values[k] = v diff --git a/src/cmd/compile/internal/ssa/value.go b/src/cmd/compile/internal/ssa/value.go index a0ba112a8d4..7edc71be52a 100644 --- a/src/cmd/compile/internal/ssa/value.go +++ b/src/cmd/compile/internal/ssa/value.go @@ -319,10 +319,8 @@ func (v *Value) RegName() string { } // MemoryArg returns the memory argument for the Value. -// The returned value, if non-nil, will be memory-typed, -// except in the case where v is Select1, in which case -// the returned value will be a tuple containing a memory -// type. Otherwise, nil is returned. +// The returned value, if non-nil, will be memory-typed (or a tuple with a memory-typed second part). +// Otherwise, nil is returned. 
func (v *Value) MemoryArg() *Value { if v.Op == OpPhi { v.Fatalf("MemoryArg on Phi") @@ -331,8 +329,7 @@ func (v *Value) MemoryArg() *Value { if na == 0 { return nil } - if m := v.Args[na-1]; m.Type.IsMemory() || - (v.Op == OpSelect1 && m.Type.FieldType(1).IsMemory()) { + if m := v.Args[na-1]; m.Type.IsMemory() { return m } return nil diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go index 6f2f574b397..5c44e625856 100644 --- a/src/cmd/compile/internal/types/type.go +++ b/src/cmd/compile/internal/types/type.go @@ -1324,10 +1324,12 @@ func (t *Type) ChanDir() ChanDir { return t.Extra.(*Chan).Dir } -func (t *Type) IsMemory() bool { return t == TypeMem } -func (t *Type) IsFlags() bool { return t == TypeFlags } -func (t *Type) IsVoid() bool { return t == TypeVoid } -func (t *Type) IsTuple() bool { return t.Etype == TTUPLE } +func (t *Type) IsMemory() bool { + return t == TypeMem || t.Etype == TTUPLE && t.Extra.(*Tuple).second == TypeMem +} +func (t *Type) IsFlags() bool { return t == TypeFlags } +func (t *Type) IsVoid() bool { return t == TypeVoid } +func (t *Type) IsTuple() bool { return t.Etype == TTUPLE } // IsUntyped reports whether t is an untyped type. func (t *Type) IsUntyped() bool {
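Taken together, the value.go and type.go changes are what let every OpSelect1 special case above disappear: a tuple whose second component is memory now answers true to IsMemory, so tuple-producing ops such as XADDLlock or LAA sit directly in the store chain and MemoryArg finds their memory argument in the usual last-argument position. A standalone sketch of the new predicate with toy types (not cmd/compile's actual Type):

```go
// Toy model of the generalized IsMemory check; names and layout are
// illustrative, not the compiler's.
package main

import "fmt"

type kind int

const (
	kindMem kind = iota
	kindInt
	kindTuple
)

type typ struct {
	k      kind
	second *typ // second tuple component, set only for tuples
}

// isMemory mirrors the new semantics: a type "is memory" if it is the
// memory type itself, or a tuple whose second component is memory.
func (t *typ) isMemory() bool {
	return t.k == kindMem || (t.k == kindTuple && t.second != nil && t.second.k == kindMem)
}

func main() {
	mem := &typ{k: kindMem}
	u32 := &typ{k: kindInt}
	xaddResult := &typ{k: kindTuple, second: mem} // like XADDLlock's (UInt32,Mem)
	fmt.Println(mem.isMemory(), u32.isMemory(), xaddResult.isMemory()) // true false true
}
```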