
cmd/compile: elide unnecessary sign/zeroExt, PPC64

Bias {Eq,Neq}{8,16} to prefer the extension likely to match
their operands' loads (if loaded), and elide the sign- and
zero-extending MOV{B,H}reg and MOV{B,H}Zreg when their operands
are already appropriately extended.

Change-Id: Ic01b9cab55e170f68fc2369688b50ce78a818608
Reviewed-on: https://go-review.googlesource.com/29236
Run-TryBot: David Chase <drchase@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Cherry Zhang <cherryyz@google.com>
Author: David Chase 2016-09-15 15:14:32 -07:00
parent 46ba59025f
commit c87528d776
2 changed files with 172 additions and 0 deletions
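
For context, a minimal Go sketch of the comparisons the new rules target (illustrative only, not part of this commit; the function names are hypothetical, and it assumes the backend's existing rules that lower SignExt8to32 to MOVBreg and ZeroExt16to32 to MOVHZreg, which are not shown in this excerpt):

package p

// cmp8 compares two int8 values loaded from memory. Both operands are
// signed, so the new Eq8 rule lowers the compare through SignExt8to32;
// since each operand is a MOVBload, the new
// (MOVBreg x:(MOVBload _ _)) -> x rule can then drop the extension.
func cmp8(p, q *int8) bool {
	return *p == *q
}

// cmp16u compares two uint16 values. Here the unsigned fallback rule
// uses ZeroExt16to32, which matches the zero-extending MOVHZload and
// is elided by (MOVHZreg x:(MOVHZload _ _)) -> x.
func cmp16u(p, q *uint16) bool {
	return *p == *q
}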

src/cmd/compile/internal/ssa/gen/PPC64.rules

@@ -196,6 +196,9 @@
// Lowering comparisons
(EqB x y) -> (ANDconst [1] (EQV x y))
// Sign extension dependence on operand sign sets up for sign/zero-extension elision later
(Eq8 x y) && isSigned(x.Type) && isSigned(y.Type) -> (Equal (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Eq16 x y) && isSigned(x.Type) && isSigned(y.Type) -> (Equal (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Eq8 x y) -> (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Eq16 x y) -> (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Eq32 x y) -> (Equal (CMPW x y))
@@ -205,6 +208,9 @@
(EqPtr x y) -> (Equal (CMP x y))
(NeqB x y) -> (XOR x y)
// Like Eq8 and Eq16, prefer sign extension likely to enable later elision.
(Neq8 x y) && isSigned(x.Type) && isSigned(y.Type) -> (NotEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Neq16 x y) && isSigned(x.Type) && isSigned(y.Type) -> (NotEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Neq8 x y) -> (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Neq16 x y) -> (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Neq32 x y) -> (NotEqual (CMPW x y))
@@ -620,3 +626,9 @@
(Trunc64to16 x) -> (MOVHreg x)
(Trunc64to32 x) -> (MOVWreg x)
// Note that MOV??reg returns a 64-bit int, x is not necessarily that wide
// This may interact with other patterns in the future. (Compare with arm64)
(MOVBZreg x:(MOVBZload _ _)) -> x
(MOVBreg x:(MOVBload _ _)) -> x
(MOVHZreg x:(MOVHZload _ _)) -> x
(MOVHreg x:(MOVHload _ _)) -> x
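
Putting the rules above together, here is a hedged sketch of the rewrite chain for a signed 16-bit inequality. The SSA forms in the comments are written informally, the helper name is hypothetical, and the SignExt16to32-to-MOVHreg step relies on the backend's existing extension rules, which are not shown in this excerpt:

package p

// ne16: with both operands signed, (Neq16 x:(MOVHload ...) y:(MOVHload ...))
// is lowered by the new Neq16 rule to
//
//	(NotEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
//
// SignExt16to32 becomes MOVHreg, and the new
// (MOVHreg x:(MOVHload _ _)) -> x rule then removes both extensions,
// leaving (NotEqual (CMPW x y)).
func ne16(p, q *int16) bool {
	return *p != *q
}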

src/cmd/compile/internal/ssa/rewritePPC64.go

@@ -368,8 +368,12 @@ func rewriteValuePPC64(v *Value, config *Config) bool {
return rewriteValuePPC64_OpPPC64LessThan(v, config)
case OpPPC64MOVBZload:
return rewriteValuePPC64_OpPPC64MOVBZload(v, config)
case OpPPC64MOVBZreg:
return rewriteValuePPC64_OpPPC64MOVBZreg(v, config)
case OpPPC64MOVBload:
return rewriteValuePPC64_OpPPC64MOVBload(v, config)
case OpPPC64MOVBreg:
return rewriteValuePPC64_OpPPC64MOVBreg(v, config)
case OpPPC64MOVBstore:
return rewriteValuePPC64_OpPPC64MOVBstore(v, config)
case OpPPC64MOVBstorezero:
@@ -382,8 +386,12 @@ func rewriteValuePPC64(v *Value, config *Config) bool {
return rewriteValuePPC64_OpPPC64MOVDstorezero(v, config)
case OpPPC64MOVHZload:
return rewriteValuePPC64_OpPPC64MOVHZload(v, config)
case OpPPC64MOVHZreg:
return rewriteValuePPC64_OpPPC64MOVHZreg(v, config)
case OpPPC64MOVHload:
return rewriteValuePPC64_OpPPC64MOVHload(v, config)
case OpPPC64MOVHreg:
return rewriteValuePPC64_OpPPC64MOVHreg(v, config)
case OpPPC64MOVHstore:
return rewriteValuePPC64_OpPPC64MOVHstore(v, config)
case OpPPC64MOVHstorezero:
@@ -1296,6 +1304,26 @@ func rewriteValuePPC64_OpEq16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Eq16 x y)
// cond: isSigned(x.Type) && isSigned(y.Type)
// result: (Equal (CMPW (SignExt16to32 x) (SignExt16to32 y)))
for {
x := v.Args[0]
y := v.Args[1]
if !(isSigned(x.Type) && isSigned(y.Type)) {
break
}
v.reset(OpPPC64Equal)
v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
// match: (Eq16 x y)
// cond:
// result: (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
for {
@@ -1385,6 +1413,26 @@ func rewriteValuePPC64_OpEq8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Eq8 x y)
// cond: isSigned(x.Type) && isSigned(y.Type)
// result: (Equal (CMPW (SignExt8to32 x) (SignExt8to32 y)))
for {
x := v.Args[0]
y := v.Args[1]
if !(isSigned(x.Type) && isSigned(y.Type)) {
break
}
v.reset(OpPPC64Equal)
v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
// match: (Eq8 x y)
// cond:
// result: (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
for {
@@ -3626,6 +3674,26 @@ func rewriteValuePPC64_OpNeq16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Neq16 x y)
// cond: isSigned(x.Type) && isSigned(y.Type)
// result: (NotEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
for {
x := v.Args[0]
y := v.Args[1]
if !(isSigned(x.Type) && isSigned(y.Type)) {
break
}
v.reset(OpPPC64NotEqual)
v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
// match: (Neq16 x y)
// cond:
// result: (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
for {
@@ -3715,6 +3783,26 @@ func rewriteValuePPC64_OpNeq8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Neq8 x y)
// cond: isSigned(x.Type) && isSigned(y.Type)
// result: (NotEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
for {
x := v.Args[0]
y := v.Args[1]
if !(isSigned(x.Type) && isSigned(y.Type)) {
break
}
v.reset(OpPPC64NotEqual)
v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
// match: (Neq8 x y)
// cond:
// result: (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
for {
@@ -4676,6 +4764,24 @@ func rewriteValuePPC64_OpPPC64MOVBZload(v *Value, config *Config) bool {
}
return false
}
func rewriteValuePPC64_OpPPC64MOVBZreg(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVBZreg x:(MOVBZload _ _))
// cond:
// result: x
for {
x := v.Args[0]
if x.Op != OpPPC64MOVBZload {
break
}
v.reset(OpCopy)
v.Type = x.Type
v.AddArg(x)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64MOVBload(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -4728,6 +4834,24 @@ func rewriteValuePPC64_OpPPC64MOVBload(v *Value, config *Config) bool {
}
return false
}
func rewriteValuePPC64_OpPPC64MOVBreg(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVBreg x:(MOVBload _ _))
// cond:
// result: x
for {
x := v.Args[0]
if x.Op != OpPPC64MOVBload {
break
}
v.reset(OpCopy)
v.Type = x.Type
v.AddArg(x)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64MOVBstore(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -5046,6 +5170,24 @@ func rewriteValuePPC64_OpPPC64MOVHZload(v *Value, config *Config) bool {
}
return false
}
func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVHZreg x:(MOVHZload _ _))
// cond:
// result: x
for {
x := v.Args[0]
if x.Op != OpPPC64MOVHZload {
break
}
v.reset(OpCopy)
v.Type = x.Type
v.AddArg(x)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64MOVHload(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -5098,6 +5240,24 @@ func rewriteValuePPC64_OpPPC64MOVHload(v *Value, config *Config) bool {
}
return false
}
func rewriteValuePPC64_OpPPC64MOVHreg(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVHreg x:(MOVHload _ _))
// cond:
// result: x
for {
x := v.Args[0]
if x.Op != OpPPC64MOVHload {
break
}
v.reset(OpCopy)
v.Type = x.Type
v.AddArg(x)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64MOVHstore(v *Value, config *Config) bool {
b := v.Block
_ = b
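
Each generated rewrite above replaces the redundant extension with an OpCopy of the loaded value, which later SSA passes clean up. As a rough way to observe the effect (a workflow suggestion, not something this CL prescribes), one can dump assembly for a small probe file with go tool compile -S under GOARCH=ppc64le and check that the zero-extending loads feed the compare with no extra extension in between; a minimal probe might look like:

package main

// Hypothetical probe file; for example:
//	GOOS=linux GOARCH=ppc64le go tool compile -S probe.go
// Inspect the output for eq8u and ne16u: with the new rules the
// zero-extended loads should be compared directly.
func eq8u(p, q *uint8) bool { return *p == *q }

func ne16u(p, q *uint16) bool { return *p != *q }

func main() {}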