diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules
index 47a6af003c9..87fe0fbee13 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64.rules
+++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules
@@ -2073,7 +2073,7 @@
 	(CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
 
 // We don't need the conditional move if we know the arg of BSF is not zero.
-(CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _)))) && c != 0 => x
+(CMOVQEQ x _ (Select1 (BS(F|R)Q (ORQconst [c] _)))) && c != 0 => x
 // Extension is unnecessary for trailing zeros.
 (BSFQ (ORQconst [1<<8] (MOVBQZX x))) => (BSFQ (ORQconst [1<<8] x))
 (BSFQ (ORQconst [1<<16] (MOVWQZX x))) => (BSFQ (ORQconst [1<<16] x))
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go
index 0c789d6b496..c17d8b03e20 100644
--- a/src/cmd/compile/internal/ssa/rewriteAMD64.go
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go
@@ -5191,6 +5191,29 @@ func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool {
 		v.copyOf(x)
 		return true
 	}
+	// match: (CMOVQEQ x _ (Select1 (BSRQ (ORQconst [c] _))))
+	// cond: c != 0
+	// result: x
+	for {
+		x := v_0
+		if v_2.Op != OpSelect1 {
+			break
+		}
+		v_2_0 := v_2.Args[0]
+		if v_2_0.Op != OpAMD64BSRQ {
+			break
+		}
+		v_2_0_0 := v_2_0.Args[0]
+		if v_2_0_0.Op != OpAMD64ORQconst {
+			break
+		}
+		c := auxIntToInt32(v_2_0_0.AuxInt)
+		if !(c != 0) {
+			break
+		}
+		v.copyOf(x)
+		return true
+	}
 	return false
 }
 func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool {
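Note on the change: the rewriteAMD64.go hunk is the machine-generated matcher that rulegen emits for the new BSRQ arm of the rule; only the AMD64.rules line is hand-written. As a minimal sketch of Go code this helps (my illustration, not taken from the change itself): on amd64, bits.Len64 lowers to BSRQ plus a CMOVQEQ that substitutes -1 when the input is zero, and ORing the argument with a nonzero constant reaches SSA as ORQconst, which proves the BSRQ operand nonzero and lets the rule delete the CMOVQEQ.

	package main

	import (
		"fmt"
		"math/bits"
	)

	// bitLen1 computes the bit length of x|1. The x|1 becomes
	// (ORQconst [1] x) in SSA, so the BSRQ operand can never be
	// zero and the CMOVQEQ guarding the zero-input case is dead.
	// (Illustrative example; assumes the Len64 lowering described above.)
	func bitLen1(x uint64) int {
		return bits.Len64(x | 1)
	}

	func main() {
		fmt.Println(bitLen1(0))       // 1
		fmt.Println(bitLen1(1 << 40)) // 41
	}

The same reasoning already applied to BSFQ (trailing zeros, e.g. bits.TrailingZeros with a constant OR); the rule simply did not cover the symmetric leading-bit case until this change.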