commit 7f033933ce
parent 87f4e36ce7

cmd/compile/internal/gc: add support for GOARCH=mips{,le}

Change-Id: Ida4cd647525abce3441bfcb9fdee059344fe717f
Reviewed-on: https://go-review.googlesource.com/31477
Run-TryBot: Brad Fitzpatrick <bradfitz@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Cherry Zhang <cherryyz@google.com>
@@ -279,7 +279,7 @@ func (s *ssaExport) AllocFrame(f *ssa.Func) {
 		if haspointers(n.Type) {
 			stkptrsize = Stksize
 		}
-		if Thearch.LinkArch.InFamily(sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) {
+		if Thearch.LinkArch.InFamily(sys.MIPS, sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) {
 			Stksize = Rnd(Stksize, int64(Widthptr))
 		}
 		if Stksize >= 1<<31 {
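The MIPS branch added above rounds the frame size up to a multiple of the pointer size before the large-frame check, as the other RISC targets already do. For readers outside the compiler, a standalone sketch of that round-up step (the helper and main below are illustrative, not the compiler's own code):

package main

import "fmt"

// roundUp rounds n up to the next multiple of align.
// align must be a power of two, as pointer sizes are.
func roundUp(n, align int64) int64 {
	return (n + align - 1) &^ (align - 1)
}

func main() {
	fmt.Println(roundUp(13, 4)) // 16
	fmt.Println(roundUp(16, 4)) // 16
	fmt.Println(roundUp(17, 8)) // 24
}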
@@ -6,6 +6,7 @@ package gc

 import (
 	"bytes"
+	"encoding/binary"
 	"fmt"
 	"html"
 	"os"
@@ -1659,7 +1660,7 @@ func (s *state) expr(n *Node) *ssa.Value {

 		if ft.IsFloat() || tt.IsFloat() {
 			conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
-			if s.config.IntSize == 4 && Thearch.LinkArch.Name != "amd64p32" {
+			if s.config.IntSize == 4 && Thearch.LinkArch.Name != "amd64p32" && Thearch.LinkArch.Family != sys.MIPS {
 				if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
 					conv = conv1
 				}
@@ -1669,6 +1670,27 @@ func (s *state) expr(n *Node) *ssa.Value {
 					conv = conv1
 				}
 			}
+
+			if Thearch.LinkArch.Family == sys.MIPS {
+				if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() {
+					// tt is float32 or float64, and ft is also unsigned
+					if tt.Size() == 4 {
+						return s.uint32Tofloat32(n, x, ft, tt)
+					}
+					if tt.Size() == 8 {
+						return s.uint32Tofloat64(n, x, ft, tt)
+					}
+				} else if tt.Size() == 4 && tt.IsInteger() && !tt.IsSigned() {
+					// ft is float32 or float64, and tt is unsigned integer
+					if ft.Size() == 4 {
+						return s.float32ToUint32(n, x, ft, tt)
+					}
+					if ft.Size() == 8 {
+						return s.float64ToUint32(n, x, ft, tt)
+					}
+				}
+			}
+
 			if !ok {
 				s.Fatalf("weird float conversion %v -> %v", ft, tt)
 			}
@@ -1689,7 +1711,7 @@ func (s *state) expr(n *Node) *ssa.Value {
 			}
 			// Tricky 64-bit unsigned cases.
 			if ft.IsInteger() {
-				// therefore tt is float32 or float64, and ft is also unsigned
+				// tt is float32 or float64, and ft is also unsigned
 				if tt.Size() == 4 {
 					return s.uint64Tofloat32(n, x, ft, tt)
 				}
@@ -1698,7 +1720,7 @@ func (s *state) expr(n *Node) *ssa.Value {
 				}
 				s.Fatalf("weird unsigned integer to float conversion %v -> %v", ft, tt)
 			}
-			// therefore ft is float32 or float64, and tt is unsigned integer
+			// ft is float32 or float64, and tt is unsigned integer
 			if ft.Size() == 4 {
 				return s.float32ToUint64(n, x, ft, tt)
 			}
@@ -2588,10 +2610,10 @@ func intrinsicInit() {
 		/******** runtime/internal/sys ********/
 		intrinsicKey{"runtime/internal/sys", "Ctz32"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
 			return s.newValue1(ssa.OpCtz32, Types[TUINT32], args[0])
-		}, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X),
+		}, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS),
 		intrinsicKey{"runtime/internal/sys", "Ctz64"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
 			return s.newValue1(ssa.OpCtz64, Types[TUINT64], args[0])
-		}, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X),
+		}, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS),
 		intrinsicKey{"runtime/internal/sys", "Bswap32"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
 			return s.newValue1(ssa.OpBswap32, Types[TUINT32], args[0])
 		}, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X),
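Ctz32 and Ctz64 (count trailing zeros) from runtime/internal/sys are now treated as intrinsics on MIPS too, so the calls lower to a single SSA op instead of a function call. As a reference for what the operation computes, a portable, unoptimized model (illustrative only, not the runtime's implementation):

// ctz32 returns the number of trailing zero bits in x; 32 if x == 0.
func ctz32(x uint32) int {
	if x == 0 {
		return 32
	}
	n := 0
	for x&1 == 0 {
		x >>= 1
		n++
	}
	return n
}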
@@ -2604,7 +2626,7 @@ func intrinsicInit() {
 			v := s.newValue2(ssa.OpAtomicLoad32, ssa.MakeTuple(Types[TUINT32], ssa.TypeMem), args[0], s.mem())
 			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
 			return s.newValue1(ssa.OpSelect0, Types[TUINT32], v)
-		}, sys.AMD64, sys.ARM64, sys.S390X),
+		}, sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS),
 		intrinsicKey{"runtime/internal/atomic", "Load64"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
 			v := s.newValue2(ssa.OpAtomicLoad64, ssa.MakeTuple(Types[TUINT64], ssa.TypeMem), args[0], s.mem())
 			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
@@ -2614,12 +2636,12 @@ func intrinsicInit() {
 			v := s.newValue2(ssa.OpAtomicLoadPtr, ssa.MakeTuple(ptrto(Types[TUINT8]), ssa.TypeMem), args[0], s.mem())
 			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
 			return s.newValue1(ssa.OpSelect0, ptrto(Types[TUINT8]), v)
-		}, sys.AMD64, sys.ARM64, sys.S390X),
+		}, sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS),

 		intrinsicKey{"runtime/internal/atomic", "Store"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
 			s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore32, ssa.TypeMem, args[0], args[1], s.mem())
 			return nil
-		}, sys.AMD64, sys.ARM64, sys.S390X),
+		}, sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS),
 		intrinsicKey{"runtime/internal/atomic", "Store64"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
 			s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore64, ssa.TypeMem, args[0], args[1], s.mem())
 			return nil
@@ -2627,13 +2649,13 @@ func intrinsicInit() {
 		intrinsicKey{"runtime/internal/atomic", "StorepNoWB"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
 			s.vars[&memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, ssa.TypeMem, args[0], args[1], s.mem())
 			return nil
-		}, sys.AMD64, sys.ARM64, sys.S390X),
+		}, sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS),

 		intrinsicKey{"runtime/internal/atomic", "Xchg"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
 			v := s.newValue3(ssa.OpAtomicExchange32, ssa.MakeTuple(Types[TUINT32], ssa.TypeMem), args[0], args[1], s.mem())
 			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
 			return s.newValue1(ssa.OpSelect0, Types[TUINT32], v)
-		}, sys.AMD64, sys.ARM64, sys.S390X),
+		}, sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS),
 		intrinsicKey{"runtime/internal/atomic", "Xchg64"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
 			v := s.newValue3(ssa.OpAtomicExchange64, ssa.MakeTuple(Types[TUINT64], ssa.TypeMem), args[0], args[1], s.mem())
 			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
@@ -2644,7 +2666,7 @@ func intrinsicInit() {
 			v := s.newValue3(ssa.OpAtomicAdd32, ssa.MakeTuple(Types[TUINT32], ssa.TypeMem), args[0], args[1], s.mem())
 			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
 			return s.newValue1(ssa.OpSelect0, Types[TUINT32], v)
-		}, sys.AMD64, sys.ARM64, sys.S390X),
+		}, sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS),
 		intrinsicKey{"runtime/internal/atomic", "Xadd64"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
 			v := s.newValue3(ssa.OpAtomicAdd64, ssa.MakeTuple(Types[TUINT64], ssa.TypeMem), args[0], args[1], s.mem())
 			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
@@ -2655,7 +2677,7 @@ func intrinsicInit() {
 			v := s.newValue4(ssa.OpAtomicCompareAndSwap32, ssa.MakeTuple(Types[TBOOL], ssa.TypeMem), args[0], args[1], args[2], s.mem())
 			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
 			return s.newValue1(ssa.OpSelect0, Types[TBOOL], v)
-		}, sys.AMD64, sys.ARM64, sys.S390X),
+		}, sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS),
 		intrinsicKey{"runtime/internal/atomic", "Cas64"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
 			v := s.newValue4(ssa.OpAtomicCompareAndSwap64, ssa.MakeTuple(Types[TBOOL], ssa.TypeMem), args[0], args[1], args[2], s.mem())
 			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
@@ -2665,11 +2687,11 @@ func intrinsicInit() {
 		intrinsicKey{"runtime/internal/atomic", "And8"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
 			s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd8, ssa.TypeMem, args[0], args[1], s.mem())
 			return nil
-		}, sys.AMD64, sys.ARM64),
+		}, sys.AMD64, sys.ARM64, sys.MIPS),
 		intrinsicKey{"runtime/internal/atomic", "Or8"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
 			s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr8, ssa.TypeMem, args[0], args[1], s.mem())
 			return nil
-		}, sys.AMD64, sys.ARM64),
+		}, sys.AMD64, sys.ARM64, sys.MIPS),
 	}

 	// aliases internal to runtime/internal/atomic
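The entries above intrinsify the runtime's own atomic package (runtime/internal/atomic) on MIPS, so loads, stores, swaps, adds and compare-and-swap inside the runtime compile to single SSA atomic ops rather than function calls. As a rough user-level illustration of the operations involved, here is equivalent code against the public sync/atomic API (illustrative only; whether a given sync/atomic call is intrinsified is a separate question from this CL):

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var counter uint32

	atomic.StoreUint32(&counter, 1)                     // atomic store
	atomic.AddUint32(&counter, 41)                      // atomic add
	old := atomic.SwapUint32(&counter, 7)               // atomic exchange
	ok := atomic.CompareAndSwapUint32(&counter, 7, 100) // atomic compare-and-swap
	cur := atomic.LoadUint32(&counter)                  // atomic load

	fmt.Println(old, ok, cur) // 42 true 100
}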
@@ -3676,12 +3698,12 @@ func (s *state) slice(t *Type, v, i, j, k *ssa.Value) (p, l, c *ssa.Value) {
 	return rptr, rlen, rcap
 }

-type u2fcvtTab struct {
+type u642fcvtTab struct {
 	geq, cvt2F, and, rsh, or, add ssa.Op
 	one                           func(*state, ssa.Type, int64) *ssa.Value
 }

-var u64_f64 u2fcvtTab = u2fcvtTab{
+var u64_f64 u642fcvtTab = u642fcvtTab{
 	geq:   ssa.OpGeq64,
 	cvt2F: ssa.OpCvt64to64F,
 	and:   ssa.OpAnd64,
@@ -3691,7 +3713,7 @@ var u64_f64 u2fcvtTab = u2fcvtTab{
 	one:   (*state).constInt64,
 }

-var u64_f32 u2fcvtTab = u2fcvtTab{
+var u64_f32 u642fcvtTab = u642fcvtTab{
 	geq:   ssa.OpGeq64,
 	cvt2F: ssa.OpCvt64to32F,
 	and:   ssa.OpAnd64,
@@ -3702,14 +3724,14 @@ var u64_f32 u2fcvtTab = u2fcvtTab{
 }

 func (s *state) uint64Tofloat64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
-	return s.uintTofloat(&u64_f64, n, x, ft, tt)
+	return s.uint64Tofloat(&u64_f64, n, x, ft, tt)
 }

 func (s *state) uint64Tofloat32(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
-	return s.uintTofloat(&u64_f32, n, x, ft, tt)
+	return s.uint64Tofloat(&u64_f32, n, x, ft, tt)
 }

-func (s *state) uintTofloat(cvttab *u2fcvtTab, n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
+func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
 	// if x >= 0 {
 	//    result = (floatY) x
 	// } else {
@@ -3768,6 +3790,66 @@ func (s *state) uintTofloat(cvttab *u2fcvtTab, n *Node, x *ssa.Value, ft, tt *Ty
 	return s.variable(n, n.Type)
 }

+type u322fcvtTab struct {
+	cvtI2F, cvtF2F ssa.Op
+}
+
+var u32_f64 u322fcvtTab = u322fcvtTab{
+	cvtI2F: ssa.OpCvt32to64F,
+	cvtF2F: ssa.OpCopy,
+}
+
+var u32_f32 u322fcvtTab = u322fcvtTab{
+	cvtI2F: ssa.OpCvt32to32F,
+	cvtF2F: ssa.OpCvt64Fto32F,
+}
+
+func (s *state) uint32Tofloat64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
+	return s.uint32Tofloat(&u32_f64, n, x, ft, tt)
+}
+
+func (s *state) uint32Tofloat32(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
+	return s.uint32Tofloat(&u32_f32, n, x, ft, tt)
+}
+
+func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
+	// if x >= 0 {
+	//    result = floatY(x)
+	// } else {
+	//    result = floatY(float64(x) + (1<<32))
+	// }
+	cmp := s.newValue2(ssa.OpGeq32, Types[TBOOL], x, s.zeroVal(ft))
+	b := s.endBlock()
+	b.Kind = ssa.BlockIf
+	b.SetControl(cmp)
+	b.Likely = ssa.BranchLikely
+
+	bThen := s.f.NewBlock(ssa.BlockPlain)
+	bElse := s.f.NewBlock(ssa.BlockPlain)
+	bAfter := s.f.NewBlock(ssa.BlockPlain)
+
+	b.AddEdgeTo(bThen)
+	s.startBlock(bThen)
+	a0 := s.newValue1(cvttab.cvtI2F, tt, x)
+	s.vars[n] = a0
+	s.endBlock()
+	bThen.AddEdgeTo(bAfter)
+
+	b.AddEdgeTo(bElse)
+	s.startBlock(bElse)
+	a1 := s.newValue1(ssa.OpCvt32to64F, Types[TFLOAT64], x)
+	twoToThe32 := s.constFloat64(Types[TFLOAT64], float64(1<<32))
+	a2 := s.newValue2(ssa.OpAdd64F, Types[TFLOAT64], a1, twoToThe32)
+	a3 := s.newValue1(cvttab.cvtF2F, tt, a2)
+
+	s.vars[n] = a3
+	s.endBlock()
+	bElse.AddEdgeTo(bAfter)
+
+	s.startBlock(bAfter)
+	return s.variable(n, n.Type)
+}
+
 // referenceTypeBuiltin generates code for the len/cap builtins for maps and channels.
 func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value {
 	if !n.Left.Type.IsMap() && !n.Left.Type.IsChan() {
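The uint32Tofloat helper added above uses the classic branch-and-correct trick: a 32-bit target only has a signed-integer-to-float instruction, so a non-negative value converts directly, while a value with the high bit set is converted as a signed number and then corrected by adding 2^32. A portable model of the same arithmetic (illustrative only, not compiler code):

// uint32ToFloat64 models the conversion the SSA code above generates:
// convert as signed, then add 2^32 if the sign bit was set.
func uint32ToFloat64(x uint32) float64 {
	s := int32(x)   // reinterpret the bits as signed
	f := float64(s) // hardware signed-int-to-float convert
	if s < 0 {
		f += 1 << 32 // undo the 2^32 wraparound
	}
	return f
}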
@@ -3820,22 +3902,50 @@ func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value {
 }

 type f2uCvtTab struct {
-	ltf, cvt2U, subf ssa.Op
-	value            func(*state, ssa.Type, float64) *ssa.Value
+	ltf, cvt2U, subf, or ssa.Op
+	floatValue           func(*state, ssa.Type, float64) *ssa.Value
+	intValue             func(*state, ssa.Type, int64) *ssa.Value
+	cutoff               uint64
 }

 var f32_u64 f2uCvtTab = f2uCvtTab{
 	ltf:   ssa.OpLess32F,
 	cvt2U: ssa.OpCvt32Fto64,
 	subf:  ssa.OpSub32F,
-	value: (*state).constFloat32,
+	or:         ssa.OpOr64,
+	floatValue: (*state).constFloat32,
+	intValue:   (*state).constInt64,
+	cutoff:     9223372036854775808,
 }

 var f64_u64 f2uCvtTab = f2uCvtTab{
 	ltf:   ssa.OpLess64F,
 	cvt2U: ssa.OpCvt64Fto64,
 	subf:  ssa.OpSub64F,
-	value: (*state).constFloat64,
+	or:         ssa.OpOr64,
+	floatValue: (*state).constFloat64,
+	intValue:   (*state).constInt64,
+	cutoff:     9223372036854775808,
+}
+
+var f32_u32 f2uCvtTab = f2uCvtTab{
+	ltf:        ssa.OpLess32F,
+	cvt2U:      ssa.OpCvt32Fto32,
+	subf:       ssa.OpSub32F,
+	or:         ssa.OpOr32,
+	floatValue: (*state).constFloat32,
+	intValue:   func(s *state, t ssa.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
+	cutoff:     2147483648,
+}
+
+var f64_u32 f2uCvtTab = f2uCvtTab{
+	ltf:        ssa.OpLess64F,
+	cvt2U:      ssa.OpCvt64Fto32,
+	subf:       ssa.OpSub64F,
+	or:         ssa.OpOr32,
+	floatValue: (*state).constFloat64,
+	intValue:   func(s *state, t ssa.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
+	cutoff:     2147483648,
 }

 func (s *state) float32ToUint64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
@@ -3845,16 +3955,25 @@ func (s *state) float64ToUint64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value
 	return s.floatToUint(&f64_u64, n, x, ft, tt)
 }

+func (s *state) float32ToUint32(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
+	return s.floatToUint(&f32_u32, n, x, ft, tt)
+}
+
+func (s *state) float64ToUint32(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
+	return s.floatToUint(&f64_u32, n, x, ft, tt)
+}
+
 func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
-	// if x < 9223372036854775808.0 {
+	// cutoff:=1<<(intY_Size-1)
+	// if x < floatX(cutoff) {
 	//    result = uintY(x)
 	// } else {
-	//    y = x - 9223372036854775808.0
+	//    y = x - floatX(cutoff)
 	//    z = uintY(y)
-	//    result = z | -9223372036854775808
+	//    result = z | -(cutoff)
 	// }
-	twoToThe63 := cvttab.value(s, ft, 9223372036854775808.0)
-	cmp := s.newValue2(cvttab.ltf, Types[TBOOL], x, twoToThe63)
+	cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff))
+	cmp := s.newValue2(cvttab.ltf, Types[TBOOL], x, cutoff)
 	b := s.endBlock()
 	b.Kind = ssa.BlockIf
 	b.SetControl(cmp)
@@ -3873,10 +3992,10 @@ func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt *Ty

 	b.AddEdgeTo(bElse)
 	s.startBlock(bElse)
-	y := s.newValue2(cvttab.subf, ft, x, twoToThe63)
+	y := s.newValue2(cvttab.subf, ft, x, cutoff)
 	y = s.newValue1(cvttab.cvt2U, tt, y)
-	z := s.constInt64(tt, -9223372036854775808)
-	a1 := s.newValue2(ssa.OpOr64, tt, y, z)
+	z := cvttab.intValue(s, tt, int64(-cvttab.cutoff))
+	a1 := s.newValue2(cvttab.or, tt, y, z)
 	s.vars[n] = a1
 	s.endBlock()
 	bElse.AddEdgeTo(bAfter)
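The cutoff scheme generalizes the old 64-bit-only code to 32-bit results: a value below 2^(N-1) converts directly through the signed instruction, while a larger value is shifted down by the cutoff, converted, and then has its top bit restored with an OR. A portable model for the 32-bit case (illustrative only; real Go code just writes uint32(x), and the compiler emits this pattern where the hardware only converts to signed integers):

// float64ToUint32 models the cutoff technique for a target whose hardware
// can only convert floats to signed integers.
func float64ToUint32(x float64) uint32 {
	const cutoff = 1 << 31
	if x < cutoff {
		return uint32(int32(x)) // fits the signed range: direct convert
	}
	y := x - cutoff                     // shift into the signed range
	return uint32(int32(y)) | (1 << 31) // restore the high bit
}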
@@ -4776,7 +4895,9 @@ func (e *ssaExport) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot
 		return ssa.LocalSlot{N: h, Type: t, Off: 0}, ssa.LocalSlot{N: l, Type: Types[TUINT32], Off: 0}
 	}
 	// Return the two parts of the larger variable.
-	// Assuming little endian (we don't support big endian 32-bit architecture yet)
+	if Thearch.LinkArch.ByteOrder == binary.BigEndian {
+		return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: Types[TUINT32], Off: name.Off + 4}
+	}
 	return ssa.LocalSlot{N: n, Type: t, Off: name.Off + 4}, ssa.LocalSlot{N: n, Type: Types[TUINT32], Off: name.Off}
 }

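SplitInt64 now honors byte order: on a big-endian 32-bit target the high 32-bit half of an int64 sits at the lower offset, with the low half 4 bytes later, while little-endian targets keep the opposite layout. A small self-contained illustration of those layouts using encoding/binary (illustrative only, not compiler code):

package main

import (
	"encoding/binary"
	"fmt"
)

// halves returns the 32-bit words stored at offset 0 and offset 4
// of a uint64 laid out in memory in the given byte order.
func halves(x uint64, order binary.ByteOrder) (word0, word4 uint32) {
	var buf [8]byte
	order.PutUint64(buf[:], x)
	return order.Uint32(buf[0:4]), order.Uint32(buf[4:8])
}

func main() {
	const x = 0x1122334455667788

	hiBE, loBE := halves(x, binary.BigEndian)
	fmt.Printf("big endian:    off0=%#x off4=%#x\n", hiBE, loBE) // high word first

	loLE, hiLE := halves(x, binary.LittleEndian)
	fmt.Printf("little endian: off0=%#x off4=%#x\n", loLE, hiLE) // low word first
}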
@@ -686,7 +686,7 @@ opswitch:

 		if n.Left.Op == ONAME && n.Left.Sym.Name == "Sqrt" &&
 			(n.Left.Sym.Pkg.Path == "math" || n.Left.Sym.Pkg == localpkg && myimportpath == "math") {
-			if Thearch.LinkArch.InFamily(sys.AMD64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) {
+			if Thearch.LinkArch.InFamily(sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X) {
 				n.Op = OSQRT
 				n.Left = n.List.First()
 				n.List.Set(nil)
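This hunk, in the walk pass, lets calls to math.Sqrt be rewritten to the OSQRT node, and hence a hardware square-root instruction, on MIPS as well. Nothing changes at the source level; ordinary code like the following simply compiles to the instruction instead of a call on the listed architectures (illustrative snippet):

package main

import (
	"fmt"
	"math"
)

func hypot(a, b float64) float64 {
	// math.Sqrt is rewritten to OSQRT (hardware sqrt) on supported targets.
	return math.Sqrt(a*a + b*b)
}

func main() {
	fmt.Println(hypot(3, 4)) // 5
}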
@@ -1018,7 +1018,7 @@ opswitch:
 		n = walkexpr(n, init)

 	case OCONV, OCONVNOP:
-		if Thearch.LinkArch.Family == sys.ARM {
+		if Thearch.LinkArch.Family == sys.ARM || Thearch.LinkArch.Family == sys.MIPS {
 			if n.Left.Type.IsFloat() {
 				if n.Type.Etype == TINT64 {
 					n = mkcall("float64toint64", n.Type, init, conv(n.Left, Types[TFLOAT64]))
@@ -3277,7 +3277,7 @@ func samecheap(a *Node, b *Node) bool {
 // The result of walkrotate MUST be assigned back to n, e.g.
 // 	n.Left = walkrotate(n.Left)
 func walkrotate(n *Node) *Node {
-	if Thearch.LinkArch.InFamily(sys.MIPS64, sys.PPC64) {
+	if Thearch.LinkArch.InFamily(sys.MIPS, sys.MIPS64, sys.PPC64) {
 		return n
 	}
