
cmd/compile: add wasm architecture

This commit adds the wasm architecture to the compile command.
A later commit will contain the corresponding linker changes.

Design doc: https://docs.google.com/document/d/131vjr4DH6JFnb-blm_uRdaC0_Nv3OUwjEY5qVCxCup4

The following files are generated:
- src/cmd/compile/internal/ssa/opGen.go
- src/cmd/compile/internal/ssa/rewriteWasm.go
- src/cmd/internal/obj/wasm/anames.go

Updates #18892

Change-Id: Ifb4a96a3e427aac2362a1c97967d5667450fba3b
Reviewed-on: https://go-review.googlesource.com/103295
Run-TryBot: Brad Fitzpatrick <bradfitz@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Cherry Zhang <cherryyz@google.com>
Authored by Richard Musiol on 2018-03-29 00:55:53 +02:00; committed by Brad Fitzpatrick
parent a9fc375258
commit 3b137dd2df
25 changed files with 9884 additions and 9 deletions


@@ -11,6 +11,7 @@ import (
"cmd/internal/obj/mips"
"cmd/internal/obj/ppc64"
"cmd/internal/obj/s390x"
"cmd/internal/obj/wasm"
"cmd/internal/obj/x86"
"fmt"
"strings"
@@ -87,6 +88,8 @@ func Set(GOARCH string) *Arch {
a := archS390x()
a.LinkArch = &s390x.Links390x
return a
case "wasm":
return archWasm()
}
return nil
}
@@ -95,6 +98,10 @@ func jumpX86(word string) bool {
return word[0] == 'J' || word == "CALL" || strings.HasPrefix(word, "LOOP") || word == "XBEGIN"
}
func jumpWasm(word string) bool {
return word == "JMP" || word == "CALL" || word == "Call" || word == "Br" || word == "BrIf"
}
func archX86(linkArch *obj.LinkArch) *Arch {
register := make(map[string]int16)
// Create maps for easy lookup of instruction names etc.
@@ -577,3 +584,24 @@ func archS390x() *Arch {
IsJump: jumpS390x,
}
}
func archWasm() *Arch {
instructions := make(map[string]obj.As)
for i, s := range obj.Anames {
instructions[s] = obj.As(i)
}
for i, s := range wasm.Anames {
if obj.As(i) >= obj.A_ARCHSPECIFIC {
instructions[s] = obj.As(i) + obj.ABaseWasm
}
}
return &Arch{
LinkArch: &wasm.Linkwasm,
Instructions: instructions,
Register: wasm.Register,
RegisterPrefix: nil,
RegisterNumber: nilRegisterNumber,
IsJump: jumpWasm,
}
}
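
The two loops above are what connect assembler mnemonics to opcode values: architecture-independent names from obj.Anames map to their plain obj.As values, while wasm-specific names are offset into the ABaseWasm opcode space. A minimal sketch of the resulting lookup (illustrative only; cmd/internal packages are not importable outside cmd, so treat this main function as hypothetical):

package main

import (
	"fmt"

	"cmd/internal/obj"
	"cmd/internal/obj/wasm"
)

func main() {
	// Same construction as archWasm above.
	instructions := make(map[string]obj.As)
	for i, s := range obj.Anames {
		instructions[s] = obj.As(i)
	}
	for i, s := range wasm.Anames {
		if obj.As(i) >= obj.A_ARCHSPECIFIC {
			instructions[s] = obj.As(i) + obj.ABaseWasm
		}
	}
	// "Br" lands in the wasm-specific opcode space, "CALL" in the shared one.
	fmt.Println(instructions["Br"] >= obj.ABaseWasm)  // true
	fmt.Println(instructions["CALL"] < obj.ABaseWasm) // true
}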


@@ -343,6 +343,13 @@ func (p *Parser) asmJump(op obj.As, cond string, a []obj.Addr) {
As: op,
}
switch len(a) {
case 0:
if p.arch.Family == sys.Wasm {
target = &obj.Addr{Type: obj.TYPE_NONE}
break
}
p.errorf("wrong number of arguments to %s instruction", op)
return
case 1:
target = &a[0]
case 2:
@@ -445,6 +452,8 @@ func (p *Parser) asmJump(op obj.As, cond string, a []obj.Addr) {
case target.Type == obj.TYPE_CONST:
// JMP $4
prog.To = a[0]
case target.Type == obj.TYPE_NONE:
// JMP
default:
p.errorf("cannot assemble jump %+v", target)
return


@@ -311,4 +311,12 @@ var (
// GO386=387
ControlWord64trunc,
ControlWord32 *obj.LSym
// Wasm
WasmMove,
WasmZero,
WasmDiv,
WasmTruncS,
WasmTruncU,
SigPanic *obj.LSym
)


@@ -180,6 +180,7 @@ func Main(archInit func(*Arch)) {
gopkg = types.NewPkg("go", "")
Nacl = objabi.GOOS == "nacl"
Wasm := objabi.GOARCH == "wasm"
flag.BoolVar(&compiling_runtime, "+", false, "compiling runtime")
flag.BoolVar(&compiling_std, "std", false, "compiling standard library")
@@ -200,7 +201,7 @@
flag.IntVar(&nBackendWorkers, "c", 1, "concurrency during compilation, 1 means no concurrency")
flag.BoolVar(&pure_go, "complete", false, "compiling complete package (no C or assembly)")
flag.StringVar(&debugstr, "d", "", "print debug information about items in `list`; try -d help")
flag.BoolVar(&flagDWARF, "dwarf", true, "generate DWARF symbols")
flag.BoolVar(&flagDWARF, "dwarf", !Wasm, "generate DWARF symbols")
flag.BoolVar(&Ctxt.Flag_locationlists, "dwarflocationlists", true, "add location lists to DWARF in optimized mode")
flag.IntVar(&genDwarfInline, "gendwarfinl", 2, "generate DWARF inline info records")
objabi.Flagcount("e", "no limit on number of errors reported", &Debug['e'])
@@ -265,6 +266,7 @@
} else {
// turn off inline generation if no dwarf at all
genDwarfInline = 0
Ctxt.Flag_locationlists = false
}
if flag.NArg() < 1 && debugstr != "help" && debugstr != "ssa/help" {


@@ -87,6 +87,14 @@ func initssaconfig() {
// GO386=387 runtime functions
ControlWord64trunc = sysfunc("controlWord64trunc")
ControlWord32 = sysfunc("controlWord32")
// Wasm
WasmMove = sysfunc("wasmMove")
WasmZero = sysfunc("wasmZero")
WasmDiv = sysfunc("wasmDiv")
WasmTruncS = sysfunc("wasmTruncS")
WasmTruncU = sysfunc("wasmTruncU")
SigPanic = sysfunc("sigpanic")
}
// buildssa builds an SSA function for fn.
@@ -1794,7 +1802,7 @@ func (s *state) expr(n *Node) *ssa.Value {
conv = conv1
}
}
if thearch.LinkArch.Family == sys.ARM64 || s.softFloat {
if thearch.LinkArch.Family == sys.ARM64 || thearch.LinkArch.Family == sys.Wasm || s.softFloat {
if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
conv = conv1
}
@@ -5222,7 +5230,7 @@ func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog {
} else {
// TODO(mdempsky): Can these differences be eliminated?
switch thearch.LinkArch.Family {
case sys.AMD64, sys.I386, sys.PPC64, sys.S390X:
case sys.AMD64, sys.I386, sys.PPC64, sys.S390X, sys.Wasm:
p.To.Type = obj.TYPE_REG
case sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64:
p.To.Type = obj.TYPE_MEM


@@ -311,6 +311,20 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config
c.LinkReg = linkRegMIPS
c.hasGReg = true
c.noDuffDevice = true
case "wasm":
c.PtrSize = 8
c.RegSize = 8
c.lowerBlock = rewriteBlockWasm
c.lowerValue = rewriteValueWasm
c.registers = registersWasm[:]
c.gpRegMask = gpRegMaskWasm
c.fpRegMask = fpRegMaskWasm
c.FPReg = framepointerRegWasm
c.LinkReg = linkRegWasm
c.hasGReg = true
c.noDuffDevice = true
c.useAvg = false
c.useHmul = false
default:
ctxt.Diag("arch %s not implemented", arch)
}


@@ -0,0 +1,391 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Lowering arithmetic
(Add(64|32|16|8|Ptr) x y) -> (I64Add x y)
(Add(64|32)F x y) -> (F64Add x y)
(Sub(64|32|16|8|Ptr) x y) -> (I64Sub x y)
(Sub(64|32)F x y) -> (F64Sub x y)
(Mul(64|32|16|8) x y) -> (I64Mul x y)
(Mul(64|32)F x y) -> (F64Mul x y)
(Div64 x y) -> (I64DivS x y)
(Div64u x y) -> (I64DivU x y)
(Div32 x y) -> (I64DivS (SignExt32to64 x) (SignExt32to64 y))
(Div32u x y) -> (I64DivU (ZeroExt32to64 x) (ZeroExt32to64 y))
(Div16 x y) -> (I64DivS (SignExt16to64 x) (SignExt16to64 y))
(Div16u x y) -> (I64DivU (ZeroExt16to64 x) (ZeroExt16to64 y))
(Div8 x y) -> (I64DivS (SignExt8to64 x) (SignExt8to64 y))
(Div8u x y) -> (I64DivU (ZeroExt8to64 x) (ZeroExt8to64 y))
(Div(64|32)F x y) -> (F64Div x y)
(Mod64 x y) -> (I64RemS x y)
(Mod64u x y) -> (I64RemU x y)
(Mod32 x y) -> (I64RemS (SignExt32to64 x) (SignExt32to64 y))
(Mod32u x y) -> (I64RemU (ZeroExt32to64 x) (ZeroExt32to64 y))
(Mod16 x y) -> (I64RemS (SignExt16to64 x) (SignExt16to64 y))
(Mod16u x y) -> (I64RemU (ZeroExt16to64 x) (ZeroExt16to64 y))
(Mod8 x y) -> (I64RemS (SignExt8to64 x) (SignExt8to64 y))
(Mod8u x y) -> (I64RemU (ZeroExt8to64 x) (ZeroExt8to64 y))
(And(64|32|16|8|B) x y) -> (I64And x y)
(Or(64|32|16|8|B) x y) -> (I64Or x y)
(Xor(64|32|16|8) x y) -> (I64Xor x y)
(Neg(64|32|16|8) x) -> (I64Sub (I64Const [0]) x)
(Neg32F x) -> (F64Neg x)
(Neg64F x) -> (F64Neg x)
(Com(64|32|16|8) x) -> (I64Xor x (I64Const [-1]))
(Not x) -> (I64Eqz x)
// Lowering pointer arithmetic
(OffPtr [0] ptr) -> ptr
(OffPtr [off] ptr) && off > 0 -> (I64AddConst [off] ptr)
// Lowering extension
(SignExt32to64 x) -> (I64ShrS (I64Shl x (I64Const [32])) (I64Const [32]))
(SignExt16to(64|32) x) -> (I64ShrS (I64Shl x (I64Const [48])) (I64Const [48]))
(SignExt8to(64|32|16) x) -> (I64ShrS (I64Shl x (I64Const [56])) (I64Const [56]))
(ZeroExt32to64 x) -> (I64ShrU (I64Shl x (I64Const [32])) (I64Const [32]))
(ZeroExt16to(64|32) x) -> (I64ShrU (I64Shl x (I64Const [48])) (I64Const [48]))
(ZeroExt8to(64|32|16) x) -> (I64ShrU (I64Shl x (I64Const [56])) (I64Const [56]))
(Slicemask x) -> (I64ShrS (I64Sub (I64Const [0]) x) (I64Const [63]))
// Lowering truncation
// Because we ignore the high parts, truncates are just copies.
(Trunc64to(32|16|8) x) -> x
(Trunc32to(16|8) x) -> x
(Trunc16to8 x) -> x
// Lowering float <-> int
(Cvt32to32F x) -> (LoweredRound32F (F64ConvertSI64 (SignExt32to64 x)))
(Cvt32to64F x) -> (F64ConvertSI64 (SignExt32to64 x))
(Cvt64to32F x) -> (LoweredRound32F (F64ConvertSI64 x))
(Cvt64to64F x) -> (F64ConvertSI64 x)
(Cvt32Uto32F x) -> (LoweredRound32F (F64ConvertUI64 (ZeroExt32to64 x)))
(Cvt32Uto64F x) -> (F64ConvertUI64 (ZeroExt32to64 x))
(Cvt64Uto32F x) -> (LoweredRound32F (F64ConvertUI64 x))
(Cvt64Uto64F x) -> (F64ConvertUI64 x)
(Cvt32Fto32 x) -> (I64TruncSF64 x)
(Cvt32Fto64 x) -> (I64TruncSF64 x)
(Cvt64Fto32 x) -> (I64TruncSF64 x)
(Cvt64Fto64 x) -> (I64TruncSF64 x)
(Cvt32Fto32U x) -> (I64TruncUF64 x)
(Cvt32Fto64U x) -> (I64TruncUF64 x)
(Cvt64Fto32U x) -> (I64TruncUF64 x)
(Cvt64Fto64U x) -> (I64TruncUF64 x)
(Cvt32Fto64F x) -> x
(Cvt64Fto32F x) -> (LoweredRound32F x)
(Round32F x) -> (LoweredRound32F x)
(Round64F x) -> x
// Lowering shifts
// Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
(Lsh64x64 x y) -> (Select (I64Shl x y) (I64Const [0]) (I64LtU y (I64Const [64])))
(Lsh64x32 x y) -> (Lsh64x64 x (ZeroExt32to64 y))
(Lsh64x16 x y) -> (Lsh64x64 x (ZeroExt16to64 y))
(Lsh64x8 x y) -> (Lsh64x64 x (ZeroExt8to64 y))
(Lsh32x64 x y) -> (Lsh64x64 x y)
(Lsh32x32 x y) -> (Lsh64x64 x (ZeroExt32to64 y))
(Lsh32x16 x y) -> (Lsh64x64 x (ZeroExt16to64 y))
(Lsh32x8 x y) -> (Lsh64x64 x (ZeroExt8to64 y))
(Lsh16x64 x y) -> (Lsh64x64 x y)
(Lsh16x32 x y) -> (Lsh64x64 x (ZeroExt32to64 y))
(Lsh16x16 x y) -> (Lsh64x64 x (ZeroExt16to64 y))
(Lsh16x8 x y) -> (Lsh64x64 x (ZeroExt8to64 y))
(Lsh8x64 x y) -> (Lsh64x64 x y)
(Lsh8x32 x y) -> (Lsh64x64 x (ZeroExt32to64 y))
(Lsh8x16 x y) -> (Lsh64x64 x (ZeroExt16to64 y))
(Lsh8x8 x y) -> (Lsh64x64 x (ZeroExt8to64 y))
(Rsh64Ux64 x y) -> (Select (I64ShrU x y) (I64Const [0]) (I64LtU y (I64Const [64])))
(Rsh64Ux32 x y) -> (Rsh64Ux64 x (ZeroExt32to64 y))
(Rsh64Ux16 x y) -> (Rsh64Ux64 x (ZeroExt16to64 y))
(Rsh64Ux8 x y) -> (Rsh64Ux64 x (ZeroExt8to64 y))
(Rsh32Ux64 x y) -> (Rsh64Ux64 (ZeroExt32to64 x) y)
(Rsh32Ux32 x y) -> (Rsh64Ux64 (ZeroExt32to64 x) (ZeroExt32to64 y))
(Rsh32Ux16 x y) -> (Rsh64Ux64 (ZeroExt32to64 x) (ZeroExt16to64 y))
(Rsh32Ux8 x y) -> (Rsh64Ux64 (ZeroExt32to64 x) (ZeroExt8to64 y))
(Rsh16Ux64 x y) -> (Rsh64Ux64 (ZeroExt16to64 x) y)
(Rsh16Ux32 x y) -> (Rsh64Ux64 (ZeroExt16to64 x) (ZeroExt32to64 y))
(Rsh16Ux16 x y) -> (Rsh64Ux64 (ZeroExt16to64 x) (ZeroExt16to64 y))
(Rsh16Ux8 x y) -> (Rsh64Ux64 (ZeroExt16to64 x) (ZeroExt8to64 y))
(Rsh8Ux64 x y) -> (Rsh64Ux64 (ZeroExt8to64 x) y)
(Rsh8Ux32 x y) -> (Rsh64Ux64 (ZeroExt8to64 x) (ZeroExt32to64 y))
(Rsh8Ux16 x y) -> (Rsh64Ux64 (ZeroExt8to64 x) (ZeroExt16to64 y))
(Rsh8Ux8 x y) -> (Rsh64Ux64 (ZeroExt8to64 x) (ZeroExt8to64 y))
// Signed right shift needs to return 0/-1 if the shift amount is >= width of the shifted value.
// We implement this by clamping the shift amount to (width - 1) if it is >= width.
(Rsh64x64 x y) -> (I64ShrS x (Select <typ.Int64> y (I64Const [63]) (I64LtU y (I64Const [64]))))
(Rsh64x32 x y) -> (Rsh64x64 x (ZeroExt32to64 y))
(Rsh64x16 x y) -> (Rsh64x64 x (ZeroExt16to64 y))
(Rsh64x8 x y) -> (Rsh64x64 x (ZeroExt8to64 y))
(Rsh32x64 x y) -> (Rsh64x64 (SignExt32to64 x) y)
(Rsh32x32 x y) -> (Rsh64x64 (SignExt32to64 x) (ZeroExt32to64 y))
(Rsh32x16 x y) -> (Rsh64x64 (SignExt32to64 x) (ZeroExt16to64 y))
(Rsh32x8 x y) -> (Rsh64x64 (SignExt32to64 x) (ZeroExt8to64 y))
(Rsh16x64 x y) -> (Rsh64x64 (SignExt16to64 x) y)
(Rsh16x32 x y) -> (Rsh64x64 (SignExt16to64 x) (ZeroExt32to64 y))
(Rsh16x16 x y) -> (Rsh64x64 (SignExt16to64 x) (ZeroExt16to64 y))
(Rsh16x8 x y) -> (Rsh64x64 (SignExt16to64 x) (ZeroExt8to64 y))
(Rsh8x64 x y) -> (Rsh64x64 (SignExt8to64 x) y)
(Rsh8x32 x y) -> (Rsh64x64 (SignExt8to64 x) (ZeroExt32to64 y))
(Rsh8x16 x y) -> (Rsh64x64 (SignExt8to64 x) (ZeroExt16to64 y))
(Rsh8x8 x y) -> (Rsh64x64 (SignExt8to64 x) (ZeroExt8to64 y))
// Lowering comparisons
(Less64 x y) -> (I64LtS x y)
(Less32 x y) -> (I64LtS (SignExt32to64 x) (SignExt32to64 y))
(Less16 x y) -> (I64LtS (SignExt16to64 x) (SignExt16to64 y))
(Less8 x y) -> (I64LtS (SignExt8to64 x) (SignExt8to64 y))
(Less64U x y) -> (I64LtU x y)
(Less32U x y) -> (I64LtU (ZeroExt32to64 x) (ZeroExt32to64 y))
(Less16U x y) -> (I64LtU (ZeroExt16to64 x) (ZeroExt16to64 y))
(Less8U x y) -> (I64LtU (ZeroExt8to64 x) (ZeroExt8to64 y))
(Less64F x y) -> (F64Lt x y)
(Less32F x y) -> (F64Lt (LoweredRound32F x) (LoweredRound32F y))
(Leq64 x y) -> (I64LeS x y)
(Leq32 x y) -> (I64LeS (SignExt32to64 x) (SignExt32to64 y))
(Leq16 x y) -> (I64LeS (SignExt16to64 x) (SignExt16to64 y))
(Leq8 x y) -> (I64LeS (SignExt8to64 x) (SignExt8to64 y))
(Leq64U x y) -> (I64LeU x y)
(Leq32U x y) -> (I64LeU (ZeroExt32to64 x) (ZeroExt32to64 y))
(Leq16U x y) -> (I64LeU (ZeroExt16to64 x) (ZeroExt16to64 y))
(Leq8U x y) -> (I64LeU (ZeroExt8to64 x) (ZeroExt8to64 y))
(Leq64F x y) -> (F64Le x y)
(Leq32F x y) -> (F64Le (LoweredRound32F x) (LoweredRound32F y))
(Greater64 x y) -> (I64GtS x y)
(Greater32 x y) -> (I64GtS (SignExt32to64 x) (SignExt32to64 y))
(Greater16 x y) -> (I64GtS (SignExt16to64 x) (SignExt16to64 y))
(Greater8 x y) -> (I64GtS (SignExt8to64 x) (SignExt8to64 y))
(Greater64U x y) -> (I64GtU x y)
(Greater32U x y) -> (I64GtU (ZeroExt32to64 x) (ZeroExt32to64 y))
(Greater16U x y) -> (I64GtU (ZeroExt16to64 x) (ZeroExt16to64 y))
(Greater8U x y) -> (I64GtU (ZeroExt8to64 x) (ZeroExt8to64 y))
(Greater64F x y) -> (F64Gt x y)
(Greater32F x y) -> (F64Gt (LoweredRound32F x) (LoweredRound32F y))
(Geq64 x y) -> (I64GeS x y)
(Geq32 x y) -> (I64GeS (SignExt32to64 x) (SignExt32to64 y))
(Geq16 x y) -> (I64GeS (SignExt16to64 x) (SignExt16to64 y))
(Geq8 x y) -> (I64GeS (SignExt8to64 x) (SignExt8to64 y))
(Geq64U x y) -> (I64GeU x y)
(Geq32U x y) -> (I64GeU (ZeroExt32to64 x) (ZeroExt32to64 y))
(Geq16U x y) -> (I64GeU (ZeroExt16to64 x) (ZeroExt16to64 y))
(Geq8U x y) -> (I64GeU (ZeroExt8to64 x) (ZeroExt8to64 y))
(Geq64F x y) -> (F64Ge x y)
(Geq32F x y) -> (F64Ge (LoweredRound32F x) (LoweredRound32F y))
(Eq64 x y) -> (I64Eq x y)
(Eq32 x y) -> (I64Eq (ZeroExt32to64 x) (ZeroExt32to64 y))
(Eq16 x y) -> (I64Eq (ZeroExt16to64 x) (ZeroExt16to64 y))
(Eq8 x y) -> (I64Eq (ZeroExt8to64 x) (ZeroExt8to64 y))
(EqB x y) -> (I64Eq x y)
(EqPtr x y) -> (I64Eq x y)
(Eq64F x y) -> (F64Eq x y)
(Eq32F x y) -> (F64Eq (LoweredRound32F x) (LoweredRound32F y))
(Neq64 x y) -> (I64Ne x y)
(Neq32 x y) -> (I64Ne (ZeroExt32to64 x) (ZeroExt32to64 y))
(Neq16 x y) -> (I64Ne (ZeroExt16to64 x) (ZeroExt16to64 y))
(Neq8 x y) -> (I64Ne (ZeroExt8to64 x) (ZeroExt8to64 y))
(NeqB x y) -> (I64Ne x y)
(NeqPtr x y) -> (I64Ne x y)
(Neq64F x y) -> (F64Ne x y)
(Neq32F x y) -> (F64Ne (LoweredRound32F x) (LoweredRound32F y))
// Lowering loads
(Load <t> ptr mem) && is32BitFloat(t) -> (F32Load ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) -> (F64Load ptr mem)
(Load <t> ptr mem) && t.Size() == 8 -> (I64Load ptr mem)
(Load <t> ptr mem) && t.Size() == 4 && !t.IsSigned() -> (I64Load32U ptr mem)
(Load <t> ptr mem) && t.Size() == 4 && t.IsSigned() -> (I64Load32S ptr mem)
(Load <t> ptr mem) && t.Size() == 2 && !t.IsSigned() -> (I64Load16U ptr mem)
(Load <t> ptr mem) && t.Size() == 2 && t.IsSigned() -> (I64Load16S ptr mem)
(Load <t> ptr mem) && t.Size() == 1 && !t.IsSigned() -> (I64Load8U ptr mem)
(Load <t> ptr mem) && t.Size() == 1 && t.IsSigned() -> (I64Load8S ptr mem)
// Lowering stores
(Store {t} ptr val mem) && is64BitFloat(t.(*types.Type)) -> (F64Store ptr val mem)
(Store {t} ptr val mem) && is32BitFloat(t.(*types.Type)) -> (F32Store ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 -> (I64Store ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 -> (I64Store32 ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 2 -> (I64Store16 ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 1 -> (I64Store8 ptr val mem)
// Lowering moves
(Move [0] _ _ mem) -> mem
(Move [1] dst src mem) -> (I64Store8 dst (I64Load8U src mem) mem)
(Move [2] dst src mem) -> (I64Store16 dst (I64Load16U src mem) mem)
(Move [4] dst src mem) -> (I64Store32 dst (I64Load32U src mem) mem)
(Move [8] dst src mem) -> (I64Store dst (I64Load src mem) mem)
(Move [16] dst src mem) ->
(I64Store [8] dst (I64Load [8] src mem)
(I64Store dst (I64Load src mem) mem))
(Move [3] dst src mem) ->
(I64Store8 [2] dst (I64Load8U [2] src mem)
(I64Store16 dst (I64Load16U src mem) mem))
(Move [5] dst src mem) ->
(I64Store8 [4] dst (I64Load8U [4] src mem)
(I64Store32 dst (I64Load32U src mem) mem))
(Move [6] dst src mem) ->
(I64Store16 [4] dst (I64Load16U [4] src mem)
(I64Store32 dst (I64Load32U src mem) mem))
(Move [7] dst src mem) ->
(I64Store32 [3] dst (I64Load32U [3] src mem)
(I64Store32 dst (I64Load32U src mem) mem))
(Move [s] dst src mem) && s > 8 && s < 16 ->
(I64Store [s-8] dst (I64Load [s-8] src mem)
(I64Store dst (I64Load src mem) mem))
// Adjust moves to be a multiple of 16 bytes.
(Move [s] dst src mem)
&& s > 16 && s%16 != 0 && s%16 <= 8 ->
(Move [s-s%16]
(OffPtr <dst.Type> dst [s%16])
(OffPtr <src.Type> src [s%16])
(I64Store dst (I64Load src mem) mem))
(Move [s] dst src mem)
&& s > 16 && s%16 != 0 && s%16 > 8 ->
(Move [s-s%16]
(OffPtr <dst.Type> dst [s%16])
(OffPtr <src.Type> src [s%16])
(I64Store [8] dst (I64Load [8] src mem)
(I64Store dst (I64Load src mem) mem)))
// Large copying uses helper.
(Move [s] dst src mem) && s%8 == 0 ->
(LoweredMove [s/8] dst src mem)
// Lowering Zero instructions
(Zero [0] _ mem) -> mem
(Zero [1] destptr mem) -> (I64Store8 destptr (I64Const [0]) mem)
(Zero [2] destptr mem) -> (I64Store16 destptr (I64Const [0]) mem)
(Zero [4] destptr mem) -> (I64Store32 destptr (I64Const [0]) mem)
(Zero [8] destptr mem) -> (I64Store destptr (I64Const [0]) mem)
(Zero [3] destptr mem) ->
(I64Store8 [2] destptr (I64Const [0])
(I64Store16 destptr (I64Const [0]) mem))
(Zero [5] destptr mem) ->
(I64Store8 [4] destptr (I64Const [0])
(I64Store32 destptr (I64Const [0]) mem))
(Zero [6] destptr mem) ->
(I64Store16 [4] destptr (I64Const [0])
(I64Store32 destptr (I64Const [0]) mem))
(Zero [7] destptr mem) ->
(I64Store32 [3] destptr (I64Const [0])
(I64Store32 destptr (I64Const [0]) mem))
// Strip off any fractional word zeroing.
(Zero [s] destptr mem) && s%8 != 0 && s > 8 ->
(Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8])
(I64Store destptr (I64Const [0]) mem))
// Zero small numbers of words directly.
(Zero [16] destptr mem) ->
(I64Store [8] destptr (I64Const [0])
(I64Store destptr (I64Const [0]) mem))
(Zero [24] destptr mem) ->
(I64Store [16] destptr (I64Const [0])
(I64Store [8] destptr (I64Const [0])
(I64Store destptr (I64Const [0]) mem)))
(Zero [32] destptr mem) ->
(I64Store [24] destptr (I64Const [0])
(I64Store [16] destptr (I64Const [0])
(I64Store [8] destptr (I64Const [0])
(I64Store destptr (I64Const [0]) mem))))
// Large zeroing uses helper.
(Zero [s] destptr mem) && s%8 == 0 && s > 32 ->
(LoweredZero [s/8] destptr mem)
// Lowering constants
(Const(64|32|16|8) [val]) -> (I64Const [val])
(Const(64|32)F [val]) -> (F64Const [val])
(ConstNil) -> (I64Const [0])
(ConstBool [b]) -> (I64Const [b])
// Lowering calls
(StaticCall [argwid] {target} mem) -> (LoweredStaticCall [argwid] {target} mem)
(ClosureCall [argwid] entry closure mem) -> (LoweredClosureCall [argwid] entry closure mem)
(InterCall [argwid] entry mem) -> (LoweredInterCall [argwid] entry mem)
// Miscellaneous
(Convert <t> x mem) -> (LoweredConvert <t> x mem)
(IsNonNil p) -> (I64Eqz (I64Eqz p))
(IsInBounds idx len) -> (I64LtU idx len)
(IsSliceInBounds idx len) -> (I64LeU idx len)
(NilCheck ptr mem) -> (LoweredNilCheck ptr mem)
(GetClosurePtr) -> (LoweredGetClosurePtr)
(GetCallerPC) -> (LoweredGetCallerPC)
(GetCallerSP) -> (LoweredGetCallerSP)
(Addr {sym} base) -> (LoweredAddr {sym} base)
// Write barrier.
(WB {fn} destptr srcptr mem) -> (LoweredWB {fn} destptr srcptr mem)
// --- Optimizations ---
(I64Add (I64Const [x]) (I64Const [y])) -> (I64Const [x + y])
(I64Mul (I64Const [x]) (I64Const [y])) -> (I64Const [x * y])
(I64And (I64Const [x]) (I64Const [y])) -> (I64Const [x & y])
(I64Or (I64Const [x]) (I64Const [y])) -> (I64Const [x | y])
(I64Xor (I64Const [x]) (I64Const [y])) -> (I64Const [x ^ y])
(F64Add (F64Const [x]) (F64Const [y])) -> (F64Const [f2i(i2f(x) + i2f(y))])
(F64Mul (F64Const [x]) (F64Const [y])) -> (F64Const [f2i(i2f(x) * i2f(y))])
(I64Eq (I64Const [x]) (I64Const [y])) && x == y -> (I64Const [1])
(I64Eq (I64Const [x]) (I64Const [y])) && x != y -> (I64Const [0])
(I64Ne (I64Const [x]) (I64Const [y])) && x == y -> (I64Const [0])
(I64Ne (I64Const [x]) (I64Const [y])) && x != y -> (I64Const [1])
(I64Add (I64Const [x]) y) -> (I64Add y (I64Const [x]))
(I64Mul (I64Const [x]) y) -> (I64Mul y (I64Const [x]))
(I64And (I64Const [x]) y) -> (I64And y (I64Const [x]))
(I64Or (I64Const [x]) y) -> (I64Or y (I64Const [x]))
(I64Xor (I64Const [x]) y) -> (I64Xor y (I64Const [x]))
(F64Add (F64Const [x]) y) -> (F64Add y (F64Const [x]))
(F64Mul (F64Const [x]) y) -> (F64Mul y (F64Const [x]))
(I64Eq (I64Const [x]) y) -> (I64Eq y (I64Const [x]))
(I64Ne (I64Const [x]) y) -> (I64Ne y (I64Const [x]))
(I64Add x (I64Const [y])) -> (I64AddConst [y] x)
(I64Eqz (I64Eqz (I64Eqz x))) -> (I64Eqz x)
(I64Store8 [off] (I64AddConst [off2] ptr) val mem) && off+off2 >= 0 -> (I64Store8 [off+off2] ptr val mem)
(I64Store16 [off] (I64AddConst [off2] ptr) val mem) && off+off2 >= 0 -> (I64Store16 [off+off2] ptr val mem)
(I64Store32 [off] (I64AddConst [off2] ptr) val mem) && off+off2 >= 0 -> (I64Store32 [off+off2] ptr val mem)
(I64Store [off] (I64AddConst [off2] ptr) val mem) && off+off2 >= 0 -> (I64Store [off+off2] ptr val mem)
(I64Load8U [off] (I64AddConst [off2] ptr) mem) && off+off2 >= 0 -> (I64Load8U [off+off2] ptr mem)
(I64Load8S [off] (I64AddConst [off2] ptr) mem) && off+off2 >= 0 -> (I64Load8S [off+off2] ptr mem)
(I64Load16U [off] (I64AddConst [off2] ptr) mem) && off+off2 >= 0 -> (I64Load16U [off+off2] ptr mem)
(I64Load16S [off] (I64AddConst [off2] ptr) mem) && off+off2 >= 0 -> (I64Load16S [off+off2] ptr mem)
(I64Load32U [off] (I64AddConst [off2] ptr) mem) && off+off2 >= 0 -> (I64Load32U [off+off2] ptr mem)
(I64Load32S [off] (I64AddConst [off2] ptr) mem) && off+off2 >= 0 -> (I64Load32S [off+off2] ptr mem)
(I64Load [off] (I64AddConst [off2] ptr) mem) && off+off2 >= 0 -> (I64Load [off+off2] ptr mem)
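
A quick illustration of why the shift rules above wrap I64Shl/I64ShrU/I64ShrS in Select: Go requires a shift by >= 64 to produce 0 (or all sign bits for signed right shifts), whereas WebAssembly takes the shift count mod 64. A sketch of the lowered semantics in plain Go (wasmShl and loweredShl are illustrative names, not part of the commit):

package main

import "fmt"

// wasmShl mimics WebAssembly's i64.shl: the count is taken mod 64.
func wasmShl(x, s uint64) uint64 {
	return x << (s % 64)
}

// loweredShl mimics the rule
//   (Lsh64x64 x y) -> (Select (I64Shl x y) (I64Const [0]) (I64LtU y (I64Const [64])))
// keep the raw shift result only while y < 64, otherwise select 0.
func loweredShl(x, s uint64) uint64 {
	if s < 64 { // I64LtU y (I64Const [64])
		return wasmShl(x, s)
	}
	return 0 // I64Const [0]
}

func main() {
	fmt.Println(wasmShl(1, 64))    // 1: the count wraps to 0
	fmt.Println(loweredShl(1, 64)) // 0: matches Go's shift semantics
}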


@@ -0,0 +1,208 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
import "strings"
var regNamesWasm = []string{
"R0",
"R1",
"R2",
"R3",
"R4",
"R5",
"R6",
"R7",
"R8",
"R9",
"R10",
"R11",
"R12",
"R13",
"R14",
"R15",
"F0",
"F1",
"F2",
"F3",
"F4",
"F5",
"F6",
"F7",
"F8",
"F9",
"F10",
"F11",
"F12",
"F13",
"F14",
"F15",
"SP",
"g",
// pseudo-registers
"SB",
}
func init() {
// Make map from reg names to reg integers.
if len(regNamesWasm) > 64 {
panic("too many registers")
}
num := map[string]int{}
for i, name := range regNamesWasm {
num[name] = i
}
buildReg := func(s string) regMask {
m := regMask(0)
for _, r := range strings.Split(s, " ") {
if n, ok := num[r]; ok {
m |= regMask(1) << uint(n)
continue
}
panic("register " + r + " not found")
}
return m
}
var (
gp = buildReg("R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15")
fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15")
gpsp = gp | buildReg("SP")
gpspsb = gpsp | buildReg("SB")
// The "registers", which are actually local variables, can get clobbered
// if we're switching goroutines, because it unwinds the WebAssembly stack.
callerSave = gp | fp | buildReg("g")
)
// Common regInfo
var (
gp01 = regInfo{inputs: nil, outputs: []regMask{gp}}
gp11 = regInfo{inputs: []regMask{gpsp}, outputs: []regMask{gp}}
gp21 = regInfo{inputs: []regMask{gpsp, gpsp}, outputs: []regMask{gp}}
gp31 = regInfo{inputs: []regMask{gpsp, gpsp, gpsp}, outputs: []regMask{gp}}
fp01 = regInfo{inputs: nil, outputs: []regMask{fp}}
fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}}
fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}}
fp21gp = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{gp}}
gpload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: []regMask{gp}}
gpstore = regInfo{inputs: []regMask{gpspsb, gpsp, 0}}
fpload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: []regMask{fp}}
fpstore = regInfo{inputs: []regMask{gpspsb, fp, 0}}
// fpstoreconst = regInfo{inputs: []regMask{fp, 0}}
)
var WasmOps = []opData{
{name: "LoweredStaticCall", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff", call: true, symEffect: "None"}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "LoweredClosureCall", argLength: 3, reg: regInfo{inputs: []regMask{gp, gp, 0}, clobbers: callerSave}, aux: "Int64", call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
{name: "LoweredInterCall", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "Int64", call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
{name: "LoweredAddr", argLength: 1, reg: gp11, aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // returns base+aux, arg0=base
{name: "LoweredMove", argLength: 3, reg: regInfo{inputs: []regMask{gp, gp}}, aux: "Int64"}, // large move. arg0=dst, arg1=src, arg2=mem, auxint=len/8, returns mem
{name: "LoweredZero", argLength: 2, reg: regInfo{inputs: []regMask{gp}}, aux: "Int64"}, // large zeroing. arg0=start, arg1=mem, auxint=len/8, returns mem
{name: "LoweredGetClosurePtr", reg: gp01}, // returns wasm.REG_CTXT, the closure pointer
{name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true}, // returns the PC of the caller of the current function
{name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true}, // returns the SP of the caller of the current function
{name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gp}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem
{name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{gp, gp}}, aux: "Sym", symEffect: "None"}, // invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier
{name: "LoweredRound32F", argLength: 1, reg: fp11, typ: "Float32"}, // rounds arg0 to 32-bit float precision. arg0=value
// LoweredConvert converts between pointers and integers.
// We have a special op for this so as to not confuse GC
// (particularly stack maps). It takes a memory arg so it
// gets correctly ordered with respect to GC safepoints.
// arg0=ptr/int arg1=mem, output=int/ptr
//
// TODO(neelance): LoweredConvert should not be necessary any more, since OpConvert does not need to be lowered any more (CL 108496).
{name: "LoweredConvert", argLength: 2, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}},
// The following are native WebAssembly instructions, see https://webassembly.github.io/spec/core/syntax/instructions.html
{name: "Select", asm: "Select", argLength: 3, reg: gp31}, // returns arg0 if arg2 != 0, otherwise returns arg1
{name: "I64Load8U", asm: "I64Load8U", argLength: 2, reg: gpload, aux: "Int64", typ: "UInt8"}, // read unsigned 8-bit integer from address arg0+aux, arg1=mem
{name: "I64Load8S", asm: "I64Load8S", argLength: 2, reg: gpload, aux: "Int64", typ: "Int8"}, // read signed 8-bit integer from address arg0+aux, arg1=mem
{name: "I64Load16U", asm: "I64Load16U", argLength: 2, reg: gpload, aux: "Int64", typ: "UInt16"}, // read unsigned 16-bit integer from address arg0+aux, arg1=mem
{name: "I64Load16S", asm: "I64Load16S", argLength: 2, reg: gpload, aux: "Int64", typ: "Int16"}, // read signed 16-bit integer from address arg0+aux, arg1=mem
{name: "I64Load32U", asm: "I64Load32U", argLength: 2, reg: gpload, aux: "Int64", typ: "UInt32"}, // read unsigned 32-bit integer from address arg0+aux, arg1=mem
{name: "I64Load32S", asm: "I64Load32S", argLength: 2, reg: gpload, aux: "Int64", typ: "Int32"}, // read signed 32-bit integer from address arg0+aux, arg1=mem
{name: "I64Load", asm: "I64Load", argLength: 2, reg: gpload, aux: "Int64", typ: "UInt64"}, // read 64-bit integer from address arg0+aux, arg1=mem
{name: "I64Store8", asm: "I64Store8", argLength: 3, reg: gpstore, aux: "Int64", typ: "Mem"}, // store 8-bit integer arg1 at address arg0+aux, arg2=mem, returns mem
{name: "I64Store16", asm: "I64Store16", argLength: 3, reg: gpstore, aux: "Int64", typ: "Mem"}, // store 16-bit integer arg1 at address arg0+aux, arg2=mem, returns mem
{name: "I64Store32", asm: "I64Store32", argLength: 3, reg: gpstore, aux: "Int64", typ: "Mem"}, // store 32-bit integer arg1 at address arg0+aux, arg2=mem, returns mem
{name: "I64Store", asm: "I64Store", argLength: 3, reg: gpstore, aux: "Int64", typ: "Mem"}, // store 64-bit integer arg1 at address arg0+aux, arg2=mem, returns mem
{name: "F32Load", asm: "F32Load", argLength: 2, reg: fpload, aux: "Int64", typ: "Float64"}, // read 32-bit float from address arg0+aux, arg1=mem
{name: "F64Load", asm: "F64Load", argLength: 2, reg: fpload, aux: "Int64", typ: "Float64"}, // read 64-bit float from address arg0+aux, arg1=mem
{name: "F32Store", asm: "F32Store", argLength: 3, reg: fpstore, aux: "Int64", typ: "Mem"}, // store 32-bit float arg1 at address arg0+aux, arg2=mem, returns mem
{name: "F64Store", asm: "F64Store", argLength: 3, reg: fpstore, aux: "Int64", typ: "Mem"}, // store 64-bit float arg1 at address arg0+aux, arg2=mem, returns mem
{name: "I64Const", reg: gp01, aux: "Int64", rematerializeable: true, typ: "Int64"}, // returns the constant integer aux
{name: "F64Const", reg: fp01, aux: "Float64", rematerializeable: true, typ: "Float64"}, // returns the constant float aux
{name: "I64Eqz", asm: "I64Eqz", argLength: 1, reg: gp11, typ: "Bool"}, // arg0 == 0
{name: "I64Eq", asm: "I64Eq", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 == arg1
{name: "I64Ne", asm: "I64Ne", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 != arg1
{name: "I64LtS", asm: "I64LtS", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 < arg1 (signed)
{name: "I64LtU", asm: "I64LtU", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 < arg1 (unsigned)
{name: "I64GtS", asm: "I64GtS", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 > arg1 (signed)
{name: "I64GtU", asm: "I64GtU", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 > arg1 (unsigned)
{name: "I64LeS", asm: "I64LeS", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 <= arg1 (signed)
{name: "I64LeU", asm: "I64LeU", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 <= arg1 (unsigned)
{name: "I64GeS", asm: "I64GeS", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 >= arg1 (signed)
{name: "I64GeU", asm: "I64GeU", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 >= arg1 (unsigned)
{name: "F64Eq", asm: "F64Eq", argLength: 2, reg: fp21gp, typ: "Bool"}, // arg0 == arg1
{name: "F64Ne", asm: "F64Ne", argLength: 2, reg: fp21gp, typ: "Bool"}, // arg0 != arg1
{name: "F64Lt", asm: "F64Lt", argLength: 2, reg: fp21gp, typ: "Bool"}, // arg0 < arg1
{name: "F64Gt", asm: "F64Gt", argLength: 2, reg: fp21gp, typ: "Bool"}, // arg0 > arg1
{name: "F64Le", asm: "F64Le", argLength: 2, reg: fp21gp, typ: "Bool"}, // arg0 <= arg1
{name: "F64Ge", asm: "F64Ge", argLength: 2, reg: fp21gp, typ: "Bool"}, // arg0 >= arg1
{name: "I64Add", asm: "I64Add", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 + arg1
{name: "I64AddConst", asm: "I64Add", argLength: 1, reg: gp11, aux: "Int64", typ: "Int64"}, // arg0 + aux
{name: "I64Sub", asm: "I64Sub", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 - arg1
{name: "I64Mul", asm: "I64Mul", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 * arg1
{name: "I64DivS", asm: "I64DivS", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 / arg1 (signed)
{name: "I64DivU", asm: "I64DivU", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 / arg1 (unsigned)
{name: "I64RemS", asm: "I64RemS", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 % arg1 (signed)
{name: "I64RemU", asm: "I64RemU", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 % arg1 (unsigned)
{name: "I64And", asm: "I64And", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 & arg1
{name: "I64Or", asm: "I64Or", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 | arg1
{name: "I64Xor", asm: "I64Xor", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 ^ arg1
{name: "I64Shl", asm: "I64Shl", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 << (arg1 % 64)
{name: "I64ShrS", asm: "I64ShrS", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 >> (arg1 % 64) (signed)
{name: "I64ShrU", asm: "I64ShrU", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 >> (arg1 % 64) (unsigned)
{name: "F64Neg", asm: "F64Neg", argLength: 1, reg: fp11, typ: "Float64"}, // -arg0
{name: "F64Add", asm: "F64Add", argLength: 2, reg: fp21, typ: "Float64"}, // arg0 + arg1
{name: "F64Sub", asm: "F64Sub", argLength: 2, reg: fp21, typ: "Float64"}, // arg0 - arg1
{name: "F64Mul", asm: "F64Mul", argLength: 2, reg: fp21, typ: "Float64"}, // arg0 * arg1
{name: "F64Div", asm: "F64Div", argLength: 2, reg: fp21, typ: "Float64"}, // arg0 / arg1
{name: "I64TruncSF64", asm: "I64TruncSF64", argLength: 1, reg: regInfo{inputs: []regMask{fp}, outputs: []regMask{gp}}, typ: "Int64"}, // truncates the float arg0 to a signed integer
{name: "I64TruncUF64", asm: "I64TruncUF64", argLength: 1, reg: regInfo{inputs: []regMask{fp}, outputs: []regMask{gp}}, typ: "Int64"}, // truncates the float arg0 to an unsigned integer
{name: "F64ConvertSI64", asm: "F64ConvertSI64", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{fp}}, typ: "Float64"}, // converts the signed integer arg0 to a float
{name: "F64ConvertUI64", asm: "F64ConvertUI64", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{fp}}, typ: "Float64"}, // converts the unsigned integer arg0 to a float
}
archs = append(archs, arch{
name: "Wasm",
pkg: "cmd/internal/obj/wasm",
genfile: "",
ops: WasmOps,
blocks: nil,
regnames: regNamesWasm,
gpregmask: gp,
fpregmask: fp,
framepointerreg: -1, // not used
linkreg: -1, // not used
})
}
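
A regMask is just a bitmask with one bit per entry of regNamesWasm, so register sets compose with |. A tiny standalone sketch of the buildReg encoding above (the index subset here is illustrative):

package main

import "fmt"

type regMask uint64

func main() {
	// Indices follow regNamesWasm: R0..R15 are 0..15, F0..F15 are 16..31, SP is 32.
	num := map[string]int{"R0": 0, "R1": 1, "SP": 32}
	buildReg := func(names ...string) regMask {
		var m regMask
		for _, r := range names {
			m |= regMask(1) << uint(num[r])
		}
		return m
	}
	gp := buildReg("R0", "R1")
	gpsp := gp | buildReg("SP")
	fmt.Printf("gp   = %#x\n", gp)   // 0x3
	fmt.Printf("gpsp = %#x\n", gpsp) // 0x100000003
}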

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -90,7 +90,8 @@ func schedule(f *Func) {
case v.Op == OpAMD64LoweredGetClosurePtr || v.Op == OpPPC64LoweredGetClosurePtr ||
v.Op == OpARMLoweredGetClosurePtr || v.Op == OpARM64LoweredGetClosurePtr ||
v.Op == Op386LoweredGetClosurePtr || v.Op == OpMIPS64LoweredGetClosurePtr ||
v.Op == OpS390XLoweredGetClosurePtr || v.Op == OpMIPSLoweredGetClosurePtr:
v.Op == OpS390XLoweredGetClosurePtr || v.Op == OpMIPSLoweredGetClosurePtr ||
v.Op == OpWasmLoweredGetClosurePtr:
// We also score GetLoweredClosurePtr as early as possible to ensure that the
// context register is not stomped. GetLoweredClosurePtr should only appear
// in the entry block where there are no phi functions, so there is no
@@ -102,7 +103,8 @@ func schedule(f *Func) {
case v.Op == OpAMD64LoweredNilCheck || v.Op == OpPPC64LoweredNilCheck ||
v.Op == OpARMLoweredNilCheck || v.Op == OpARM64LoweredNilCheck ||
v.Op == Op386LoweredNilCheck || v.Op == OpMIPS64LoweredNilCheck ||
v.Op == OpS390XLoweredNilCheck || v.Op == OpMIPSLoweredNilCheck:
v.Op == OpS390XLoweredNilCheck || v.Op == OpMIPSLoweredNilCheck ||
v.Op == OpWasmLoweredNilCheck:
// Nil checks must come before loads from the same address.
score[v.ID] = ScoreNilCheck
case v.Op == OpPhi:


@@ -18,7 +18,8 @@ func tighten(f *Func) {
OpAMD64LoweredGetClosurePtr, Op386LoweredGetClosurePtr,
OpARMLoweredGetClosurePtr, OpARM64LoweredGetClosurePtr,
OpMIPSLoweredGetClosurePtr, OpMIPS64LoweredGetClosurePtr,
OpS390XLoweredGetClosurePtr, OpPPC64LoweredGetClosurePtr:
OpS390XLoweredGetClosurePtr, OpPPC64LoweredGetClosurePtr,
OpWasmLoweredGetClosurePtr:
// Phis need to stay in their block.
// GetClosurePtr & Arg must stay in the entry block.
// Tuple selectors must stay with the tuple generator.


@@ -0,0 +1,430 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package wasm
import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/wasm"
)
func Init(arch *gc.Arch) {
arch.LinkArch = &wasm.Linkwasm
arch.REGSP = wasm.REG_SP
arch.MAXWIDTH = 1 << 50
arch.ZeroRange = zeroRange
arch.ZeroAuto = zeroAuto
arch.Ginsnop = ginsnop
arch.SSAMarkMoves = ssaMarkMoves
arch.SSAGenValue = ssaGenValue
arch.SSAGenBlock = ssaGenBlock
}
func zeroRange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
if cnt == 0 {
return p
}
if cnt%8 != 0 {
gc.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
}
for i := int64(0); i < cnt; i += 8 {
p = pp.Appendpp(p, wasm.AGet, obj.TYPE_REG, wasm.REG_SP, 0, 0, 0, 0)
p = pp.Appendpp(p, wasm.AI64Const, obj.TYPE_CONST, 0, 0, 0, 0, 0)
p = pp.Appendpp(p, wasm.AI64Store, 0, 0, 0, obj.TYPE_CONST, 0, off+i)
}
return p
}
func zeroAuto(pp *gc.Progs, n *gc.Node) {
sym := n.Sym.Linksym()
size := n.Type.Size()
for i := int64(0); i < size; i += 8 {
p := pp.Prog(wasm.AGet)
p.From = obj.Addr{Type: obj.TYPE_REG, Reg: wasm.REG_SP}
p = pp.Prog(wasm.AI64Const)
p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: 0}
p = pp.Prog(wasm.AI64Store)
p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_AUTO, Offset: n.Xoffset + i, Sym: sym}
}
}
func ginsnop(pp *gc.Progs) {
pp.Prog(wasm.ANop)
}
func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
}
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
goToBlock := func(block *ssa.Block, canFallthrough bool) {
if canFallthrough && block == next {
return
}
s.Br(obj.AJMP, block)
}
switch b.Kind {
case ssa.BlockPlain:
goToBlock(b.Succs[0].Block(), true)
case ssa.BlockIf:
getReg32(s, b.Control)
s.Prog(wasm.AI32Eqz)
s.Prog(wasm.AIf)
goToBlock(b.Succs[1].Block(), false)
s.Prog(wasm.AEnd)
goToBlock(b.Succs[0].Block(), true)
case ssa.BlockRet:
s.Prog(obj.ARET)
case ssa.BlockRetJmp:
p := s.Prog(obj.ARET)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = b.Aux.(*obj.LSym)
case ssa.BlockExit:
s.Prog(obj.AUNDEF)
case ssa.BlockDefer:
p := s.Prog(wasm.AGet)
p.From = obj.Addr{Type: obj.TYPE_REG, Reg: wasm.REG_RET0}
s.Prog(wasm.AI64Eqz)
s.Prog(wasm.AI32Eqz)
s.Prog(wasm.AIf)
goToBlock(b.Succs[1].Block(), false)
s.Prog(wasm.AEnd)
goToBlock(b.Succs[0].Block(), true)
default:
panic("unexpected block")
}
// Entry point for the next block. Used by the JMP in goToBlock.
s.Prog(wasm.ARESUMEPOINT)
}
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
switch v.Op {
case ssa.OpWasmLoweredStaticCall, ssa.OpWasmLoweredClosureCall, ssa.OpWasmLoweredInterCall:
s.PrepareCall(v)
if v.Aux == gc.Deferreturn {
// add a resume point before call to deferreturn so it can be called again via jmpdefer
s.Prog(wasm.ARESUMEPOINT)
}
if v.Op == ssa.OpWasmLoweredClosureCall {
getReg64(s, v.Args[1])
setReg(s, wasm.REG_CTXT)
}
if sym, ok := v.Aux.(*obj.LSym); ok {
p := s.Prog(obj.ACALL)
p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: sym}
} else {
getReg64(s, v.Args[0])
p := s.Prog(obj.ACALL)
p.To = obj.Addr{Type: obj.TYPE_NONE}
}
case ssa.OpWasmLoweredMove:
getReg32(s, v.Args[0])
getReg32(s, v.Args[1])
i32Const(s, int32(v.AuxInt))
p := s.Prog(wasm.ACall)
p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: gc.WasmMove}
case ssa.OpWasmLoweredZero:
getReg32(s, v.Args[0])
i32Const(s, int32(v.AuxInt))
p := s.Prog(wasm.ACall)
p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: gc.WasmZero}
case ssa.OpWasmLoweredNilCheck:
getReg64(s, v.Args[0])
s.Prog(wasm.AI64Eqz)
s.Prog(wasm.AIf)
p := s.Prog(wasm.ACALLNORESUME)
p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: gc.SigPanic}
s.Prog(wasm.AEnd)
if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
gc.Warnl(v.Pos, "generated nil check")
}
case ssa.OpWasmLoweredWB:
getReg64(s, v.Args[0])
getReg64(s, v.Args[1])
p := s.Prog(wasm.ACALLNORESUME) // TODO(neelance): If possible, turn this into a simple wasm.ACall.
p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: v.Aux.(*obj.LSym)}
case ssa.OpWasmI64Store8, ssa.OpWasmI64Store16, ssa.OpWasmI64Store32, ssa.OpWasmI64Store, ssa.OpWasmF32Store, ssa.OpWasmF64Store:
getReg32(s, v.Args[0])
getReg64(s, v.Args[1])
if v.Op == ssa.OpWasmF32Store {
s.Prog(wasm.AF32DemoteF64)
}
p := s.Prog(v.Op.Asm())
p.To = obj.Addr{Type: obj.TYPE_CONST, Offset: v.AuxInt}
case ssa.OpStoreReg:
getReg(s, wasm.REG_SP)
getReg64(s, v.Args[0])
if v.Type.Etype == types.TFLOAT32 {
s.Prog(wasm.AF32DemoteF64)
}
p := s.Prog(storeOp(v.Type))
gc.AddrAuto(&p.To, v)
default:
if v.Type.IsMemory() {
return
}
ssaGenValueOnStack(s, v)
setReg(s, v.Reg())
}
}
func ssaGenValueOnStack(s *gc.SSAGenState, v *ssa.Value) {
switch v.Op {
case ssa.OpWasmLoweredGetClosurePtr:
getReg(s, wasm.REG_CTXT)
case ssa.OpWasmLoweredGetCallerPC:
p := s.Prog(wasm.AI64Load)
// Caller PC is stored 8 bytes below the first parameter.
p.From = obj.Addr{
Type: obj.TYPE_MEM,
Name: obj.NAME_PARAM,
Offset: -8,
}
case ssa.OpWasmLoweredGetCallerSP:
p := s.Prog(wasm.AGet)
// Caller SP is the address of the first parameter.
p.From = obj.Addr{
Type: obj.TYPE_ADDR,
Name: obj.NAME_PARAM,
Reg: wasm.REG_SP,
Offset: 0,
}
case ssa.OpWasmLoweredAddr:
p := s.Prog(wasm.AGet)
switch n := v.Aux.(type) {
case *obj.LSym:
p.From = obj.Addr{Type: obj.TYPE_ADDR, Name: obj.NAME_EXTERN, Sym: n}
case *gc.Node:
p.From = obj.Addr{
Type: obj.TYPE_ADDR,
Name: obj.NAME_AUTO,
Reg: v.Args[0].Reg(),
Offset: n.Xoffset,
}
if n.Class() == gc.PPARAM || n.Class() == gc.PPARAMOUT {
p.From.Name = obj.NAME_PARAM
}
default:
panic("wasm: bad LoweredAddr")
}
case ssa.OpWasmLoweredRound32F:
getReg64(s, v.Args[0])
s.Prog(wasm.AF32DemoteF64)
s.Prog(wasm.AF64PromoteF32)
case ssa.OpWasmLoweredConvert:
getReg64(s, v.Args[0])
case ssa.OpWasmSelect:
getReg64(s, v.Args[0])
getReg64(s, v.Args[1])
getReg64(s, v.Args[2])
s.Prog(wasm.AI32WrapI64)
s.Prog(v.Op.Asm())
case ssa.OpWasmI64AddConst:
getReg64(s, v.Args[0])
i64Const(s, v.AuxInt)
s.Prog(v.Op.Asm())
case ssa.OpWasmI64Const:
i64Const(s, v.AuxInt)
case ssa.OpWasmF64Const:
f64Const(s, v.AuxFloat())
case ssa.OpWasmI64Load8U, ssa.OpWasmI64Load8S, ssa.OpWasmI64Load16U, ssa.OpWasmI64Load16S, ssa.OpWasmI64Load32U, ssa.OpWasmI64Load32S, ssa.OpWasmI64Load, ssa.OpWasmF32Load, ssa.OpWasmF64Load:
getReg32(s, v.Args[0])
p := s.Prog(v.Op.Asm())
p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: v.AuxInt}
if v.Op == ssa.OpWasmF32Load {
s.Prog(wasm.AF64PromoteF32)
}
case ssa.OpWasmI64Eqz:
getReg64(s, v.Args[0])
s.Prog(v.Op.Asm())
s.Prog(wasm.AI64ExtendUI32)
case ssa.OpWasmI64Eq, ssa.OpWasmI64Ne, ssa.OpWasmI64LtS, ssa.OpWasmI64LtU, ssa.OpWasmI64GtS, ssa.OpWasmI64GtU, ssa.OpWasmI64LeS, ssa.OpWasmI64LeU, ssa.OpWasmI64GeS, ssa.OpWasmI64GeU, ssa.OpWasmF64Eq, ssa.OpWasmF64Ne, ssa.OpWasmF64Lt, ssa.OpWasmF64Gt, ssa.OpWasmF64Le, ssa.OpWasmF64Ge:
getReg64(s, v.Args[0])
getReg64(s, v.Args[1])
s.Prog(v.Op.Asm())
s.Prog(wasm.AI64ExtendUI32)
case ssa.OpWasmI64Add, ssa.OpWasmI64Sub, ssa.OpWasmI64Mul, ssa.OpWasmI64DivU, ssa.OpWasmI64RemS, ssa.OpWasmI64RemU, ssa.OpWasmI64And, ssa.OpWasmI64Or, ssa.OpWasmI64Xor, ssa.OpWasmI64Shl, ssa.OpWasmI64ShrS, ssa.OpWasmI64ShrU, ssa.OpWasmF64Add, ssa.OpWasmF64Sub, ssa.OpWasmF64Mul, ssa.OpWasmF64Div:
getReg64(s, v.Args[0])
getReg64(s, v.Args[1])
s.Prog(v.Op.Asm())
case ssa.OpWasmI64DivS:
getReg64(s, v.Args[0])
getReg64(s, v.Args[1])
if v.Type.Size() == 8 {
// Division of int64 needs helper function wasmDiv to handle the MinInt64 / -1 case.
p := s.Prog(wasm.ACall)
p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: gc.WasmDiv}
break
}
s.Prog(wasm.AI64DivS)
case ssa.OpWasmI64TruncSF64:
getReg64(s, v.Args[0])
p := s.Prog(wasm.ACall)
p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: gc.WasmTruncS}
case ssa.OpWasmI64TruncUF64:
getReg64(s, v.Args[0])
p := s.Prog(wasm.ACall)
p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: gc.WasmTruncU}
case ssa.OpWasmF64Neg, ssa.OpWasmF64ConvertSI64, ssa.OpWasmF64ConvertUI64:
getReg64(s, v.Args[0])
s.Prog(v.Op.Asm())
case ssa.OpLoadReg:
p := s.Prog(loadOp(v.Type))
gc.AddrAuto(&p.From, v.Args[0])
if v.Type.Etype == types.TFLOAT32 {
s.Prog(wasm.AF64PromoteF32)
}
case ssa.OpCopy:
getReg64(s, v.Args[0])
default:
v.Fatalf("unexpected op: %s", v.Op)
}
}
func getReg32(s *gc.SSAGenState, v *ssa.Value) {
reg := v.Reg()
getReg(s, reg)
if reg != wasm.REG_SP {
s.Prog(wasm.AI32WrapI64)
}
}
func getReg64(s *gc.SSAGenState, v *ssa.Value) {
reg := v.Reg()
getReg(s, reg)
if reg == wasm.REG_SP {
s.Prog(wasm.AI64ExtendUI32)
}
}
func i32Const(s *gc.SSAGenState, val int32) {
p := s.Prog(wasm.AI32Const)
p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(val)}
}
func i64Const(s *gc.SSAGenState, val int64) {
p := s.Prog(wasm.AI64Const)
p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: val}
}
func f64Const(s *gc.SSAGenState, val float64) {
p := s.Prog(wasm.AF64Const)
p.From = obj.Addr{Type: obj.TYPE_FCONST, Val: val}
}
func getReg(s *gc.SSAGenState, reg int16) {
p := s.Prog(wasm.AGet)
p.From = obj.Addr{Type: obj.TYPE_REG, Reg: reg}
}
func setReg(s *gc.SSAGenState, reg int16) {
p := s.Prog(wasm.ASet)
p.To = obj.Addr{Type: obj.TYPE_REG, Reg: reg}
}
func loadOp(t *types.Type) obj.As {
if t.IsFloat() {
switch t.Size() {
case 4:
return wasm.AF32Load
case 8:
return wasm.AF64Load
default:
panic("bad load type")
}
}
switch t.Size() {
case 1:
if t.IsSigned() {
return wasm.AI64Load8S
}
return wasm.AI64Load8U
case 2:
if t.IsSigned() {
return wasm.AI64Load16S
}
return wasm.AI64Load16U
case 4:
if t.IsSigned() {
return wasm.AI64Load32S
}
return wasm.AI64Load32U
case 8:
return wasm.AI64Load
default:
panic("bad load type")
}
}
func storeOp(t *types.Type) obj.As {
if t.IsFloat() {
switch t.Size() {
case 4:
return wasm.AF32Store
case 8:
return wasm.AF64Store
default:
panic("bad store type")
}
}
switch t.Size() {
case 1:
return wasm.AI64Store8
case 2:
return wasm.AI64Store16
case 4:
return wasm.AI64Store32
case 8:
return wasm.AI64Store
default:
panic("bad store type")
}
}
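
The OpWasmI64DivS case above calls the wasmDiv helper because Go and WebAssembly disagree on one input: Go defines MinInt64 / -1 to wrap around to MinInt64, while wasm's i64.div_s traps on it. A small demonstration of the Go-side semantics the helper has to preserve:

package main

import (
	"fmt"
	"math"
)

func main() {
	x, y := int64(math.MinInt64), int64(-1)
	// Defined by the Go spec: the quotient overflows and wraps to MinInt64.
	// Emitting wasm's i64.div_s directly here would trap instead.
	fmt.Println(x/y == math.MinInt64) // true
}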


@@ -13,6 +13,7 @@ import (
"cmd/compile/internal/mips64"
"cmd/compile/internal/ppc64"
"cmd/compile/internal/s390x"
"cmd/compile/internal/wasm"
"cmd/compile/internal/x86"
"cmd/internal/objabi"
"fmt"
@@ -33,6 +34,7 @@ var archInits = map[string]func(*gc.Arch){
"ppc64": ppc64.Init,
"ppc64le": ppc64.Init,
"s390x": s390x.Init,
"wasm": wasm.Init,
}
func main() {


@@ -49,6 +49,7 @@ var bootstrapDirs = []string{
"cmd/compile/internal/ssa",
"cmd/compile/internal/syntax",
"cmd/compile/internal/x86",
"cmd/compile/internal/wasm",
"cmd/internal/bio",
"cmd/internal/gcprog",
"cmd/internal/dwarf",
@@ -61,6 +62,7 @@ var bootstrapDirs = []string{
"cmd/internal/obj/ppc64",
"cmd/internal/obj/s390x",
"cmd/internal/obj/x86",
"cmd/internal/obj/wasm",
"cmd/internal/src",
"cmd/internal/sys",
"cmd/link",
@@ -100,6 +102,8 @@ var ignorePrefixes = []string{
var ignoreSuffixes = []string{
"_arm64.s",
"_arm64.go",
"_wasm.s",
"_wasm.go",
}
func bootstrapBuildTools() {


@@ -89,7 +89,7 @@ func (gcToolchain) gc(b *Builder, a *Action, archive string, importcfg []byte, a
gcargs = append(gcargs, "-buildid", a.buildID)
}
platform := cfg.Goos + "/" + cfg.Goarch
if p.Internal.OmitDebug || platform == "nacl/amd64p32" || cfg.Goos == "plan9" {
if p.Internal.OmitDebug || platform == "nacl/amd64p32" || cfg.Goos == "plan9" || cfg.Goarch == "wasm" {
gcargs = append(gcargs, "-dwarf=false")
}
if strings.HasPrefix(runtimeVersion, "go1") && !strings.Contains(os.Args[0], "go_bootstrap") {


@@ -364,6 +364,7 @@ const (
ABaseARM64
ABaseMIPS
ABaseS390X
ABaseWasm
AllowedOpCodes = 1 << 11 // The number of opcodes available for any given architecture.
AMask = AllowedOpCodes - 1 // AND with this to use the opcode as an array index.
@@ -595,7 +596,7 @@ func (ctxt *Link) Logf(format string, args ...interface{}) {
// the hardware stack pointer and the local variable area.
func (ctxt *Link) FixedFrameSize() int64 {
switch ctxt.Arch.Family {
case sys.AMD64, sys.I386:
case sys.AMD64, sys.I386, sys.Wasm:
return 0
case sys.PPC64:
// PIC code on ppc64le requires 32 bytes of stack, and it's easier to


@@ -26,7 +26,7 @@ var (
pkg = flag.String("p", "", "package name")
)
var Are = regexp.MustCompile(`^\tA([A-Z0-9]+)`)
var Are = regexp.MustCompile(`^\tA([A-Za-z0-9]+)`)
func main() {
flag.Parse()


@@ -394,6 +394,7 @@ const (
RBaseARM64 = 8 * 1024 // range [8k, 13k)
RBaseMIPS = 13 * 1024 // range [13k, 14k)
RBaseS390X = 14 * 1024 // range [14k, 15k)
RBaseWasm = 16 * 1024
)
// RegisterRegister binds a pretty-printer (Rconv) for register


@@ -0,0 +1,288 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package wasm
import "cmd/internal/obj"
//go:generate go run ../stringer.go -i $GOFILE -o anames.go -p wasm
const (
/* mark flags */
DONE = 1 << iota
PRESERVEFLAGS // not allowed to clobber flags
)
/*
* wasm
*/
const (
ACallImport = obj.ABaseWasm + obj.A_ARCHSPECIFIC + iota
AGet
ASet
ATee
ANot // alias for I32Eqz
// The following are low-level WebAssembly instructions.
// Their order matters, since it matches the opcode encoding.
// Gaps in the encoding are indicated by comments.
AUnreachable // opcode 0x00
ANop
ABlock
ALoop
AIf
AElse
AEnd // opcode 0x0B
ABr
ABrIf
ABrTable
// ACall and AReturn are WebAssembly instructions. obj.ACALL and obj.ARET are higher-level instructions
// with Go semantics, e.g. they manipulate the Go stack in linear memory.
AReturn
ACall
ACallIndirect
ADrop // opcode 0x1A
ASelect
AI32Load // opcode 0x28
AI64Load
AF32Load
AF64Load
AI32Load8S
AI32Load8U
AI32Load16S
AI32Load16U
AI64Load8S
AI64Load8U
AI64Load16S
AI64Load16U
AI64Load32S
AI64Load32U
AI32Store
AI64Store
AF32Store
AF64Store
AI32Store8
AI32Store16
AI64Store8
AI64Store16
AI64Store32
ACurrentMemory
AGrowMemory
AI32Const
AI64Const
AF32Const
AF64Const
AI32Eqz
AI32Eq
AI32Ne
AI32LtS
AI32LtU
AI32GtS
AI32GtU
AI32LeS
AI32LeU
AI32GeS
AI32GeU
AI64Eqz
AI64Eq
AI64Ne
AI64LtS
AI64LtU
AI64GtS
AI64GtU
AI64LeS
AI64LeU
AI64GeS
AI64GeU
AF32Eq
AF32Ne
AF32Lt
AF32Gt
AF32Le
AF32Ge
AF64Eq
AF64Ne
AF64Lt
AF64Gt
AF64Le
AF64Ge
AI32Clz
AI32Ctz
AI32Popcnt
AI32Add
AI32Sub
AI32Mul
AI32DivS
AI32DivU
AI32RemS
AI32RemU
AI32And
AI32Or
AI32Xor
AI32Shl
AI32ShrS
AI32ShrU
AI32Rotl
AI32Rotr
AI64Clz
AI64Ctz
AI64Popcnt
AI64Add
AI64Sub
AI64Mul
AI64DivS
AI64DivU
AI64RemS
AI64RemU
AI64And
AI64Or
AI64Xor
AI64Shl
AI64ShrS
AI64ShrU
AI64Rotl
AI64Rotr
AF32Abs
AF32Neg
AF32Ceil
AF32Floor
AF32Trunc
AF32Nearest
AF32Sqrt
AF32Add
AF32Sub
AF32Mul
AF32Div
AF32Min
AF32Max
AF32Copysign
AF64Abs
AF64Neg
AF64Ceil
AF64Floor
AF64Trunc
AF64Nearest
AF64Sqrt
AF64Add
AF64Sub
AF64Mul
AF64Div
AF64Min
AF64Max
AF64Copysign
AI32WrapI64
AI32TruncSF32
AI32TruncUF32
AI32TruncSF64
AI32TruncUF64
AI64ExtendSI32
AI64ExtendUI32
AI64TruncSF32
AI64TruncUF32
AI64TruncSF64
AI64TruncUF64
AF32ConvertSI32
AF32ConvertUI32
AF32ConvertSI64
AF32ConvertUI64
AF32DemoteF64
AF64ConvertSI32
AF64ConvertUI32
AF64ConvertSI64
AF64ConvertUI64
AF64PromoteF32
AI32ReinterpretF32
AI64ReinterpretF64
AF32ReinterpretI32
AF64ReinterpretI64
// End of low-level WebAssembly instructions.
ARESUMEPOINT
// ACALLNORESUME is a call which is not followed by a resume point.
// It is allowed inside WebAssembly blocks, whereas obj.ACALL is not.
// However, switching goroutines is not allowed while inside an ACALLNORESUME call.
ACALLNORESUME
AMOVB
AMOVH
AMOVW
AMOVD
AWORD
ALAST
)
const (
REG_NONE = 0
)
const (
// globals
REG_PC_F = obj.RBaseWasm + iota
REG_PC_B
REG_SP // SP is currently 32-bit, until 64-bit memory operations are available
REG_CTXT
REG_g
// RET* are used by runtime.return0 and runtime.reflectcall. These functions pass return values in registers.
REG_RET0
REG_RET1
REG_RET2
REG_RET3
// locals
REG_R0
REG_R1
REG_R2
REG_R3
REG_R4
REG_R5
REG_R6
REG_R7
REG_R8
REG_R9
REG_R10
REG_R11
REG_R12
REG_R13
REG_R14
REG_R15
REG_F0
REG_F1
REG_F2
REG_F3
REG_F4
REG_F5
REG_F6
REG_F7
REG_F8
REG_F9
REG_F10
REG_F11
REG_F12
REG_F13
REG_F14
REG_F15
MAXREG
MINREG = REG_PC_F
REGSP = REG_SP
REGCTXT = REG_CTXT
REGG = REG_g
)
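
Since the register constants above are contiguous from MINREG to MAXREG, a register number can index a flat name table after subtracting MINREG; the obj file that follows wires this up through registerNames and rconv. A minimal sketch of the same indexing scheme (the constants and names here are illustrative):

package main

import "fmt"

const (
	minReg = 16 * 1024  // mirrors RBaseWasm, the base of the wasm register range
	regSP  = minReg + 2 // PC_F, PC_B, then SP
)

var registerNames = make([]string, 64)

func init() {
	registerNames[regSP-minReg] = "SP"
}

func rconv(r int) string {
	return registerNames[r-minReg]
}

func main() {
	fmt.Println(rconv(regSP)) // SP
}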


@@ -0,0 +1,189 @@
// Generated by stringer -i a.out.go -o anames.go -p wasm
// Do not edit.
package wasm
import "cmd/internal/obj"
var Anames = []string{
obj.A_ARCHSPECIFIC: "CallImport",
"Get",
"Set",
"Tee",
"Not",
"Unreachable",
"Nop",
"Block",
"Loop",
"If",
"Else",
"End",
"Br",
"BrIf",
"BrTable",
"Return",
"Call",
"CallIndirect",
"Drop",
"Select",
"I32Load",
"I64Load",
"F32Load",
"F64Load",
"I32Load8S",
"I32Load8U",
"I32Load16S",
"I32Load16U",
"I64Load8S",
"I64Load8U",
"I64Load16S",
"I64Load16U",
"I64Load32S",
"I64Load32U",
"I32Store",
"I64Store",
"F32Store",
"F64Store",
"I32Store8",
"I32Store16",
"I64Store8",
"I64Store16",
"I64Store32",
"CurrentMemory",
"GrowMemory",
"I32Const",
"I64Const",
"F32Const",
"F64Const",
"I32Eqz",
"I32Eq",
"I32Ne",
"I32LtS",
"I32LtU",
"I32GtS",
"I32GtU",
"I32LeS",
"I32LeU",
"I32GeS",
"I32GeU",
"I64Eqz",
"I64Eq",
"I64Ne",
"I64LtS",
"I64LtU",
"I64GtS",
"I64GtU",
"I64LeS",
"I64LeU",
"I64GeS",
"I64GeU",
"F32Eq",
"F32Ne",
"F32Lt",
"F32Gt",
"F32Le",
"F32Ge",
"F64Eq",
"F64Ne",
"F64Lt",
"F64Gt",
"F64Le",
"F64Ge",
"I32Clz",
"I32Ctz",
"I32Popcnt",
"I32Add",
"I32Sub",
"I32Mul",
"I32DivS",
"I32DivU",
"I32RemS",
"I32RemU",
"I32And",
"I32Or",
"I32Xor",
"I32Shl",
"I32ShrS",
"I32ShrU",
"I32Rotl",
"I32Rotr",
"I64Clz",
"I64Ctz",
"I64Popcnt",
"I64Add",
"I64Sub",
"I64Mul",
"I64DivS",
"I64DivU",
"I64RemS",
"I64RemU",
"I64And",
"I64Or",
"I64Xor",
"I64Shl",
"I64ShrS",
"I64ShrU",
"I64Rotl",
"I64Rotr",
"F32Abs",
"F32Neg",
"F32Ceil",
"F32Floor",
"F32Trunc",
"F32Nearest",
"F32Sqrt",
"F32Add",
"F32Sub",
"F32Mul",
"F32Div",
"F32Min",
"F32Max",
"F32Copysign",
"F64Abs",
"F64Neg",
"F64Ceil",
"F64Floor",
"F64Trunc",
"F64Nearest",
"F64Sqrt",
"F64Add",
"F64Sub",
"F64Mul",
"F64Div",
"F64Min",
"F64Max",
"F64Copysign",
"I32WrapI64",
"I32TruncSF32",
"I32TruncUF32",
"I32TruncSF64",
"I32TruncUF64",
"I64ExtendSI32",
"I64ExtendUI32",
"I64TruncSF32",
"I64TruncUF32",
"I64TruncSF64",
"I64TruncUF64",
"F32ConvertSI32",
"F32ConvertUI32",
"F32ConvertSI64",
"F32ConvertUI64",
"F32DemoteF64",
"F64ConvertSI32",
"F64ConvertUI32",
"F64ConvertSI64",
"F64ConvertUI64",
"F64PromoteF32",
"I32ReinterpretF32",
"I64ReinterpretF64",
"F32ReinterpretI32",
"F64ReinterpretI64",
"RESUMEPOINT",
"CALLNORESUME",
"MOVB",
"MOVH",
"MOVW",
"MOVD",
"WORD",
"LAST",
}


@@ -0,0 +1,934 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package wasm
import (
"bytes"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/sys"
"encoding/binary"
"fmt"
"io"
"math"
)
var Register = map[string]int16{
"PC_F": REG_PC_F,
"PC_B": REG_PC_B,
"SP": REG_SP,
"CTXT": REG_CTXT,
"g": REG_g,
"RET0": REG_RET0,
"RET1": REG_RET1,
"RET2": REG_RET2,
"RET3": REG_RET3,
"R0": REG_R0,
"R1": REG_R1,
"R2": REG_R2,
"R3": REG_R3,
"R4": REG_R4,
"R5": REG_R5,
"R6": REG_R6,
"R7": REG_R7,
"R8": REG_R8,
"R9": REG_R9,
"R10": REG_R10,
"R11": REG_R11,
"R12": REG_R12,
"R13": REG_R13,
"R14": REG_R14,
"R15": REG_R15,
"F0": REG_F0,
"F1": REG_F1,
"F2": REG_F2,
"F3": REG_F3,
"F4": REG_F4,
"F5": REG_F5,
"F6": REG_F6,
"F7": REG_F7,
"F8": REG_F8,
"F9": REG_F9,
"F10": REG_F10,
"F11": REG_F11,
"F12": REG_F12,
"F13": REG_F13,
"F14": REG_F14,
"F15": REG_F15,
}
var registerNames []string
func init() {
obj.RegisterRegister(MINREG, MAXREG, rconv)
obj.RegisterOpcode(obj.ABaseWasm, Anames)
registerNames = make([]string, MAXREG-MINREG)
for name, reg := range Register {
registerNames[reg-MINREG] = name
}
}
func rconv(r int) string {
return registerNames[r-MINREG]
}
var unaryDst = map[obj.As]bool{
ASet: true,
ATee: true,
ACall: true,
ACallIndirect: true,
ACallImport: true,
ABr: true,
ABrIf: true,
ABrTable: true,
AI32Store: true,
AI64Store: true,
AF32Store: true,
AF64Store: true,
AI32Store8: true,
AI32Store16: true,
AI64Store8: true,
AI64Store16: true,
AI64Store32: true,
ACALLNORESUME: true,
}
var Linkwasm = obj.LinkArch{
Arch: sys.ArchWasm,
Init: instinit,
Preprocess: preprocess,
Assemble: assemble,
UnaryDst: unaryDst,
}
var (
morestack *obj.LSym
morestackNoCtxt *obj.LSym
gcWriteBarrier *obj.LSym
sigpanic *obj.LSym
deferreturn *obj.LSym
jmpdefer *obj.LSym
)
const (
/* mark flags */
WasmImport = 1 << 0
)
func instinit(ctxt *obj.Link) {
morestack = ctxt.Lookup("runtime.morestack")
morestackNoCtxt = ctxt.Lookup("runtime.morestack_noctxt")
gcWriteBarrier = ctxt.Lookup("runtime.gcWriteBarrier")
sigpanic = ctxt.Lookup("runtime.sigpanic")
deferreturn = ctxt.Lookup("runtime.deferreturn")
jmpdefer = ctxt.Lookup(`"".jmpdefer`)
}
func preprocess(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) {
appendp := func(p *obj.Prog, as obj.As, args ...obj.Addr) *obj.Prog {
if p.As != obj.ANOP {
p2 := obj.Appendp(p, newprog)
p2.Pc = p.Pc
p = p2
}
p.As = as
switch len(args) {
case 0:
p.From = obj.Addr{}
p.To = obj.Addr{}
case 1:
if unaryDst[as] {
p.From = obj.Addr{}
p.To = args[0]
} else {
p.From = args[0]
p.To = obj.Addr{}
}
case 2:
p.From = args[0]
p.To = args[1]
default:
panic("bad args")
}
return p
}
framesize := s.Func.Text.To.Offset
if framesize < 0 {
panic("bad framesize")
}
s.Func.Args = s.Func.Text.To.Val.(int32)
s.Func.Locals = int32(framesize)
if s.Func.Text.From.Sym.Wrapper() {
// if g._panic != nil && g._panic.argp == FP {
// g._panic.argp = bottom-of-frame
// }
//
// MOVD g_panic(g), R0
// Get R0
// I64Eqz
// Not
// If
// Get SP
// I64ExtendUI32
// I64Const $framesize+8
// I64Add
// I64Load panic_argp(R0)
// I64Eq
// If
// MOVD SP, panic_argp(R0)
// End
// End
gpanic := obj.Addr{
Type: obj.TYPE_MEM,
Reg: REGG,
Offset: 4 * 8, // g_panic
}
panicargp := obj.Addr{
Type: obj.TYPE_MEM,
Reg: REG_R0,
Offset: 0, // panic.argp
}
p := s.Func.Text
p = appendp(p, AMOVD, gpanic, regAddr(REG_R0))
p = appendp(p, AGet, regAddr(REG_R0))
p = appendp(p, AI64Eqz)
p = appendp(p, ANot)
p = appendp(p, AIf)
p = appendp(p, AGet, regAddr(REG_SP))
p = appendp(p, AI64ExtendUI32)
p = appendp(p, AI64Const, constAddr(framesize+8))
p = appendp(p, AI64Add)
p = appendp(p, AI64Load, panicargp)
p = appendp(p, AI64Eq)
p = appendp(p, AIf)
p = appendp(p, AMOVD, regAddr(REG_SP), panicargp)
p = appendp(p, AEnd)
p = appendp(p, AEnd)
}
if framesize > 0 {
p := s.Func.Text
p = appendp(p, AGet, regAddr(REG_SP))
p = appendp(p, AI32Const, constAddr(framesize))
p = appendp(p, AI32Sub)
p = appendp(p, ASet, regAddr(REG_SP))
p.Spadj = int32(framesize)
}
// Introduce resume points for CALL instructions
// and collect other explicit resume points.
numResumePoints := 0
explicitBlockDepth := 0
	pc := int64(0) // pc is only incremented when necessary; this avoids bloat of the BrTable instruction
var tableIdxs []uint64
tablePC := int64(0)
base := ctxt.PosTable.Pos(s.Func.Text.Pos).Base()
for p := s.Func.Text; p != nil; p = p.Link {
prevBase := base
base = ctxt.PosTable.Pos(p.Pos).Base()
switch p.As {
case ABlock, ALoop, AIf:
explicitBlockDepth++
case AEnd:
if explicitBlockDepth == 0 {
panic("End without block")
}
explicitBlockDepth--
case ARESUMEPOINT:
if explicitBlockDepth != 0 {
panic("RESUME can only be used on toplevel")
}
p.As = AEnd
for tablePC <= pc {
tableIdxs = append(tableIdxs, uint64(numResumePoints))
tablePC++
}
numResumePoints++
pc++
case obj.ACALL:
if explicitBlockDepth != 0 {
panic("CALL can only be used on toplevel, try CALLNORESUME instead")
}
appendp(p, ARESUMEPOINT)
}
p.Pc = pc
// Increase pc whenever some pc-value table needs a new entry. Don't increase it
// more often to avoid bloat of the BrTable instruction.
// The "base != prevBase" condition detects inlined instructions. They are an
// implicit call, so entering and leaving this section affects the stack trace.
if p.As == ACALLNORESUME || p.As == obj.ANOP || p.Spadj != 0 || base != prevBase {
pc++
}
}
tableIdxs = append(tableIdxs, uint64(numResumePoints))
s.Size = pc + 1
if !s.Func.Text.From.Sym.NoSplit() {
p := s.Func.Text
if framesize <= objabi.StackSmall {
// small stack: SP <= stackguard
// Get SP
// Get g
// I32WrapI64
// I32Load $stackguard0
			// I32LeU
p = appendp(p, AGet, regAddr(REG_SP))
p = appendp(p, AGet, regAddr(REGG))
p = appendp(p, AI32WrapI64)
p = appendp(p, AI32Load, constAddr(2*int64(ctxt.Arch.PtrSize))) // G.stackguard0
p = appendp(p, AI32LeU)
} else {
// large stack: SP-framesize <= stackguard-StackSmall
// SP <= stackguard+(framesize-StackSmall)
// Get SP
// Get g
// I32WrapI64
// I32Load $stackguard0
// I32Const $(framesize-StackSmall)
// I32Add
			// I32LeU
p = appendp(p, AGet, regAddr(REG_SP))
p = appendp(p, AGet, regAddr(REGG))
p = appendp(p, AI32WrapI64)
p = appendp(p, AI32Load, constAddr(2*int64(ctxt.Arch.PtrSize))) // G.stackguard0
p = appendp(p, AI32Const, constAddr(int64(framesize)-objabi.StackSmall))
p = appendp(p, AI32Add)
p = appendp(p, AI32LeU)
}
// TODO(neelance): handle wraparound case
p = appendp(p, AIf)
p = appendp(p, obj.ACALL, constAddr(0))
if s.Func.Text.From.Sym.NeedCtxt() {
p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: morestack}
} else {
p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: morestackNoCtxt}
}
p = appendp(p, AEnd)
}
// Add Block instructions for resume points and BrTable to jump to selected resume point.
if numResumePoints > 0 {
p := s.Func.Text
p = appendp(p, ALoop) // entryPointLoop, used to jump between basic blocks
for i := 0; i < numResumePoints+1; i++ {
p = appendp(p, ABlock)
}
p = appendp(p, AGet, regAddr(REG_PC_B)) // read next basic block from PC_B
p = appendp(p, ABrTable, obj.Addr{Val: tableIdxs})
p = appendp(p, AEnd) // end of Block
for p.Link != nil {
p = p.Link
}
p = appendp(p, AEnd) // end of entryPointLoop
p = appendp(p, obj.AUNDEF)
}
p := s.Func.Text
currentDepth := 0
blockDepths := make(map[*obj.Prog]int)
for p != nil {
switch p.As {
case ABlock, ALoop, AIf:
currentDepth++
blockDepths[p] = currentDepth
case AEnd:
currentDepth--
}
switch p.As {
case ABr, ABrIf:
if p.To.Type == obj.TYPE_BRANCH {
blockDepth, ok := blockDepths[p.To.Val.(*obj.Prog)]
if !ok {
panic("label not at block")
}
p.To = constAddr(int64(currentDepth - blockDepth))
}
case obj.AJMP:
jmp := *p
p.As = obj.ANOP
if jmp.To.Type == obj.TYPE_BRANCH {
// jump to basic block
p = appendp(p, AI32Const, constAddr(jmp.To.Val.(*obj.Prog).Pc))
p = appendp(p, ASet, regAddr(REG_PC_B)) // write next basic block to PC_B
p = appendp(p, ABr, constAddr(int64(currentDepth-1))) // jump to beginning of entryPointLoop
break
}
// reset PC_B to function entry
p = appendp(p, AI32Const, constAddr(0))
p = appendp(p, ASet, regAddr(REG_PC_B))
// low-level WebAssembly call to function
switch jmp.To.Type {
case obj.TYPE_MEM:
p = appendp(p, ACall, jmp.To)
case obj.TYPE_NONE:
// (target PC is on stack)
p = appendp(p, AI32WrapI64)
p = appendp(p, AI32Const, constAddr(16)) // only needs PC_F bits (16-31), PC_B bits (0-15) are zero
p = appendp(p, AI32ShrU)
p = appendp(p, ACallIndirect)
default:
panic("bad target for JMP")
}
p = appendp(p, AReturn)
case obj.ACALL, ACALLNORESUME:
call := *p
p.As = obj.ANOP
pcAfterCall := call.Link.Pc
if call.To.Sym == sigpanic {
pcAfterCall-- // sigpanic expects to be called without advancing the pc
}
// jmpdefer manipulates the return address on the stack so deferreturn gets called repeatedly.
// Model this in WebAssembly with a loop.
if call.To.Sym == deferreturn {
p = appendp(p, ALoop)
}
// SP -= 8
p = appendp(p, AGet, regAddr(REG_SP))
p = appendp(p, AI32Const, constAddr(8))
p = appendp(p, AI32Sub)
p = appendp(p, ASet, regAddr(REG_SP))
// write return address to Go stack
p = appendp(p, AGet, regAddr(REG_SP))
p = appendp(p, AI64Const, obj.Addr{
Type: obj.TYPE_ADDR,
Name: obj.NAME_EXTERN,
Sym: s, // PC_F
Offset: pcAfterCall, // PC_B
})
p = appendp(p, AI64Store, constAddr(0))
// reset PC_B to function entry
p = appendp(p, AI32Const, constAddr(0))
p = appendp(p, ASet, regAddr(REG_PC_B))
// low-level WebAssembly call to function
switch call.To.Type {
case obj.TYPE_MEM:
p = appendp(p, ACall, call.To)
case obj.TYPE_NONE:
// (target PC is on stack)
p = appendp(p, AI32WrapI64)
p = appendp(p, AI32Const, constAddr(16)) // only needs PC_F bits (16-31), PC_B bits (0-15) are zero
p = appendp(p, AI32ShrU)
p = appendp(p, ACallIndirect)
default:
panic("bad target for CALL")
}
// gcWriteBarrier has no return value, it never unwinds the stack
if call.To.Sym == gcWriteBarrier {
break
}
// jmpdefer removes the frame of deferreturn from the Go stack.
// However, its WebAssembly function still returns normally,
// so we need to return from deferreturn without removing its
// stack frame (no RET), because the frame is already gone.
if call.To.Sym == jmpdefer {
p = appendp(p, AReturn)
break
}
// return value of call is on the top of the stack, indicating whether to unwind the WebAssembly stack
p = appendp(p, AIf)
if call.As == ACALLNORESUME && call.To.Sym != sigpanic { // sigpanic unwinds the stack, but it never resumes
// trying to unwind WebAssembly stack but call has no resume point, terminate with error
p = appendp(p, obj.AUNDEF)
} else {
// unwinding WebAssembly stack to switch goroutine, return 1
p = appendp(p, AI32Const, constAddr(1))
p = appendp(p, AReturn)
}
p = appendp(p, AEnd)
// jump to before the call if jmpdefer has reset the return address to the call's PC
if call.To.Sym == deferreturn {
p = appendp(p, AGet, regAddr(REG_PC_B))
p = appendp(p, AI32Const, constAddr(call.Pc))
p = appendp(p, AI32Eq)
p = appendp(p, ABrIf, constAddr(0))
p = appendp(p, AEnd) // end of Loop
}
case obj.ARET:
ret := *p
p.As = obj.ANOP
if framesize > 0 {
// SP += framesize
p = appendp(p, AGet, regAddr(REG_SP))
p = appendp(p, AI32Const, constAddr(framesize))
p = appendp(p, AI32Add)
p = appendp(p, ASet, regAddr(REG_SP))
// TODO(neelance): This should theoretically set Spadj, but it only works without.
// p.Spadj = int32(-framesize)
}
if ret.To.Type == obj.TYPE_MEM {
// reset PC_B to function entry
p = appendp(p, AI32Const, constAddr(0))
p = appendp(p, ASet, regAddr(REG_PC_B))
// low-level WebAssembly call to function
p = appendp(p, ACall, ret.To)
p = appendp(p, AReturn)
break
}
// read return PC_F from Go stack
p = appendp(p, AGet, regAddr(REG_SP))
p = appendp(p, AI32Load16U, constAddr(2))
p = appendp(p, ASet, regAddr(REG_PC_F))
// read return PC_B from Go stack
p = appendp(p, AGet, regAddr(REG_SP))
p = appendp(p, AI32Load16U, constAddr(0))
p = appendp(p, ASet, regAddr(REG_PC_B))
// SP += 8
p = appendp(p, AGet, regAddr(REG_SP))
p = appendp(p, AI32Const, constAddr(8))
p = appendp(p, AI32Add)
p = appendp(p, ASet, regAddr(REG_SP))
// not switching goroutine, return 0
p = appendp(p, AI32Const, constAddr(0))
p = appendp(p, AReturn)
}
p = p.Link
}
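
	// Second pass: lower the remaining pseudo-instructions (Get of an
	// address, loads with memory operands, MOV*, CallImport) into plain
	// WebAssembly instructions and translate frame-relative addressing.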
p = s.Func.Text
for p != nil {
switch p.From.Name {
case obj.NAME_AUTO:
p.From.Offset += int64(framesize)
case obj.NAME_PARAM:
p.From.Reg = REG_SP
p.From.Offset += int64(framesize) + 8 // parameters are after the frame and the 8-byte return address
}
switch p.To.Name {
case obj.NAME_AUTO:
p.To.Offset += int64(framesize)
case obj.NAME_PARAM:
p.To.Reg = REG_SP
p.To.Offset += int64(framesize) + 8 // parameters are after the frame and the 8-byte return address
}
switch p.As {
case AGet:
if p.From.Type == obj.TYPE_ADDR {
get := *p
p.As = obj.ANOP
switch get.From.Name {
case obj.NAME_EXTERN:
p = appendp(p, AI64Const, get.From)
case obj.NAME_AUTO, obj.NAME_PARAM:
p = appendp(p, AGet, regAddr(get.From.Reg))
if get.From.Reg == REG_SP {
p = appendp(p, AI64ExtendUI32)
}
if get.From.Offset != 0 {
p = appendp(p, AI64Const, constAddr(get.From.Offset))
p = appendp(p, AI64Add)
}
default:
panic("bad Get: invalid name")
}
}
case AI32Load, AI64Load, AF32Load, AF64Load, AI32Load8S, AI32Load8U, AI32Load16S, AI32Load16U, AI64Load8S, AI64Load8U, AI64Load16S, AI64Load16U, AI64Load32S, AI64Load32U:
if p.From.Type == obj.TYPE_MEM {
as := p.As
from := p.From
p.As = AGet
p.From = regAddr(from.Reg)
if from.Reg != REG_SP {
p = appendp(p, AI32WrapI64)
}
p = appendp(p, as, constAddr(from.Offset))
}
case AMOVB, AMOVH, AMOVW, AMOVD:
mov := *p
p.As = obj.ANOP
var loadAs obj.As
var storeAs obj.As
switch mov.As {
case AMOVB:
loadAs = AI64Load8U
storeAs = AI64Store8
case AMOVH:
loadAs = AI64Load16U
storeAs = AI64Store16
case AMOVW:
loadAs = AI64Load32U
storeAs = AI64Store32
case AMOVD:
loadAs = AI64Load
storeAs = AI64Store
}
appendValue := func() {
switch mov.From.Type {
case obj.TYPE_CONST:
p = appendp(p, AI64Const, constAddr(mov.From.Offset))
case obj.TYPE_ADDR:
switch mov.From.Name {
case obj.NAME_NONE, obj.NAME_PARAM, obj.NAME_AUTO:
p = appendp(p, AGet, regAddr(mov.From.Reg))
if mov.From.Reg == REG_SP {
p = appendp(p, AI64ExtendUI32)
}
p = appendp(p, AI64Const, constAddr(mov.From.Offset))
p = appendp(p, AI64Add)
case obj.NAME_EXTERN:
p = appendp(p, AI64Const, mov.From)
default:
panic("bad name for MOV")
}
case obj.TYPE_REG:
p = appendp(p, AGet, mov.From)
if mov.From.Reg == REG_SP {
p = appendp(p, AI64ExtendUI32)
}
case obj.TYPE_MEM:
p = appendp(p, AGet, regAddr(mov.From.Reg))
if mov.From.Reg != REG_SP {
p = appendp(p, AI32WrapI64)
}
p = appendp(p, loadAs, constAddr(mov.From.Offset))
default:
panic("bad MOV type")
}
}
switch mov.To.Type {
case obj.TYPE_REG:
appendValue()
if mov.To.Reg == REG_SP {
p = appendp(p, AI32WrapI64)
}
p = appendp(p, ASet, mov.To)
case obj.TYPE_MEM:
switch mov.To.Name {
case obj.NAME_NONE, obj.NAME_PARAM:
p = appendp(p, AGet, regAddr(mov.To.Reg))
if mov.To.Reg != REG_SP {
p = appendp(p, AI32WrapI64)
}
case obj.NAME_EXTERN:
p = appendp(p, AI32Const, obj.Addr{Type: obj.TYPE_ADDR, Name: obj.NAME_EXTERN, Sym: mov.To.Sym})
default:
panic("bad MOV name")
}
appendValue()
p = appendp(p, storeAs, constAddr(mov.To.Offset))
default:
panic("bad MOV type")
}
case ACallImport:
p.As = obj.ANOP
p = appendp(p, AGet, regAddr(REG_SP))
p = appendp(p, ACall, obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: s})
p.Mark = WasmImport
}
p = p.Link
}
}

func constAddr(value int64) obj.Addr {
return obj.Addr{Type: obj.TYPE_CONST, Offset: value}
}

func regAddr(reg int16) obj.Addr {
return obj.Addr{Type: obj.TYPE_REG, Reg: reg}
}
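
// assemble emits the WebAssembly byte code for the body of s: the
// declaration of its locals followed by the encoded instructions,
// terminated by an explicit end opcode.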
func assemble(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) {
w := new(bytes.Buffer)
// Function starts with declaration of locals: numbers and types.
switch s.Name {
case "memchr":
writeUleb128(w, 1) // number of sets of locals
writeUleb128(w, 3) // number of locals
w.WriteByte(0x7F) // i32
case "memcmp":
writeUleb128(w, 1) // number of sets of locals
writeUleb128(w, 2) // number of locals
w.WriteByte(0x7F) // i32
default:
writeUleb128(w, 2) // number of sets of locals
writeUleb128(w, 16) // number of locals
w.WriteByte(0x7E) // i64
writeUleb128(w, 16) // number of locals
w.WriteByte(0x7C) // f64
}
for p := s.Func.Text; p != nil; p = p.Link {
switch p.As {
case AGet:
if p.From.Type != obj.TYPE_REG {
panic("bad Get: argument is not a register")
}
reg := p.From.Reg
switch {
case reg >= REG_PC_F && reg <= REG_RET3:
w.WriteByte(0x23) // get_global
writeUleb128(w, uint64(reg-REG_PC_F))
case reg >= REG_R0 && reg <= REG_F15:
w.WriteByte(0x20) // get_local
writeUleb128(w, uint64(reg-REG_R0))
default:
panic("bad Get: invalid register")
}
continue
case ASet:
if p.To.Type != obj.TYPE_REG {
panic("bad Set: argument is not a register")
}
reg := p.To.Reg
switch {
case reg >= REG_PC_F && reg <= REG_RET3:
w.WriteByte(0x24) // set_global
writeUleb128(w, uint64(reg-REG_PC_F))
case reg >= REG_R0 && reg <= REG_F15:
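				// Peephole: a Set immediately followed by a Get of the same
				// register is encoded as a single tee_local.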
if p.Link.As == AGet && p.Link.From.Reg == reg {
w.WriteByte(0x22) // tee_local
p = p.Link
} else {
w.WriteByte(0x21) // set_local
}
writeUleb128(w, uint64(reg-REG_R0))
default:
panic("bad Set: invalid register")
}
continue
case ATee:
if p.To.Type != obj.TYPE_REG {
panic("bad Tee: argument is not a register")
}
reg := p.To.Reg
switch {
case reg >= REG_R0 && reg <= REG_F15:
w.WriteByte(0x22) // tee_local
writeUleb128(w, uint64(reg-REG_R0))
default:
panic("bad Tee: invalid register")
}
continue
case ANot:
w.WriteByte(0x45) // i32.eqz
continue
case obj.AUNDEF:
w.WriteByte(0x00) // unreachable
continue
case obj.ANOP, obj.ATEXT, obj.AFUNCDATA, obj.APCDATA:
// ignore
continue
}
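		// All other ops are encoded as their distance from the start of a
		// contiguous opcode range, so the A* constants must stay in the same
		// order as the WebAssembly opcode table (0x00 unreachable, 0x0B end,
		// 0x1A drop, 0x28 i32.load, ...).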
switch {
case p.As < AUnreachable || p.As > AF64ReinterpretI64:
panic(fmt.Sprintf("unexpected assembler op: %s", p.As))
case p.As < AEnd:
w.WriteByte(byte(p.As - AUnreachable + 0x00))
case p.As < ADrop:
w.WriteByte(byte(p.As - AEnd + 0x0B))
case p.As < AI32Load:
w.WriteByte(byte(p.As - ADrop + 0x1A))
default:
w.WriteByte(byte(p.As - AI32Load + 0x28))
}
switch p.As {
case ABlock, ALoop, AIf:
if p.From.Offset != 0 {
// block type, rarely used, e.g. for code compiled with emscripten
w.WriteByte(0x80 - byte(p.From.Offset))
continue
}
w.WriteByte(0x40)
case ABr, ABrIf:
if p.To.Type != obj.TYPE_CONST {
panic("bad Br/BrIf")
}
writeUleb128(w, uint64(p.To.Offset))
case ABrTable:
idxs := p.To.Val.([]uint64)
writeUleb128(w, uint64(len(idxs)-1))
for _, idx := range idxs {
writeUleb128(w, idx)
}
case ACall:
switch p.To.Type {
case obj.TYPE_CONST:
writeUleb128(w, uint64(p.To.Offset))
case obj.TYPE_MEM:
if p.To.Name != obj.NAME_EXTERN && p.To.Name != obj.NAME_STATIC {
fmt.Println(p.To)
panic("bad name for Call")
}
r := obj.Addrel(s)
r.Off = int32(w.Len())
r.Type = objabi.R_CALL
if p.Mark&WasmImport != 0 {
r.Type = objabi.R_WASMIMPORT
}
r.Sym = p.To.Sym
default:
panic("bad type for Call")
}
case ACallIndirect:
writeUleb128(w, uint64(p.To.Offset))
w.WriteByte(0x00) // reserved value
case AI32Const, AI64Const:
if p.From.Name == obj.NAME_EXTERN {
r := obj.Addrel(s)
r.Off = int32(w.Len())
r.Type = objabi.R_ADDR
r.Sym = p.From.Sym
r.Add = p.From.Offset
break
}
writeSleb128(w, p.From.Offset)
case AF64Const:
b := make([]byte, 8)
binary.LittleEndian.PutUint64(b, math.Float64bits(p.From.Val.(float64)))
w.Write(b)
case AI32Load, AI64Load, AF32Load, AF64Load, AI32Load8S, AI32Load8U, AI32Load16S, AI32Load16U, AI64Load8S, AI64Load8U, AI64Load16S, AI64Load16U, AI64Load32S, AI64Load32U:
if p.From.Offset < 0 {
panic("negative offset for *Load")
}
if p.From.Type != obj.TYPE_CONST {
panic("bad type for *Load")
}
writeUleb128(w, align(p.As))
writeUleb128(w, uint64(p.From.Offset))
case AI32Store, AI64Store, AF32Store, AF64Store, AI32Store8, AI32Store16, AI64Store8, AI64Store16, AI64Store32:
if p.To.Offset < 0 {
panic("negative offset")
}
writeUleb128(w, align(p.As))
writeUleb128(w, uint64(p.To.Offset))
case ACurrentMemory, AGrowMemory:
w.WriteByte(0x00)
}
}
w.WriteByte(0x0b) // end
s.P = w.Bytes()
}
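
// align returns the log2 alignment hint that WebAssembly load and store
// instructions encode for the given opcode (0 = byte, 1 = 16 bit,
// 2 = 32 bit, 3 = 64 bit access).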
func align(as obj.As) uint64 {
switch as {
case AI32Load8S, AI32Load8U, AI64Load8S, AI64Load8U, AI32Store8, AI64Store8:
return 0
case AI32Load16S, AI32Load16U, AI64Load16S, AI64Load16U, AI32Store16, AI64Store16:
return 1
case AI32Load, AF32Load, AI64Load32S, AI64Load32U, AI32Store, AF32Store, AI64Store32:
return 2
case AI64Load, AF64Load, AI64Store, AF64Store:
return 3
default:
panic("align: bad op")
}
}
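
// writeUleb128 and writeSleb128 below emit LEB128 variable-length integers:
// each byte carries seven payload bits and the high bit marks continuation.
// For example, writeUleb128(w, 624485) emits 0xE5 0x8E 0x26 and
// writeSleb128(w, -123456) emits 0xC0 0xBB 0x78.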
func writeUleb128(w io.ByteWriter, v uint64) {
more := true
for more {
c := uint8(v & 0x7f)
v >>= 7
more = v != 0
if more {
c |= 0x80
}
w.WriteByte(c)
}
}

func writeSleb128(w io.ByteWriter, v int64) {
more := true
for more {
c := uint8(v & 0x7f)
s := uint8(v & 0x40)
v >>= 7
more = !((v == 0 && s == 0) || (v == -1 && s != 0))
if more {
c |= 0x80
}
w.WriteByte(c)
}
}

View File

@ -40,6 +40,7 @@ const (
Hdarwin
Hdragonfly
Hfreebsd
Hjs
Hlinux
Hnacl
Hnetbsd
@ -57,6 +58,8 @@ func (h *HeadType) Set(s string) error {
*h = Hdragonfly
case "freebsd":
*h = Hfreebsd
case "js":
*h = Hjs
case "linux", "android":
*h = Hlinux
case "nacl":
@ -85,6 +88,8 @@ func (h *HeadType) String() string {
return "dragonfly"
case Hfreebsd:
return "freebsd"
case Hjs:
return "js"
case Hlinux:
return "linux"
case Hnacl:

View File

@ -193,6 +193,9 @@ const (
// R_ADDRCUOFF resolves to a pointer-sized offset from the start of the
// symbol's DWARF compile unit.
R_ADDRCUOFF
// R_WASMIMPORT resolves to the index of the WebAssembly function import.
R_WASMIMPORT
)
// IsDirectJump returns whether r is a relocation for a direct jump.

View File

@ -21,6 +21,7 @@ const (
MIPS64
PPC64
S390X
Wasm
)
// Arch represents an individual architecture.
@ -160,6 +161,15 @@ var ArchS390X = &Arch{
MinLC: 2,
}
var ArchWasm = &Arch{
Name: "wasm",
Family: Wasm,
ByteOrder: binary.LittleEndian,
PtrSize: 8,
RegSize: 8,
MinLC: 1,
}
var Archs = [...]*Arch{
Arch386,
ArchAMD64,
@ -173,4 +183,5 @@ var Archs = [...]*Arch{
ArchPPC64,
ArchPPC64LE,
ArchS390X,
ArchWasm,
}