mirror of
https://github.com/golang/go
synced 2024-11-08 05:26:15 -07:00
35ea62468b
This commit adds the js/wasm architecture to the runtime package. Currently WebAssembly has no support for threads yet, see https://github.com/WebAssembly/design/issues/1073. Because of that, there is no preemption of goroutines and no sysmon goroutine. Design doc: https://docs.google.com/document/d/131vjr4DH6JFnb-blm_uRdaC0_Nv3OUwjEY5qVCxCup4 About WebAssembly assembly files: https://docs.google.com/document/d/1GRmy3rA4DiYtBlX-I1Jr_iHykbX8EixC3Mq0TCYqbKc Updates #18892 Change-Id: I7f12d21b5180500d55ae9fd2f7e926a1731db391 Reviewed-on: https://go-review.googlesource.com/103877 Run-TryBot: Brad Fitzpatrick <bradfitz@golang.org> TryBot-Result: Gobot Gobot <gobot@golang.org> Reviewed-by: Austin Clements <austin@google.com>
45 lines
1.5 KiB
Go
45 lines
1.5 KiB
Go
// Copyright 2014 The Go Authors. All rights reserved.
|
|
// Use of this source code is governed by a BSD-style
|
|
// license that can be found in the LICENSE file.
|
|
|
|
// +build amd64 arm64 mips64 mips64le ppc64 ppc64le s390x wasm
|
|
|
|
package runtime
|
|
|
|
import "unsafe"
|
|
|
|
const (
	// addrBits is the number of bits needed to represent a virtual address.
	//
	// See heapAddrBits for a table of address space sizes on
	// various architectures. 48 bits is enough for all
	// architectures except s390x.
	//
	// On AMD64, virtual addresses are 48-bit (or 57-bit) numbers sign extended to 64.
	// We shift the address left 16 to eliminate the sign extended part and make
	// room in the bottom for the count.
	//
	// On s390x, virtual addresses are 64-bit. There's not much we
	// can do about this, so we just hope that the kernel doesn't
	// get to really high addresses and panic if it does.
	addrBits = 48

	// In addition to the 16 bits taken from the top, we can take 3 from the
	// bottom, because node must be pointer-aligned, giving a total of 19 bits
	// of count.
	cntBits = 64 - addrBits + 3
)
|
|
|
|
func lfstackPack(node *lfnode, cnt uintptr) uint64 {
|
|
return uint64(uintptr(unsafe.Pointer(node)))<<(64-addrBits) | uint64(cnt&(1<<cntBits-1))
|
|
}
|
|
|
|
func lfstackUnpack(val uint64) *lfnode {
|
|
if GOARCH == "amd64" {
|
|
// amd64 systems can place the stack above the VA hole, so we need to sign extend
|
|
// val before unpacking.
|
|
return (*lfnode)(unsafe.Pointer(uintptr(int64(val) >> cntBits << 3)))
|
|
}
|
|
return (*lfnode)(unsafe.Pointer(uintptr(val >> cntBits << 3)))
|
|
}
|