
all: remove nacl (part 3, more amd64p32)

Part 1: CL 199499 (GOOS nacl)
Part 2: CL 200077 (amd64p32 files, toolchain)
Part 3: stuff that arguably should've been part of Part 2, but I forgot
        one of my grep patterns when splitting the original CL up into
        two parts.

This one might also have interesting stuff to resurrect for any future
x32 ABI support.

Updates #30439

Change-Id: I2b4143374a253a003666f3c69e776b7e456bdb9c
Reviewed-on: https://go-review.googlesource.com/c/go/+/200318
Run-TryBot: Brad Fitzpatrick <bradfitz@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Ian Lance Taylor <iant@golang.org>
Brad Fitzpatrick 2019-10-10 16:16:54 +00:00
parent 6dc740f092
commit 03ef105dae
47 changed files with 56 additions and 154 deletions

View File

@ -56,8 +56,6 @@ func Set(GOARCH string) *Arch {
return archX86(&x86.Link386)
case "amd64":
return archX86(&x86.Linkamd64)
case "amd64p32":
return archX86(&x86.Linkamd64p32)
case "arm":
return archArm()
case "arm64":

View File

@ -7,17 +7,12 @@ package amd64
import (
"cmd/compile/internal/gc"
"cmd/internal/obj/x86"
"cmd/internal/objabi"
)
var leaptr = x86.ALEAQ
func Init(arch *gc.Arch) {
arch.LinkArch = &x86.Linkamd64
if objabi.GOARCH == "amd64p32" {
arch.LinkArch = &x86.Linkamd64p32
leaptr = x86.ALEAL
}
arch.REGSP = x86.REGSP
arch.MAXWIDTH = 1 << 50

View File

@ -210,19 +210,6 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config
c.FPReg = framepointerRegAMD64
c.LinkReg = linkRegAMD64
c.hasGReg = false
case "amd64p32":
c.PtrSize = 4
c.RegSize = 8
c.lowerBlock = rewriteBlockAMD64
c.lowerValue = rewriteValueAMD64
c.splitLoad = rewriteValueAMD64splitload
c.registers = registersAMD64[:]
c.gpRegMask = gpRegMaskAMD64
c.fpRegMask = fpRegMaskAMD64
c.FPReg = framepointerRegAMD64
c.LinkReg = linkRegAMD64
c.hasGReg = false
c.noDuffDevice = true
case "386":
c.PtrSize = 4
c.RegSize = 4

View File

@ -1061,7 +1061,7 @@ func isInlinableMemmove(dst, src *Value, sz int64, c *Config) bool {
// lowers them, so we only perform this optimization on platforms that we know to
// have fast Move ops.
switch c.arch {
case "amd64", "amd64p32":
case "amd64":
return sz <= 16 || (sz < 1024 && disjoint(dst, sz, src, sz))
case "386", "ppc64", "ppc64le", "arm64":
return sz <= 8
@ -1077,7 +1077,7 @@ func isInlinableMemmove(dst, src *Value, sz int64, c *Config) bool {
// for sizes < 32-bit. This is used to decide whether to promote some rotations.
func hasSmallRotate(c *Config) bool {
switch c.arch {
case "amd64", "amd64p32", "386":
case "amd64", "386":
return true
default:
return false
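
For reference, a rough sketch (not the compiler's actual helper) of the disjointness test the first rule above relies on: an sz-byte copy can be inlined as plain loads and stores when the source and destination ranges cannot overlap. The names below are illustrative only.

package main

import "fmt"

// disjointRanges reports whether [dst, dst+n) and [src, src+n) do not overlap,
// a simplified stand-in for the SSA disjoint() check referenced above.
func disjointRanges(dst, src, n int64) bool {
	return dst+n <= src || src+n <= dst
}

func main() {
	fmt.Println(disjointRanges(0, 16, 16)) // true: the ranges are back to back
	fmt.Println(disjointRanges(0, 8, 16))  // false: they overlap by 8 bytes
}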

View File

@ -24,7 +24,6 @@ import (
var archInits = map[string]func(*gc.Arch){
"386": x86.Init,
"amd64": amd64.Init,
"amd64p32": amd64.Init,
"arm": arm.Init,
"arm64": arm64.Init,
"mips": mips.Init,

View File

@ -61,7 +61,6 @@ var (
var okgoarch = []string{
"386",
"amd64",
"amd64p32",
"arm",
"arm64",
"mips",
@ -86,6 +85,7 @@ var okgoos = []string{
"android",
"solaris",
"freebsd",
"nacl", // keep;
"netbsd",
"openbsd",
"plan9",

View File

@ -210,7 +210,7 @@ var KnownOS = map[string]bool{
"illumos": true,
"js": true,
"linux": true,
"nacl": true,
"nacl": true, // legacy; don't remove
"netbsd": true,
"openbsd": true,
"plan9": true,
@ -222,7 +222,7 @@ var KnownOS = map[string]bool{
var KnownArch = map[string]bool{
"386": true,
"amd64": true,
"amd64p32": true,
"amd64p32": true, // legacy; don't remove
"arm": true,
"armbe": true,
"arm64": true,

View File

@ -2390,7 +2390,7 @@ func (b *Builder) gccArchArgs() []string {
switch cfg.Goarch {
case "386":
return []string{"-m32"}
case "amd64", "amd64p32":
case "amd64":
return []string{"-m64"}
case "arm":
return []string{"-marm"} // not thumb

View File

@ -1226,16 +1226,6 @@ var Linkamd64 = obj.LinkArch{
DWARFRegisters: AMD64DWARFRegisters,
}
var Linkamd64p32 = obj.LinkArch{
Arch: sys.ArchAMD64P32,
Init: instinit,
Preprocess: preprocess,
Assemble: span6,
Progedit: progedit,
UnaryDst: unaryDst,
DWARFRegisters: AMD64DWARFRegisters,
}
var Link386 = obj.LinkArch{
Arch: sys.Arch386,
Init: instinit,

View File

@ -241,7 +241,7 @@ func (d *Disasm) Print(w io.Writer, filter *regexp.Regexp, start, end uint64, pr
fmt.Fprintf(tw, " %s:%d\t%#x\t", base(file), line, pc)
}
if size%4 != 0 || d.goarch == "386" || d.goarch == "amd64" || d.goarch == "amd64p32" {
if size%4 != 0 || d.goarch == "386" || d.goarch == "amd64" {
// Print instruction as bytes.
fmt.Fprintf(tw, "%x", code[i:i+size])
} else {
@ -367,24 +367,22 @@ func disasm_ppc64(code []byte, pc uint64, lookup lookupFunc, byteOrder binary.By
}
var disasms = map[string]disasmFunc{
"386": disasm_386,
"amd64": disasm_amd64,
"amd64p32": disasm_amd64,
"arm": disasm_arm,
"arm64": disasm_arm64,
"ppc64": disasm_ppc64,
"ppc64le": disasm_ppc64,
"386": disasm_386,
"amd64": disasm_amd64,
"arm": disasm_arm,
"arm64": disasm_arm64,
"ppc64": disasm_ppc64,
"ppc64le": disasm_ppc64,
}
var byteOrders = map[string]binary.ByteOrder{
"386": binary.LittleEndian,
"amd64": binary.LittleEndian,
"amd64p32": binary.LittleEndian,
"arm": binary.LittleEndian,
"arm64": binary.LittleEndian,
"ppc64": binary.BigEndian,
"ppc64le": binary.LittleEndian,
"s390x": binary.BigEndian,
"386": binary.LittleEndian,
"amd64": binary.LittleEndian,
"arm": binary.LittleEndian,
"arm64": binary.LittleEndian,
"ppc64": binary.BigEndian,
"ppc64le": binary.LittleEndian,
"s390x": binary.BigEndian,
}
type Liner interface {
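
The printing branch in the first hunk above hex-dumps x86 code byte by byte because its instructions are variable length, while fixed-width ISAs are shown as 32-bit words in the byte order from the table above. A small hedged illustration, not part of this CL (the arm64 NOP encoding is assumed):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	code := []byte{0x1f, 0x20, 0x03, 0xd5} // assumed arm64 NOP, stored little endian
	fmt.Printf("%x\n", code)                               // byte dump: 1f2003d5
	fmt.Printf("%08x\n", binary.LittleEndian.Uint32(code)) // word dump: d503201f
}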

View File

@ -7,8 +7,7 @@ package sys
import "encoding/binary"
// ArchFamily represents a family of one or more related architectures.
// For example, amd64 and amd64p32 are both members of the AMD64 family,
// and ppc64 and ppc64le are both members of the PPC64 family.
// For example, ppc64 and ppc64le are both members of the PPC64 family.
type ArchFamily byte
const (
@ -72,15 +71,6 @@ var ArchAMD64 = &Arch{
MinLC: 1,
}
var ArchAMD64P32 = &Arch{
Name: "amd64p32",
Family: AMD64,
ByteOrder: binary.LittleEndian,
PtrSize: 4,
RegSize: 8,
MinLC: 1,
}
var ArchARM = &Arch{
Name: "arm",
Family: ARM,
@ -183,7 +173,6 @@ var ArchWasm = &Arch{
var Archs = [...]*Arch{
Arch386,
ArchAMD64,
ArchAMD64P32,
ArchARM,
ArchARM64,
ArchMIPS,

View File

@ -38,9 +38,6 @@ import (
func Init() (*sys.Arch, ld.Arch) {
arch := sys.ArchAMD64
if objabi.GOARCH == "amd64p32" {
arch = sys.ArchAMD64P32
}
theArch := ld.Arch{
Funcalign: funcAlign,

View File

@ -45,7 +45,7 @@ func main() {
os.Exit(2)
case "386":
arch, theArch = x86.Init()
case "amd64", "amd64p32":
case "amd64":
arch, theArch = amd64.Init()
case "arm":
arch, theArch = arm.Init()

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build amd64 amd64p32 386 arm ppc64le ppc64 s390x arm64
// +build amd64 386 arm ppc64le ppc64 s390x arm64
package md5

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64,!amd64p32,!386,!arm,!ppc64le,!ppc64,!s390x,!arm64
// +build !amd64,!386,!arm,!ppc64le,!ppc64,!s390x,!arm64
package md5

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build amd64p32 arm 386 s390x
// +build arm 386 s390x
package sha1

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64,!amd64p32,!386,!arm,!s390x,!arm64
// +build !amd64,!386,!arm,!s390x,!arm64
package sha1

View File

@ -4,5 +4,8 @@
package build
// List of past, present, and future known GOOS and GOARCH values.
// Do not remove from this list, as these are used for go/build filename matching.
const goosList = "aix android darwin dragonfly freebsd hurd illumos js linux nacl netbsd openbsd plan9 solaris windows zos "
const goarchList = "386 amd64 amd64p32 arm armbe arm64 arm64be ppc64 ppc64le mips mipsle mips64 mips64le mips64p32 mips64p32le ppc riscv riscv64 s390 s390x sparc sparc64 wasm "
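
A simplified sketch of why old names must stay in these strings: go/build treats a trailing _<known GOOS> (or _<known GOARCH>) in a file name as an implicit build constraint, so if "nacl" were dropped from the list a file like foo_nacl.go would silently start building on every platform. The helper below is illustrative only, not the real matching logic in go/build (which also handles _GOOS_GOARCH pairs and _test suffixes).

package main

import (
	"fmt"
	"strings"
)

var knownOS = strings.Fields("aix android darwin dragonfly freebsd hurd illumos js linux nacl netbsd openbsd plan9 solaris windows zos")

// fileMatchesGOOS is a toy version of the filename-based constraint check:
// a trailing _<known GOOS> restricts the file to that GOOS.
func fileMatchesGOOS(filename, goos string) bool {
	name := strings.TrimSuffix(filename, ".go")
	for _, os := range knownOS {
		if strings.HasSuffix(name, "_"+os) {
			return os == goos
		}
	}
	return true // no recognized GOOS suffix: unconstrained
}

func main() {
	fmt.Println(fileMatchesGOOS("foo_nacl.go", "linux")) // false: still excluded
	fmt.Println(fileMatchesGOOS("foo.go", "linux"))      // true
}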

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64,!amd64p32,!s390x,!ppc64le,!arm64
// +build !amd64,!s390x,!ppc64le,!arm64
package crc32

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !386,!amd64,!amd64p32,!s390x,!arm,!arm64,!ppc64,!ppc64le,!mips,!mipsle,!wasm,!mips64,!mips64le
// +build !386,!amd64,!s390x,!arm,!arm64,!ppc64,!ppc64le,!mips,!mipsle,!wasm,!mips64,!mips64le
package bytealg

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build 386 amd64 amd64p32 s390x arm arm64 ppc64 ppc64le mips mipsle wasm mips64 mips64le
// +build 386 amd64 s390x arm arm64 ppc64 ppc64le mips mipsle wasm mips64 mips64le
package bytealg

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !386,!amd64,!amd64p32,!s390x,!arm,!arm64,!ppc64,!ppc64le,!mips,!mipsle,!mips64,!mips64le,!wasm
// +build !386,!amd64,!s390x,!arm,!arm64,!ppc64,!ppc64le,!mips,!mipsle,!mips64,!mips64le,!wasm
package bytealg

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build 386 amd64 amd64p32 s390x arm arm64 ppc64 ppc64le mips mipsle mips64 mips64le wasm
// +build 386 amd64 s390x arm arm64 ppc64 ppc64le mips mipsle mips64 mips64le wasm
package bytealg

View File

@ -4,7 +4,6 @@
// +build !386
// +build !amd64
// +build !amd64p32
// +build !arm
// +build !arm64
// +build !ppc64

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build 386 amd64 amd64p32
// +build 386 amd64
package cpu
@ -55,8 +55,8 @@ func doinit() {
{Name: "sse42", Feature: &X86.HasSSE42},
{Name: "ssse3", Feature: &X86.HasSSSE3},
// These capabilities should always be enabled on amd64(p32):
{Name: "sse2", Feature: &X86.HasSSE2, Required: GOARCH == "amd64" || GOARCH == "amd64p32"},
// These capabilities should always be enabled on amd64:
{Name: "sse2", Feature: &X86.HasSSE2, Required: GOARCH == "amd64"},
}
maxID, _, _, _ := cpuid(0, 0)
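
Outside the runtime, an exported analogue of this feature table lives in golang.org/x/sys/cpu; a minimal, hedged check mirroring the "sse2 is required on amd64" invariant noted above (this snippet is not part of the CL):

package main

import (
	"fmt"
	"runtime"

	"golang.org/x/sys/cpu"
)

func main() {
	// SSE2 is part of the amd64 baseline, so this branch should never fire there.
	if runtime.GOARCH == "amd64" && !cpu.X86.HasSSE2 {
		fmt.Println("unexpected: amd64 without SSE2")
	} else {
		fmt.Println("SSE2 available:", cpu.X86.HasSSE2)
	}
}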

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build 386 amd64 amd64p32
// +build 386 amd64
#include "textflag.h"

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build 386 amd64 amd64p32
// +build 386 amd64
package cpu_test

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build amd64 amd64p32
// +build amd64
package math

View File

@ -6104,9 +6104,6 @@ var funcLayoutTests []funcLayoutTest
func init() {
var argAlign uintptr = PtrSize
if runtime.GOARCH == "amd64p32" {
argAlign = 2 * PtrSize
}
roundup := func(x uintptr, a uintptr) uintptr {
return (x + a - 1) / a * a
}
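
The roundup closure kept here is the usual integer round-up-to-a-multiple formula; a standalone sanity check of that arithmetic, with arbitrarily chosen values:

package main

import "fmt"

// roundup(x, a) = ((x + a - 1) / a) * a rounds x up to the next multiple of a.
func roundup(x, a uintptr) uintptr { return (x + a - 1) / a * a }

func main() {
	fmt.Println(roundup(13, 8), roundup(16, 8), roundup(0, 8)) // 16 16 0
}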

View File

@ -16,7 +16,6 @@
package reflect
import (
"runtime"
"strconv"
"sync"
"unicode"
@ -3015,9 +3014,6 @@ func funcLayout(t *funcType, rcvr *rtype) (frametype *rtype, argSize, retOffset
offset += arg.size
}
argSize = offset
if runtime.GOARCH == "amd64p32" {
offset += -offset & (8 - 1)
}
offset += -offset & (ptrSize - 1)
retOffset = offset
for _, res := range t.out() {
@ -3033,9 +3029,6 @@ func funcLayout(t *funcType, rcvr *rtype) (frametype *rtype, argSize, retOffset
size: offset,
ptrdata: uintptr(ptrmap.n) * ptrSize,
}
if runtime.GOARCH == "amd64p32" {
x.align = 8
}
if ptrmap.n > 0 {
x.gcdata = &ptrmap.data[0]
}
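
funcLayout rounds offsets with a bitmask rather than division: off += -off & (a-1) is the same round-up when a is a power of two (ptrSize here). A tiny sketch of that identity, with assumed values:

package main

import "fmt"

// alignUp rounds off up to a multiple of a (a must be a power of two),
// using the same -off & (a-1) trick as funcLayout above.
func alignUp(off, a uintptr) uintptr { return off + (-off & (a - 1)) }

func main() {
	fmt.Println(alignUp(12, 8), alignUp(16, 8), alignUp(0, 8)) // 16 16 0
}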

View File

@ -555,9 +555,6 @@ func callReflect(ctxt *makeFuncImpl, frame unsafe.Pointer, retValid *bool) {
// Copy results back into argument frame.
if numOut > 0 {
off += -off & (ptrSize - 1)
if runtime.GOARCH == "amd64p32" {
off = align(off, 8)
}
for i, typ := range ftyp.out() {
v := out[i]
if v.typ == nil {
@ -697,8 +694,7 @@ func callMethod(ctxt *methodValue, frame unsafe.Pointer, retValid *bool) {
// Copy in receiver and rest of args.
storeRcvr(rcvr, scratch)
// Align the first arg. Only on amd64p32 the alignment can be
// larger than ptrSize.
// Align the first arg. The alignment can't be larger than ptrSize.
argOffset := uintptr(ptrSize)
if len(t.in()) > 0 {
argOffset = align(argOffset, uintptr(t.in()[0].align))
@ -713,17 +709,11 @@ func callMethod(ctxt *methodValue, frame unsafe.Pointer, retValid *bool) {
// and then copies the results back into scratch.
call(frametype, fn, scratch, uint32(frametype.size), uint32(retOffset))
// Copy return values. On amd64p32, the beginning of return values
// is 64-bit aligned, so the caller's frame layout (which doesn't have
// a receiver) is different from the layout of the fn call, which has
// a receiver.
// Copy return values.
// Ignore any changes to args and just copy return values.
// Avoid constructing out-of-bounds pointers if there are no return values.
if frametype.size-retOffset > 0 {
callerRetOffset := retOffset - argOffset
if runtime.GOARCH == "amd64p32" {
callerRetOffset = align(argSize-argOffset, 8)
}
// This copies to the stack. Write barriers are not needed.
memmove(add(frame, callerRetOffset, "frametype.size > retOffset"),
add(scratch, retOffset, "frametype.size > retOffset"),
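
A worked example of the offset arithmetic that survives this hunk, with assumed amd64-style sizes (not computed by reflect): for a method func (T) M(a int32) int64 called through a method value, the fn frame holds receiver, argument, padding, then results, while the caller's frame has no receiver, so results are copied back at retOffset-argOffset.

package main

import "fmt"

func main() {
	const (
		ptrSize   = 8                                        // assumed: 64-bit target
		argOffset = ptrSize                                  // receiver word comes first in the fn frame
		argSize   = argOffset + 4                            // ...then the int32 argument
		retOffset = (argSize + ptrSize - 1) &^ (ptrSize - 1) // results start pointer-aligned: 16
	)
	// In the caller's frame there is no receiver, so its results start here:
	fmt.Println(retOffset - argOffset) // 8
}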

View File

@ -19,9 +19,6 @@ GLOBL runtime·no_pointers_stackmap(SB),RODATA, $8
#ifdef GOARCH_386
#define SKIP4 BYTE $0x90; BYTE $0x90; BYTE $0x90; BYTE $0x90
#endif
#ifdef GOARCH_amd64p32
#define SKIP4 BYTE $0x90; BYTE $0x90; BYTE $0x90; BYTE $0x90
#endif
#ifdef GOARCH_wasm
#define SKIP4 UNDEF; UNDEF; UNDEF; UNDEF
#endif

View File

@ -187,14 +187,6 @@ func infoBigStruct() []byte {
typeScalar, typeScalar, typeScalar, // t int; y uint16; u uint64
typePointer, typeScalar, // i string
}
case "amd64p32":
return []byte{
typePointer, // q *int
typeScalar, typeScalar, typeScalar, typeScalar, typeScalar, // w byte; e [17]byte
typePointer, typeScalar, typeScalar, // r []byte
typeScalar, typeScalar, typeScalar, typeScalar, typeScalar, // t int; y uint16; u uint64
typePointer, typeScalar, // i string
}
default:
panic("unknown arch")
}

View File

@ -11,11 +11,6 @@
#define g(r) 0(r)(TLS*1)
#endif
#ifdef GOARCH_amd64p32
#define get_tls(r) MOVL TLS, r
#define g(r) 0(r)(TLS*1)
#endif
#ifdef GOARCH_386
#define get_tls(r) MOVL TLS, r
#define g(r) 0(r)(TLS*1)

View File

@ -6,7 +6,7 @@
// xxhash: https://code.google.com/p/xxhash/
// cityhash: https://code.google.com/p/cityhash/
// +build amd64 amd64p32 arm64 mips64 mips64le ppc64 ppc64le s390x wasm
// +build amd64 arm64 mips64 mips64le ppc64 ppc64le s390x wasm
package runtime

View File

@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build amd64 amd64p32
package atomic
import "unsafe"

View File

@ -86,14 +86,8 @@ func TestUnaligned64(t *testing.T) {
// a continual source of pain. Test that on 32-bit systems they crash
// instead of failing silently.
switch runtime.GOARCH {
default:
if unsafe.Sizeof(int(0)) != 4 {
t.Skip("test only runs on 32-bit systems")
}
case "amd64p32":
// amd64p32 can handle unaligned atomics.
t.Skipf("test not needed on %v", runtime.GOARCH)
if unsafe.Sizeof(int(0)) != 4 {
t.Skip("test only runs on 32-bit systems")
}
x := make([]uint32, 4)
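
For context, a hedged standalone sketch of the crash this test wants on 32-bit targets (not the test code itself); which element of the slice is only 4-byte aligned depends on the allocation, so it probes the base address first:

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

func main() {
	x := make([]uint32, 4)
	i := 0
	if uintptr(unsafe.Pointer(&x[0]))%8 == 0 {
		i = 1 // pick the element whose address is 4-byte but not 8-byte aligned
	}
	p := (*uint64)(unsafe.Pointer(&x[i]))
	// On 32-bit GOARCHes this panics rather than silently tearing;
	// on 64-bit targets it simply succeeds.
	fmt.Println(atomic.AddUint64(p, 1))
}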

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build 386 amd64p32 arm mips mipsle
// +build 386 arm mips mipsle
package runtime

View File

@ -922,7 +922,7 @@ var (
// Information about what cpu features are available.
// Packages outside the runtime should not use these
// as they are not an external api.
// Set on startup in asm_{386,amd64,amd64p32}.s
// Set on startup in asm_{386,amd64}.s
processorVersionInfo uint32
isIntel bool
lfenceBeforeRdtsc bool

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build amd64 amd64p32
// +build amd64
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
package runtime

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build 386 arm amd64p32 mips mipsle
// +build 386 arm mips mipsle
package runtime

View File

@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build amd64 amd64p32
package runtime
// stackcheck checks that SP is in range [g->stack.lo, g->stack.hi).

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build amd64 amd64p32 386
// +build amd64 386
package runtime

View File

@ -84,7 +84,7 @@ const (
// and ppc64le.
// Tracing won't work reliably for architectures where cputicks is emulated
// by nanotime, so the value doesn't matter for those architectures.
traceTickDiv = 16 + 48*(sys.Goarch386|sys.GoarchAmd64|sys.GoarchAmd64p32)
traceTickDiv = 16 + 48*(sys.Goarch386|sys.GoarchAmd64)
// Maximum number of PCs in a single stack trace.
// Since events contain only stack id rather than whole stack trace,
// we can allow quite large values here.
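
The sys.Goarch* identifiers are build-selected constants that are 1 for the target GOARCH and 0 otherwise (an assumption about runtime/internal/sys, not shown in this diff), so the expression folds to 64 on 386 and amd64 and to 16 everywhere else. A quick check of that arithmetic:

package main

import "fmt"

func main() {
	for _, c := range []struct{ goarch386, goarchAmd64 int }{
		{1, 0}, // GOARCH=386
		{0, 1}, // GOARCH=amd64
		{0, 0}, // any other GOARCH
	} {
		fmt.Println(16 + 48*(c.goarch386|c.goarchAmd64)) // 64, 64, 16
	}
}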

View File

@ -26,8 +26,8 @@ import (
// takes up only 4 bytes on the stack, while on 64-bit systems it takes up 8 bytes.
// Typically this is ptrSize.
//
// As an exception, amd64p32 has ptrSize == 4 but the CALL instruction still
// stores an 8-byte return PC onto the stack. To accommodate this, we use regSize
// As an exception, amd64p32 had ptrSize == 4 but the CALL instruction still
// stored an 8-byte return PC onto the stack. To accommodate this, we used regSize
// as the size of the architecture-pushed return PC.
//
// usesLR is defined below in terms of minFrameSize, which is defined in

View File

@ -1391,15 +1391,8 @@ func TestUnaligned64(t *testing.T) {
// Unaligned 64-bit atomics on 32-bit systems are
// a continual source of pain. Test that on 32-bit systems they crash
// instead of failing silently.
switch runtime.GOARCH {
default:
if !arch32 {
t.Skip("test only runs on 32-bit systems")
}
case "amd64p32":
// amd64p32 can handle unaligned atomics.
t.Skipf("test not needed on %v", runtime.GOARCH)
if !arch32 {
t.Skip("test only runs on 32-bit systems")
}
x := make([]uint32, 4)

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
// +build 386 amd64 amd64p32 arm arm64 ppc64le mips64le mipsle wasm
// +build 386 amd64 arm arm64 ppc64le mips64le mipsle wasm
package syscall