
Merge branch 'master' of github.com:golang/go into cg/resilient-timeout-handler

Charlie Getzen 2021-11-05 12:15:13 -05:00
commit 14abd7e4d7
1694 changed files with 37366 additions and 14670 deletions


@ -817,6 +817,7 @@ Lehner Florian <dev@der-flo.net>
Leigh McCulloch <leighmcc@gmail.com>
Leo Antunes <leo@costela.net>
Leon Klingele <git@leonklingele.de>
Leonard Wang <wangdeyu0907@gmail.com> <wangdeyu@golangcn.org>
Leonel Quinteros <leonel.quinteros@gmail.com>
Lev Shamardin <shamardin@gmail.com>
Lewin Bormann <lewin.bormann@gmail.com>
@ -1015,6 +1016,7 @@ Nathan Youngman <git@nathany.com>
Nathaniel Cook <nvcook42@gmail.com>
Naveen Kumar Sangi <naveenkumarsangi@protonmail.com>
Neelesh Chandola <neelesh.c98@gmail.com>
Neil Alexander <neilalexander@neilalexander.dev>
Neil Lyons <nwjlyons@googlemail.com>
Netflix, Inc.
Neuman Vong <neuman.vong@gmail.com>


@ -1569,7 +1569,7 @@ Leigh McCulloch <leighmcc@gmail.com>
Leo Antunes <leo@costela.net>
Leo Rudberg <ljr@google.com>
Leon Klingele <git@leonklingele.de>
Leonard Wang <wangdeyu0907@gmail.com>
Leonard Wang <wangdeyu0907@gmail.com> <wangdeyu@golangcn.org>
Leonardo Comelli <leonardo.comelli@gmail.com>
Leonel Quinteros <leonel.quinteros@gmail.com>
Lev Shamardin <shamardin@gmail.com>
@ -1901,6 +1901,7 @@ Naveen Kumar Sangi <naveenkumarsangi@protonmail.com>
Neeilan Selvalingam <neeilan96@gmail.com>
Neelesh Chandola <neelesh.c98@gmail.com>
Nehal J Wani <nehaljw.kkd1@gmail.com>
Neil Alexander <neilalexander@neilalexander.dev>
Neil Lyons <nwjlyons@googlemail.com>
Neuman Vong <neuman.vong@gmail.com>
Neven Sajko <nsajko@gmail.com>


@ -1,111 +1,13 @@
pkg syscall (darwin-amd64), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
pkg syscall (darwin-amd64), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
pkg syscall (darwin-amd64), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
pkg syscall (darwin-amd64), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
pkg syscall (darwin-amd64-cgo), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
pkg syscall (darwin-amd64-cgo), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
pkg syscall (darwin-amd64-cgo), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
pkg syscall (darwin-amd64-cgo), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
pkg syscall (freebsd-386), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
pkg syscall (freebsd-386), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
pkg syscall (freebsd-386), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
pkg syscall (freebsd-386), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
pkg syscall (freebsd-386-cgo), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
pkg syscall (freebsd-386-cgo), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
pkg syscall (freebsd-386-cgo), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
pkg syscall (freebsd-386-cgo), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
pkg syscall (freebsd-amd64), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
pkg syscall (freebsd-amd64), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
pkg syscall (freebsd-amd64), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
pkg syscall (freebsd-amd64), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
pkg syscall (freebsd-amd64-cgo), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
pkg syscall (freebsd-amd64-cgo), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
pkg syscall (freebsd-amd64-cgo), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
pkg syscall (freebsd-amd64-cgo), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
pkg syscall (freebsd-arm), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
pkg syscall (freebsd-arm), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
pkg syscall (freebsd-arm), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
pkg syscall (freebsd-arm), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
pkg syscall (freebsd-arm-cgo), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
pkg syscall (freebsd-arm-cgo), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
pkg syscall (freebsd-arm-cgo), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
pkg syscall (freebsd-arm-cgo), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
pkg syscall (linux-386), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
pkg syscall (linux-386), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
pkg syscall (linux-386), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
pkg syscall (linux-386), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
pkg syscall (linux-386-cgo), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
pkg syscall (linux-386-cgo), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
pkg syscall (linux-386-cgo), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
pkg syscall (linux-386-cgo), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
pkg syscall (linux-amd64), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
pkg syscall (linux-amd64), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
pkg syscall (linux-amd64), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
pkg syscall (linux-amd64), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
pkg syscall (linux-amd64-cgo), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
pkg syscall (linux-amd64-cgo), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
pkg syscall (linux-amd64-cgo), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
pkg syscall (linux-amd64-cgo), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
pkg syscall (linux-arm), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
pkg syscall (linux-arm), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
pkg syscall (linux-arm), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
pkg syscall (linux-arm), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
pkg syscall (linux-arm-cgo), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
pkg syscall (linux-arm-cgo), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
pkg syscall (linux-arm-cgo), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
pkg syscall (linux-arm-cgo), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
pkg syscall (netbsd-386), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
pkg syscall (netbsd-386), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
pkg syscall (netbsd-386), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
pkg syscall (netbsd-386), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
pkg syscall (netbsd-386-cgo), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
pkg syscall (netbsd-386-cgo), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
pkg syscall (netbsd-386-cgo), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
pkg syscall (netbsd-386-cgo), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
pkg syscall (netbsd-amd64), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
pkg syscall (netbsd-amd64), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
pkg syscall (netbsd-amd64), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
pkg syscall (netbsd-amd64), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
pkg syscall (netbsd-amd64-cgo), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
pkg syscall (netbsd-amd64-cgo), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
pkg syscall (netbsd-amd64-cgo), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
pkg syscall (netbsd-amd64-cgo), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
pkg syscall (netbsd-arm), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
pkg syscall (netbsd-arm), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
pkg syscall (netbsd-arm), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
pkg syscall (netbsd-arm), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
pkg syscall (netbsd-arm-cgo), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
pkg syscall (netbsd-arm-cgo), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
pkg syscall (netbsd-arm-cgo), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
pkg syscall (netbsd-arm-cgo), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
pkg syscall (netbsd-arm64), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
pkg syscall (netbsd-arm64), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
pkg syscall (netbsd-arm64), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
pkg syscall (netbsd-arm64), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
pkg syscall (netbsd-arm64-cgo), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
pkg syscall (netbsd-arm64-cgo), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
pkg syscall (netbsd-arm64-cgo), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
pkg syscall (netbsd-arm64-cgo), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
pkg syscall (openbsd-386), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
pkg syscall (openbsd-386), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
pkg syscall (openbsd-386), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
pkg syscall (openbsd-386), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
pkg syscall (openbsd-386-cgo), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
pkg syscall (openbsd-386-cgo), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
pkg syscall (openbsd-386-cgo), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
pkg syscall (openbsd-386-cgo), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
pkg syscall (openbsd-amd64), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
pkg syscall (openbsd-amd64), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
pkg syscall (openbsd-amd64), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
pkg syscall (openbsd-amd64), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
pkg syscall (openbsd-amd64-cgo), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
pkg syscall (openbsd-amd64-cgo), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
pkg syscall (openbsd-amd64-cgo), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
pkg syscall (openbsd-amd64-cgo), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
pkg syscall (windows-386), func WSASendtoInet4(Handle, *WSABuf, uint32, *uint32, uint32, SockaddrInet4, *Overlapped, *uint8) error
pkg syscall (windows-386), func WSASendtoInet6(Handle, *WSABuf, uint32, *uint32, uint32, SockaddrInet6, *Overlapped, *uint8) error
pkg syscall (windows-amd64), func WSASendtoInet4(Handle, *WSABuf, uint32, *uint32, uint32, SockaddrInet4, *Overlapped, *uint8) error
pkg syscall (windows-amd64), func WSASendtoInet6(Handle, *WSABuf, uint32, *uint32, uint32, SockaddrInet6, *Overlapped, *uint8) error
pkg debug/buildinfo, func Read(io.ReaderAt) (*debug.BuildInfo, error)
pkg debug/buildinfo, func ReadFile(string) (*debug.BuildInfo, error)
pkg debug/buildinfo, type BuildInfo = debug.BuildInfo
pkg runtime/debug, method (*BuildInfo) MarshalText() ([]byte, error)
pkg runtime/debug, method (*BuildInfo) UnmarshalText() ([]byte, error)
pkg runtime/debug, type BuildInfo struct, GoVersion string
pkg runtime/debug, type BuildInfo struct, Settings []BuildSetting
pkg runtime/debug, type BuildSetting struct
pkg runtime/debug, type BuildSetting struct, Key string
pkg runtime/debug, type BuildSetting struct, Value string
pkg testing, func Fuzz(func(*F)) FuzzResult
pkg testing, func MainStart(testDeps, []InternalTest, []InternalBenchmark, []InternalFuzzTarget, []InternalExample) *M
pkg testing, func RunFuzzTargets(func(string, string) (bool, error), []InternalFuzzTarget) bool


@ -65,6 +65,44 @@ Do not send CLs removing the interior tags from such phrases.
and installs packages, as before.
</p>
<p><!-- golang.org/issue/37475 -->
The <code>go</code> command now embeds version control information in
binaries including the currently checked-out revision, commit time, and a
flag indicating whether edited or untracked files are present. Version
control information is embedded if the <code>go</code> command is invoked in
a directory within a Git, Mercurial, Fossil, or Bazaar repository, and the
<code>main</code> package and its containing main module are in the same
repository. This information may be omitted using the flag
<code>-buildvcs=false</code>.
</p>
<p><!-- golang.org/issue/37475 -->
Additionally, the <code>go</code> command embeds information about the build
including build and tool tags (set with <code>-tags</code>), compiler,
assembler, and linker flags (like <code>-gcflags</code>), whether cgo was
enabled, and if it was, the values of the cgo environment variables
(like <code>CGO_CFLAGS</code>). This information may be omitted using the
flag <code>-buildinfo=false</code>. Both VCS and build information may be
read together with module information using <code>go</code>
<code>version</code> <code>-m</code> <code>file</code> or
<code>runtime/debug.ReadBuildInfo</code> (for the currently running binary)
or the new <a href="#debug/buildinfo"><code>debug/buildinfo</code></a>
package.
</p>
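<p>
For illustration only (not part of this CL), a minimal sketch of reading the
embedded information from the currently running binary with
<code>runtime/debug.ReadBuildInfo</code>; the <code>vcs.*</code> keys named in
the comment are examples of setting names:
</p>
<pre>
package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	info, ok := debug.ReadBuildInfo()
	if !ok {
		return // not built with module support
	}
	fmt.Println("go version:", info.GoVersion)
	for _, s := range info.Settings {
		// Settings include build flags and, when available, VCS data
		// such as vcs.revision, vcs.time, and vcs.modified.
		fmt.Printf("%s=%s\n", s.Key, s.Value)
	}
}
</pre>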
<p><!-- https://golang.org/issue/44435 -->
If the main module's <code>go.mod</code> file
specifies <a href="/ref/mod#go-mod-file-go"><code>go</code> <code>1.17</code></a>
or higher, <code>go</code> <code>mod</code> <code>download</code> without
arguments now downloads source code for only the modules
explicitly <a href="/ref/mod#go-mod-file-require">required</a> in the main
module's <code>go.mod</code> file. (In a <code>go</code> <code>1.17</code> or
higher module, that set already includes all dependencies needed to build the
packages and tests in the main module.)
To also download source code for transitive dependencies, use
<code>go</code> <code>mod</code> <code>download</code> <code>all</code>.
</p>
<p>
TODO: complete this section, or delete if not needed
</p>
@ -74,7 +112,7 @@ Do not send CLs removing the interior tags from such phrases.
<p><!-- https://golang.org/issue/43566 -->
<code>gofmt</code> now reads and formats input files concurrently, with a
memory limit proportional to <code>GOMAXPROCS</code>. On a machine with
multiple CPUs, gofmt should now be significantly faster.
multiple CPUs, <code>gofmt</code> should now be significantly faster.
</p>
@ -98,10 +136,33 @@ Do not send CLs removing the interior tags from such phrases.
<h2 id="library">Core library</h2>
<h3>TODO</h3>
<p>
TODO: complete this section
</p>
<h3 id="netip">New <code>net/netip</code> package</h3>
<p>
The new <a href="/pkg/net/netip/"><code>net/netip</code></a>
package defines a new IP address type, <a href="/pkg/net/netip/#Addr"><code>Addr</code></a>.
Compared to the existing
<a href="/pkg/net/#IP"><code>net.IP</code></a> type, the <code>netip.Addr</code> type takes less
memory, is immutable, and is comparable so it supports <code>==</code>
and can be used as a map key.
</p>
<p>
In addition to <code>Addr</code>, the package defines
<a href="/pkg/net/netip/#AddrPort"><code>AddrPort</code></a>, representing
an IP and port, and
<a href="/pkg/net/netip/#Prefix"><code>Prefix</code></a>, representing
a network CIDR prefix.
</p>
<p>
The <code>net</code> package now has methods to send and receive UDP packets
using <code>netip.Addr</code> values instead of the relatively heavy
<code>*net.UDPAddr</code> values.
</p>
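<p>
A brief illustrative sketch (not part of this CL) of the types described above;
the addresses are arbitrary examples:
</p>
<pre>
package main

import (
	"fmt"
	"net/netip"
)

func main() {
	addr, err := netip.ParseAddr("10.0.0.1")
	if err != nil {
		panic(err)
	}
	// Addr is comparable: it works with == and as a map key.
	seen := map[netip.Addr]bool{addr: true}
	fmt.Println(seen[netip.MustParseAddr("10.0.0.1")]) // true

	ap := netip.AddrPortFrom(addr, 8080)         // IP and port
	prefix, _ := netip.ParsePrefix("10.0.0.0/8") // CIDR prefix
	fmt.Println(ap, prefix.Contains(addr))       // 10.0.0.1:8080 true
}
</pre>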
<h3 id="minor_library_changes">Minor changes to the library</h3>
<p>
@ -114,6 +175,19 @@ Do not send CLs removing the interior tags from such phrases.
TODO: complete this section
</p>
<dl id="debug/buildinfo"><dt><a href="/pkg/debug/buildinfo">debug/buildinfo</a></dt>
<dd>
<p><!-- golang.org/issue/39301 -->
This new package provides access to module versions, version control
information, and build flags embedded in executable files built by
the <code>go</code> command. The same information is also available via
<a href="/pkg/runtime/debug#ReadBuildInfo"><code>runtime/debug.ReadBuildInfo</code></a>
for the currently running binary and via <code>go</code>
<code>version</code> <code>-m</code> on the command line.
</p>
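<p>
As an illustration (not part of this CL), a minimal sketch of inspecting an
executable on disk with <code>buildinfo.ReadFile</code>:
</p>
<pre>
package main

import (
	"debug/buildinfo"
	"fmt"
	"log"
	"os"
)

func main() {
	// Read module and build information from a Go executable named on
	// the command line.
	info, err := buildinfo.ReadFile(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(info.Path, info.GoVersion)
}
</pre>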
</dd>
</dl>
<dl id="image/draw"><dt><a href="/pkg/image/draw/">image/draw</a></dt>
<dd>
<p><!-- CL 340049 -->
@ -127,12 +201,35 @@ Do not send CLs removing the interior tags from such phrases.
</dd>
</dl><!-- image/draw -->
<dl id="reflect"><dt><a href="/pkg/reflect/">reflect</a></dt>
<dd>
<p><!-- CL 356049, 320929 -->
The new
<a href="/pkg/reflect/#Value.SetIterKey"><code>Value.SetIterKey</code></a>
and <a href="/pkg/reflect/#Value.SetIterValue"><code>Value.SetIterValue</code></a>
methods set a Value using a map iterator as the source. They are equivalent to
<code>Value.Set(iter.Key())</code> and <code>Value.Set(iter.Value())</code> but
do fewer allocations.
</p>
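<p>
A minimal sketch (illustrative, not from this CL) of copying a map with the new
iterator-based setters:
</p>
<pre>
package main

import (
	"fmt"
	"reflect"
)

func main() {
	src := map[string]int{"a": 1, "b": 2}
	srcVal := reflect.ValueOf(src)

	// Reuse the addressable k and v values on every iteration instead of
	// allocating with iter.Key() and iter.Value().
	dst := reflect.MakeMap(srcVal.Type())
	k := reflect.New(srcVal.Type().Key()).Elem()
	v := reflect.New(srcVal.Type().Elem()).Elem()
	for iter := srcVal.MapRange(); iter.Next(); {
		k.SetIterKey(iter)
		v.SetIterValue(iter)
		dst.SetMapIndex(k, v)
	}
	fmt.Println(dst.Interface()) // map[a:1 b:2]
}
</pre>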
</dd>
<dd>
<p><!-- CL 350691 -->
The new
<a href="/pkg/reflect/#Value.UnsafePointer"><code>Value.UnsafePointer</code></a>
method returns the Value's value as an <a href="/pkg/unsafe/#Pointer"><code>unsafe.Pointer</code></a>.
This allows callers to migrate from <a href="/pkg/reflect/#Value.UnsafeAddr"><code>Value.UnsafeAddr</code></a>
and <a href="/pkg/reflect/#Value.Pointer"><code>Value.Pointer</code></a>
to eliminate the need to perform uintptr to unsafe.Pointer conversions at the callsite (as unsafe.Pointer rules require).
</p>
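<p>
A short sketch (illustrative, not part of this CL) of the difference at the
call site:
</p>
<pre>
package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

func main() {
	b := []byte("hello")
	v := reflect.ValueOf(b)
	// Value.Pointer returns a uintptr, so callers had to convert it back
	// to unsafe.Pointer themselves. UnsafePointer returns an
	// unsafe.Pointer directly.
	old := unsafe.Pointer(v.Pointer())
	now := v.UnsafePointer()
	fmt.Println(old == now) // true
}
</pre>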
</dd>
</dl><!-- reflect -->
<dl id="syscall"><dt><a href="/pkg/syscall/">syscall</a></dt>
<dd>
<p><!-- CL 336550 -->
The new function <a href="/pkg/syscall/?GOOS=windows#SyscallN"><code>SyscallN</code></a>
has been introduced for Windows, allowing for calls with arbitrary number
of arguments. As results,
of arguments. As a result,
<a href="/pkg/syscall/?GOOS=windows#Syscall"><code>Syscall</code></a>,
<a href="/pkg/syscall/?GOOS=windows#Syscall6"><code>Syscall6</code></a>,
<a href="/pkg/syscall/?GOOS=windows#Syscall9"><code>Syscall9</code></a>,


@ -1,6 +1,6 @@
<!--{
"Title": "The Go Programming Language Specification",
"Subtitle": "Version of Sep 16, 2021",
"Subtitle": "Version of Oct 15, 2021",
"Path": "/ref/spec"
}-->
@ -4598,7 +4598,8 @@ a <a href="#Blocks">block</a>. The following statements are terminating:
A <a href="#For_statements">"for" statement</a> in which:
<ul>
<li>there are no "break" statements referring to the "for" statement, and</li>
<li>the loop condition is absent.</li>
<li>the loop condition is absent, and</li>
<li>the "for" statement does not use a range clause.</li>
</ul>
</li>
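<p>
For illustration (not part of this change), a "for" statement with a range
clause is not terminating even when it has no condition and no "break":
</p>
<pre>
func f() int {
	for {
		// no condition, no range clause, no "break": terminating,
		// so no return statement is needed after the loop
	}
}

func g(c chan int) int {
	for range c {
		// uses a range clause, so this statement is not terminating
	}
	return 0 // required
}
</pre>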

View File

@ -931,3 +931,55 @@ func TestManyCalls(t *testing.T) {
t.Error(err)
}
}
// Issue 49288.
func TestPreemption(t *testing.T) {
if runtime.Compiler == "gccgo" {
t.Skip("skipping asynchronous preemption test with gccgo")
}
t.Parallel()
if !testWork {
defer func() {
os.Remove("testp8" + exeSuffix)
os.Remove("libgo8.a")
os.Remove("libgo8.h")
}()
}
cmd := exec.Command("go", "build", "-buildmode=c-archive", "-o", "libgo8.a", "./libgo8")
if out, err := cmd.CombinedOutput(); err != nil {
t.Logf("%s", out)
t.Fatal(err)
}
checkLineComments(t, "libgo8.h")
ccArgs := append(cc, "-o", "testp8"+exeSuffix, "main8.c", "libgo8.a")
if out, err := exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput(); err != nil {
t.Logf("%s", out)
t.Fatal(err)
}
argv := cmdToRun("./testp8")
cmd = exec.Command(argv[0], argv[1:]...)
var sb strings.Builder
cmd.Stdout = &sb
cmd.Stderr = &sb
if err := cmd.Start(); err != nil {
t.Fatal(err)
}
timer := time.AfterFunc(time.Minute,
func() {
t.Error("test program timed out")
cmd.Process.Kill()
},
)
defer timer.Stop()
if err := cmd.Wait(); err != nil {
t.Log(sb.String())
t.Error(err)
}
}


@ -0,0 +1,36 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import "C"
import (
"os"
"runtime"
"sync/atomic"
)
var started int32
// Start a goroutine that loops forever.
func init() {
runtime.GOMAXPROCS(1)
go func() {
for {
atomic.StoreInt32(&started, 1)
}
}()
}
//export GoFunction8
func GoFunction8() {
for atomic.LoadInt32(&started) == 0 {
runtime.Gosched()
}
os.Exit(0)
}
func main() {
}

misc/cgo/testcarchive/testdata/main8.c

@ -0,0 +1,16 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Test preemption.
#include <stdlib.h>
#include "libgo8.h"
int main() {
GoFunction8();
// That should have exited the program.
abort();
}


@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin dragonfly freebsd linux,!arm64 netbsd openbsd
// +build darwin dragonfly freebsd linux,!arm64,!riscv64 netbsd openbsd
package main


@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux,arm64
// +build linux,arm64 linux,riscv64
package main


@ -0,0 +1,66 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sanitizers_test
import (
"strings"
"testing"
)
func TestASAN(t *testing.T) {
goos, err := goEnv("GOOS")
if err != nil {
t.Fatal(err)
}
goarch, err := goEnv("GOARCH")
if err != nil {
t.Fatal(err)
}
// The asan tests require support for the -asan option.
if !aSanSupported(goos, goarch) {
t.Skipf("skipping on %s/%s; -asan option is not supported.", goos, goarch)
}
t.Parallel()
requireOvercommit(t)
config := configure("address")
config.skipIfCSanitizerBroken(t)
mustRun(t, config.goCmd("build", "std"))
cases := []struct {
src string
memoryAccessError string
}{
{src: "asan1_fail.go", memoryAccessError: "heap-use-after-free"},
{src: "asan2_fail.go", memoryAccessError: "heap-buffer-overflow"},
{src: "asan3_fail.go", memoryAccessError: "use-after-poison"},
{src: "asan4_fail.go", memoryAccessError: "use-after-poison"},
{src: "asan_useAfterReturn.go"},
}
for _, tc := range cases {
tc := tc
name := strings.TrimSuffix(tc.src, ".go")
t.Run(name, func(t *testing.T) {
t.Parallel()
dir := newTempDir(t)
defer dir.RemoveAll(t)
outPath := dir.Join(name)
mustRun(t, config.goCmd("build", "-o", outPath, srcPath(tc.src)))
cmd := hangProneCmd(outPath)
if tc.memoryAccessError != "" {
out, err := cmd.CombinedOutput()
if err != nil && strings.Contains(string(out), tc.memoryAccessError) {
return
}
t.Fatalf("%#q exited without expected memory access error\n%s; got failure\n%s", strings.Join(cmd.Args, " "), tc.memoryAccessError, out)
}
mustRun(t, cmd)
})
}
}


@ -267,6 +267,9 @@ func configure(sanitizer string) *config {
c.ldFlags = append(c.ldFlags, "-fPIC", "-static-libtsan")
}
case "address":
c.goFlags = append(c.goFlags, "-asan")
default:
panic(fmt.Sprintf("unrecognized sanitizer: %q", sanitizer))
}
@ -450,3 +453,14 @@ func mSanSupported(goos, goarch string) bool {
return false
}
}
// aSanSupported is a copy of the function cmd/internal/sys.ASanSupported,
// because the internal package can't be used here.
func aSanSupported(goos, goarch string) bool {
switch goos {
case "linux":
return goarch == "amd64" || goarch == "arm64"
default:
return false
}
}


@ -0,0 +1,28 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
/*
#include <stdlib.h>
#include <stdio.h>
int *p;
int* test() {
p = (int *)malloc(2 * sizeof(int));
free(p);
return p;
}
*/
import "C"
import "fmt"
func main() {
// C passes Go an invalid pointer.
a := C.test()
// Use after free
*a = 2
// We shouldn't get here; asan should stop us first.
fmt.Println(*a)
}


@ -0,0 +1,34 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
/*
#include <stdlib.h>
#include <stdio.h>
int *p;
int* f() {
int i;
p = (int *)malloc(5*sizeof(int));
for (i = 0; i < 5; i++) {
p[i] = i+10;
}
return p;
}
*/
import "C"
import (
"fmt"
"unsafe"
)
func main() {
a := C.f()
q5 := (*C.int)(unsafe.Add(unsafe.Pointer(a), 4*5))
// Access to C pointer out of bounds.
*q5 = 100
// We shouldn't get here; asan should stop us first.
fmt.Printf("q5: %d, %x\n", *q5, q5)
}


@ -0,0 +1,23 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
/*
#include <stdlib.h>
#include <stdio.h>
void test(int *a) {
// Access Go pointer out of bounds.
int c = a[5]; // BOOM
// We shouldn't get here; asan should stop us first.
printf("a[5]=%d\n", c);
}
*/
import "C"
func main() {
cIntSlice := []C.int{200, 201, 203, 203, 204}
C.test(&cIntSlice[0])
}


@ -0,0 +1,22 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
/*
#include <stdlib.h>
#include <stdio.h>
void test(int* a) {
// Access Go pointer out of bounds.
a[3] = 300; // BOOM
// We shouldn't get here; asan should stop us first.
printf("a[3]=%d\n", a[3]);
}*/
import "C"
func main() {
var cIntArray [2]C.int
C.test(&cIntArray[0]) // cIntArray is moved to heap.
}


@ -0,0 +1,26 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
// The -fsanitize=address option of the C compiler can detect stack-use-after-return bugs.
// In the following program, the local variable 'local' is moved to the heap by the Go
// compiler because foo() returns a reference to it, and foo()'s stack frame is invalid
// once foo() returns. Since 'local' must remain available after foo() has finished, it
// is heap-allocated, so Go does not have this issue.
import "fmt"
var ptr *int
func main() {
foo()
fmt.Printf("ptr=%x, %v", *ptr, ptr)
}
func foo() {
var local int
local = 1
ptr = &local // local is moved to heap.
}


@ -11,4 +11,4 @@ while [ -h "$SOURCE" ]; do
done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
exec node "$DIR/wasm_exec.js" "$@"
exec node "$DIR/wasm_exec_node.js" "$@"


@ -1,49 +1,19 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
"use strict";
(() => {
// Map multiple JavaScript environments to a single common API,
// preferring web standards over Node.js API.
//
// Environments considered:
// - Browsers
// - Node.js
// - Electron
// - Parcel
// - Webpack
if (typeof global !== "undefined") {
// global already exists
} else if (typeof window !== "undefined") {
window.global = window;
} else if (typeof self !== "undefined") {
self.global = self;
} else {
throw new Error("cannot export Go (neither global, window nor self is defined)");
}
if (!global.require && typeof require !== "undefined") {
global.require = require;
}
if (!global.fs && global.require) {
const fs = require("fs");
if (typeof fs === "object" && fs !== null && Object.keys(fs).length !== 0) {
global.fs = fs;
}
}
const enosys = () => {
const err = new Error("not implemented");
err.code = "ENOSYS";
return err;
};
if (!global.fs) {
if (!globalThis.fs) {
let outputBuf = "";
global.fs = {
globalThis.fs = {
constants: { O_WRONLY: -1, O_RDWR: -1, O_CREAT: -1, O_TRUNC: -1, O_APPEND: -1, O_EXCL: -1 }, // unused
writeSync(fd, buf) {
outputBuf += decoder.decode(buf);
@ -88,8 +58,8 @@
};
}
if (!global.process) {
global.process = {
if (!globalThis.process) {
globalThis.process = {
getuid() { return -1; },
getgid() { return -1; },
geteuid() { return -1; },
@ -103,47 +73,26 @@
}
}
if (!global.crypto && global.require) {
const nodeCrypto = require("crypto");
global.crypto = {
getRandomValues(b) {
nodeCrypto.randomFillSync(b);
},
};
}
if (!global.crypto) {
throw new Error("global.crypto is not available, polyfill required (getRandomValues only)");
if (!globalThis.crypto) {
throw new Error("globalThis.crypto is not available, polyfill required (crypto.getRandomValues only)");
}
if (!global.performance) {
global.performance = {
now() {
const [sec, nsec] = process.hrtime();
return sec * 1000 + nsec / 1000000;
},
};
if (!globalThis.performance) {
throw new Error("globalThis.performance is not available, polyfill required (performance.now only)");
}
if (!global.TextEncoder && global.require) {
global.TextEncoder = require("util").TextEncoder;
}
if (!global.TextEncoder) {
throw new Error("global.TextEncoder is not available, polyfill required");
if (!globalThis.TextEncoder) {
throw new Error("globalThis.TextEncoder is not available, polyfill required");
}
if (!global.TextDecoder && global.require) {
global.TextDecoder = require("util").TextDecoder;
if (!globalThis.TextDecoder) {
throw new Error("globalThis.TextDecoder is not available, polyfill required");
}
if (!global.TextDecoder) {
throw new Error("global.TextDecoder is not available, polyfill required");
}
// End of polyfills for common API.
const encoder = new TextEncoder("utf-8");
const decoder = new TextDecoder("utf-8");
global.Go = class {
globalThis.Go = class {
constructor() {
this.argv = ["js"];
this.env = {};
@ -518,7 +467,7 @@
null,
true,
false,
global,
globalThis,
this,
];
this._goRefCounts = new Array(this._values.length).fill(Infinity); // number of references that Go has to a JS value, indexed by reference id
@ -527,7 +476,7 @@
[null, 2],
[true, 3],
[false, 4],
[global, 5],
[globalThis, 5],
[this, 6],
]);
this._idPool = []; // unused ids that have been garbage collected
@ -570,9 +519,9 @@
// The linker guarantees global data starts from at least wasmMinDataAddr.
// Keep in sync with cmd/link/internal/ld/data.go:wasmMinDataAddr.
const wasmMinDataAddr = 4096 + 4096;
const wasmMinDataAddr = 4096 + 8192;
if (offset >= wasmMinDataAddr) {
throw new Error("command line too long");
throw new Error("total length of command line and environment variables exceeds limit");
}
this._inst.exports.run(argc, argv);
@ -602,36 +551,4 @@
};
}
}
if (
typeof module !== "undefined" &&
global.require &&
global.require.main === module &&
global.process &&
global.process.versions &&
!global.process.versions.electron
) {
if (process.argv.length < 3) {
console.error("usage: go_js_wasm_exec [wasm binary] [arguments]");
process.exit(1);
}
const go = new Go();
go.argv = process.argv.slice(2);
go.env = Object.assign({ TMPDIR: require("os").tmpdir() }, process.env);
go.exit = process.exit;
WebAssembly.instantiate(fs.readFileSync(process.argv[2]), go.importObject).then((result) => {
process.on("exit", (code) => { // Node.js exits if no event handler is pending
if (code === 0 && !go.exited) {
// deadlock, make Go print error and stack traces
go._pendingEvent = { id: 0 };
go._resume();
}
});
return go.run(result.instance);
}).catch((err) => {
console.error(err);
process.exit(1);
});
}
})();


@ -0,0 +1,49 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
"use strict";
if (process.argv.length < 3) {
console.error("usage: go_js_wasm_exec [wasm binary] [arguments]");
process.exit(1);
}
globalThis.require = require;
globalThis.fs = require("fs");
globalThis.TextEncoder = require("util").TextEncoder;
globalThis.TextDecoder = require("util").TextDecoder;
globalThis.performance = {
now() {
const [sec, nsec] = process.hrtime();
return sec * 1000 + nsec / 1000000;
},
};
const crypto = require("crypto");
globalThis.crypto = {
getRandomValues(b) {
crypto.randomFillSync(b);
},
};
require("./wasm_exec");
const go = new Go();
go.argv = process.argv.slice(2);
go.env = Object.assign({ TMPDIR: require("os").tmpdir() }, process.env);
go.exit = process.exit;
WebAssembly.instantiate(fs.readFileSync(process.argv[2]), go.importObject).then((result) => {
process.on("exit", (code) => { // Node.js exits if no event handler is pending
if (code === 0 && !go.exited) {
// deadlock, make Go print error and stack traces
go._pendingEvent = { id: 0 };
go._resume();
}
});
return go.run(result.instance);
}).catch((err) => {
console.error(err);
process.exit(1);
});


@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || linux || dragonfly || openbsd || solaris
// +build aix linux dragonfly openbsd solaris
package tar


@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build darwin || freebsd || netbsd
// +build darwin freebsd netbsd
package tar


@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || linux || darwin || dragonfly || freebsd || openbsd || netbsd || solaris
// +build aix linux darwin dragonfly freebsd openbsd netbsd solaris
package tar


@ -741,6 +741,9 @@ func (r *Reader) initFileList() {
for _, file := range r.File {
isDir := len(file.Name) > 0 && file.Name[len(file.Name)-1] == '/'
name := toValidName(file.Name)
if name == "" {
continue
}
for dir := path.Dir(name); dir != "."; dir = path.Dir(dir) {
dirs[dir] = true
}
@ -782,8 +785,11 @@ func fileEntryLess(x, y string) bool {
func (r *Reader) Open(name string) (fs.File, error) {
r.initFileList()
if !fs.ValidPath(name) {
return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrInvalid}
}
e := r.openLookup(name)
if e == nil || !fs.ValidPath(name) {
if e == nil {
return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrNotExist}
}
if e.isDir {
@ -797,7 +803,7 @@ func (r *Reader) Open(name string) (fs.File, error) {
}
func split(name string) (dir, elem string, isDir bool) {
if name[len(name)-1] == '/' {
if len(name) > 0 && name[len(name)-1] == '/' {
isDir = true
name = name[:len(name)-1]
}


@ -13,6 +13,7 @@ import (
"io/fs"
"os"
"path/filepath"
"reflect"
"regexp"
"strings"
"testing"
@ -1202,6 +1203,15 @@ func TestCVE202127919(t *testing.T) {
if err != nil {
t.Errorf("Error reading file: %v", err)
}
if len(r.File) != 1 {
t.Fatalf("No entries in the file list")
}
if r.File[0].Name != "../test.txt" {
t.Errorf("Unexpected entry name: %s", r.File[0].Name)
}
if _, err := r.File[0].Open(); err != nil {
t.Errorf("Error opening file: %v", err)
}
}
func TestReadDataDescriptor(t *testing.T) {
@ -1402,3 +1412,121 @@ func TestCVE202139293(t *testing.T) {
t.Fatalf("unexpected error, got: %v, want: %v", err, ErrFormat)
}
}
func TestCVE202141772(t *testing.T) {
// Archive contains a file whose name is exclusively made up of '/', '\'
// characters, or "../", "..\" paths, which would previously cause a panic.
//
// Length Method Size Cmpr Date Time CRC-32 Name
// -------- ------ ------- ---- ---------- ----- -------- ----
// 0 Stored 0 0% 08-05-2021 18:32 00000000 /
// 0 Stored 0 0% 09-14-2021 12:59 00000000 //
// 0 Stored 0 0% 09-14-2021 12:59 00000000 \
// 11 Stored 11 0% 09-14-2021 13:04 0d4a1185 /test.txt
// -------- ------- --- -------
// 11 11 0% 4 files
data := []byte{
0x50, 0x4b, 0x03, 0x04, 0x0a, 0x00, 0x00, 0x08,
0x00, 0x00, 0x06, 0x94, 0x05, 0x53, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x2f, 0x50,
0x4b, 0x03, 0x04, 0x0a, 0x00, 0x00, 0x00, 0x00,
0x00, 0x78, 0x67, 0x2e, 0x53, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x02, 0x00, 0x00, 0x00, 0x2f, 0x2f, 0x50,
0x4b, 0x03, 0x04, 0x0a, 0x00, 0x00, 0x00, 0x00,
0x00, 0x78, 0x67, 0x2e, 0x53, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x01, 0x00, 0x00, 0x00, 0x5c, 0x50, 0x4b,
0x03, 0x04, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00,
0x91, 0x68, 0x2e, 0x53, 0x85, 0x11, 0x4a, 0x0d,
0x0b, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00,
0x09, 0x00, 0x00, 0x00, 0x2f, 0x74, 0x65, 0x73,
0x74, 0x2e, 0x74, 0x78, 0x74, 0x68, 0x65, 0x6c,
0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64,
0x50, 0x4b, 0x01, 0x02, 0x14, 0x03, 0x0a, 0x00,
0x00, 0x08, 0x00, 0x00, 0x06, 0x94, 0x05, 0x53,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00,
0xed, 0x41, 0x00, 0x00, 0x00, 0x00, 0x2f, 0x50,
0x4b, 0x01, 0x02, 0x3f, 0x00, 0x0a, 0x00, 0x00,
0x00, 0x00, 0x00, 0x78, 0x67, 0x2e, 0x53, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x02, 0x00, 0x24, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00,
0x00, 0x1f, 0x00, 0x00, 0x00, 0x2f, 0x2f, 0x0a,
0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0x00, 0x18, 0x00, 0x93, 0x98, 0x25, 0x57, 0x25,
0xa9, 0xd7, 0x01, 0x93, 0x98, 0x25, 0x57, 0x25,
0xa9, 0xd7, 0x01, 0x93, 0x98, 0x25, 0x57, 0x25,
0xa9, 0xd7, 0x01, 0x50, 0x4b, 0x01, 0x02, 0x3f,
0x00, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78,
0x67, 0x2e, 0x53, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0x00, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x20, 0x00, 0x00, 0x00, 0x3f, 0x00, 0x00,
0x00, 0x5c, 0x0a, 0x00, 0x20, 0x00, 0x00, 0x00,
0x00, 0x00, 0x01, 0x00, 0x18, 0x00, 0x93, 0x98,
0x25, 0x57, 0x25, 0xa9, 0xd7, 0x01, 0x93, 0x98,
0x25, 0x57, 0x25, 0xa9, 0xd7, 0x01, 0x93, 0x98,
0x25, 0x57, 0x25, 0xa9, 0xd7, 0x01, 0x50, 0x4b,
0x01, 0x02, 0x3f, 0x00, 0x0a, 0x00, 0x00, 0x00,
0x00, 0x00, 0x91, 0x68, 0x2e, 0x53, 0x85, 0x11,
0x4a, 0x0d, 0x0b, 0x00, 0x00, 0x00, 0x0b, 0x00,
0x00, 0x00, 0x09, 0x00, 0x24, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
0x5e, 0x00, 0x00, 0x00, 0x2f, 0x74, 0x65, 0x73,
0x74, 0x2e, 0x74, 0x78, 0x74, 0x0a, 0x00, 0x20,
0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x18,
0x00, 0xa9, 0x80, 0x51, 0x01, 0x26, 0xa9, 0xd7,
0x01, 0x31, 0xd1, 0x57, 0x01, 0x26, 0xa9, 0xd7,
0x01, 0xdf, 0x48, 0x85, 0xf9, 0x25, 0xa9, 0xd7,
0x01, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00, 0x00,
0x00, 0x04, 0x00, 0x04, 0x00, 0x31, 0x01, 0x00,
0x00, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00,
}
r, err := NewReader(bytes.NewReader([]byte(data)), int64(len(data)))
if err != nil {
t.Fatalf("Error reading the archive: %v", err)
}
entryNames := []string{`/`, `//`, `\`, `/test.txt`}
var names []string
for _, f := range r.File {
names = append(names, f.Name)
if _, err := f.Open(); err != nil {
t.Errorf("Error opening %q: %v", f.Name, err)
}
if _, err := r.Open(f.Name); err == nil {
t.Errorf("Opening %q with fs.FS API succeeded", f.Name)
}
}
if !reflect.DeepEqual(names, entryNames) {
t.Errorf("Unexpected file entries: %q", names)
}
if _, err := r.Open(""); err == nil {
t.Errorf("Opening %q with fs.FS API succeeded", "")
}
if _, err := r.Open("test.txt"); err != nil {
t.Errorf("Error opening %q with fs.FS API: %v", "test.txt", err)
}
dirEntries, err := fs.ReadDir(r, ".")
if err != nil {
t.Fatalf("Error reading the root directory: %v", err)
}
if len(dirEntries) != 1 || dirEntries[0].Name() != "test.txt" {
t.Errorf("Unexpected directory entries")
for _, dirEntry := range dirEntries {
_, err := r.Open(dirEntry.Name())
t.Logf("%q (Open error: %v)", dirEntry.Name(), err)
}
t.FailNow()
}
info, err := dirEntries[0].Info()
if err != nil {
t.Fatalf("Error reading info entry: %v", err)
}
if name := info.Name(); name != "test.txt" {
t.Errorf("Inconsistent name in info entry: %v", name)
}
}


@ -745,19 +745,14 @@ func (b *Writer) WriteString(s string) (int, error) {
}
// ReadFrom implements io.ReaderFrom. If the underlying writer
// supports the ReadFrom method, and b has no buffered data yet,
// this calls the underlying ReadFrom without buffering.
// supports the ReadFrom method, this calls the underlying ReadFrom.
// If there is buffered data and an underlying ReadFrom, this fills
// the buffer and writes it before calling ReadFrom.
func (b *Writer) ReadFrom(r io.Reader) (n int64, err error) {
if b.err != nil {
return 0, b.err
}
if b.Buffered() == 0 {
if w, ok := b.wr.(io.ReaderFrom); ok {
n, err = w.ReadFrom(r)
b.err = err
return n, err
}
}
readerFrom, readerFromOK := b.wr.(io.ReaderFrom)
var m int
for {
if b.Available() == 0 {
@ -765,6 +760,12 @@ func (b *Writer) ReadFrom(r io.Reader) (n int64, err error) {
return n, err1
}
}
if readerFromOK && b.Buffered() == 0 {
nn, err := readerFrom.ReadFrom(r)
b.err = err
n += nn
return n, err
}
nr := 0
for nr < maxConsecutiveEmptyReads {
m, err = r.Read(b.buf[b.n:])


@ -1351,6 +1351,54 @@ func TestWriterReadFromErrNoProgress(t *testing.T) {
}
}
type readFromWriter struct {
buf []byte
writeBytes int
readFromBytes int
}
func (w *readFromWriter) Write(p []byte) (int, error) {
w.buf = append(w.buf, p...)
w.writeBytes += len(p)
return len(p), nil
}
func (w *readFromWriter) ReadFrom(r io.Reader) (int64, error) {
b, err := io.ReadAll(r)
w.buf = append(w.buf, b...)
w.readFromBytes += len(b)
return int64(len(b)), err
}
// Test that calling (*Writer).ReadFrom with a partially-filled buffer
// fills the buffer before switching over to ReadFrom.
func TestWriterReadFromWithBufferedData(t *testing.T) {
const bufsize = 16
input := createTestInput(64)
rfw := &readFromWriter{}
w := NewWriterSize(rfw, bufsize)
const writeSize = 8
if n, err := w.Write(input[:writeSize]); n != writeSize || err != nil {
t.Errorf("w.Write(%v bytes) = %v, %v; want %v, nil", writeSize, n, err, writeSize)
}
n, err := w.ReadFrom(bytes.NewReader(input[writeSize:]))
if wantn := len(input[writeSize:]); int(n) != wantn || err != nil {
t.Errorf("io.Copy(w, %v bytes) = %v, %v; want %v, nil", wantn, n, err, wantn)
}
if err := w.Flush(); err != nil {
t.Errorf("w.Flush() = %v, want nil", err)
}
if got, want := rfw.writeBytes, bufsize; got != want {
t.Errorf("wrote %v bytes with Write, want %v", got, want)
}
if got, want := rfw.readFromBytes, len(input)-bufsize; got != want {
t.Errorf("wrote %v bytes with ReadFrom, want %v", got, want)
}
}
func TestReadZero(t *testing.T) {
for _, size := range []int{100, 2} {
t.Run(fmt.Sprintf("bufsize=%d", size), func(t *testing.T) {
@ -1472,7 +1520,7 @@ func TestReaderDiscard(t *testing.T) {
wantBuffered: 0,
},
// Any error from filling shouldn't show up until we
// get past the valid bytes. Here we return we return 5 valid bytes at the same time
// get past the valid bytes. Here we return 5 valid bytes at the same time
// as an error, but test that we don't see the error from Discard.
{
name: "fill error, discard less",


@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//
//go:build linux
// +build linux
package bytes_test
@ -66,7 +65,11 @@ func TestIndexByteNearPageBoundary(t *testing.T) {
func TestIndexNearPageBoundary(t *testing.T) {
t.Parallel()
var q [64]byte
q := dangerousSlice(t)
if len(q) > 64 {
// Only worry about when we're near the end of a page.
q = q[len(q)-64:]
}
b := dangerousSlice(t)
if len(b) > 256 {
// Only worry about when we're near the end of a page.
@ -82,4 +85,16 @@ func TestIndexNearPageBoundary(t *testing.T) {
}
q[j-1] = 0
}
// Test differing alignments and sizes of q which always end on a page boundary.
q[len(q)-1] = 1 // difference is only found on the last byte
for j := 0; j < len(q); j++ {
for i := range b {
idx := Index(b[i:], q[j:])
if idx != -1 {
t.Fatalf("Index(b[%d:], q[%d:])=%d, want -1\n", i, j, idx)
}
}
}
q[len(q)-1] = 0
}


@ -21,7 +21,7 @@ func Equal(a, b []byte) bool {
}
// Compare returns an integer comparing two byte slices lexicographically.
// The result will be 0 if a==b, -1 if a < b, and +1 if a > b.
// The result will be 0 if a == b, -1 if a < b, and +1 if a > b.
// A nil argument is equivalent to an empty slice.
func Compare(a, b []byte) int {
return bytealg.Compare(a, b)
@ -699,7 +699,7 @@ func ToValidUTF8(s, replacement []byte) []byte {
if c < utf8.RuneSelf {
i++
invalid = false
b = append(b, byte(c))
b = append(b, c)
continue
}
_, wid := utf8.DecodeRune(s[i:])


@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build ignore
// +build ignore
// The run program is invoked via the dist tool.
// To invoke manually: go tool dist test -run api --no-rebuild


@ -50,7 +50,7 @@ func nilRegisterNumber(name string, n int16) (int16, bool) {
// Set configures the architecture specified by GOARCH and returns its representation.
// It returns nil if GOARCH is not recognized.
func Set(GOARCH string) *Arch {
func Set(GOARCH string, shared bool) *Arch {
switch GOARCH {
case "386":
return archX86(&x86.Link386)
@ -73,7 +73,7 @@ func Set(GOARCH string) *Arch {
case "ppc64le":
return archPPC64(&ppc64.Linkppc64le)
case "riscv64":
return archRISCV64()
return archRISCV64(shared)
case "s390x":
return archS390x()
case "wasm":
@ -378,6 +378,9 @@ func archPPC64(linkArch *obj.LinkArch) *Arch {
for i := ppc64.REG_MSR; i <= ppc64.REG_CR; i++ {
register[obj.Rconv(i)] = int16(i)
}
for i := ppc64.REG_CR0LT; i <= ppc64.REG_CR7SO; i++ {
register[obj.Rconv(i)] = int16(i)
}
register["CR"] = ppc64.REG_CR
register["XER"] = ppc64.REG_XER
register["LR"] = ppc64.REG_LR
@ -538,12 +541,18 @@ func archMips64(linkArch *obj.LinkArch) *Arch {
}
}
func archRISCV64() *Arch {
func archRISCV64(shared bool) *Arch {
register := make(map[string]int16)
// Standard register names.
for i := riscv.REG_X0; i <= riscv.REG_X31; i++ {
if i == riscv.REG_G {
// Disallow X3 in shared mode, as this will likely be used as the
// GP register, which could result in problems in non-Go code,
// including signal handlers.
if shared && i == riscv.REG_GP {
continue
}
if i == riscv.REG_TP || i == riscv.REG_G {
continue
}
name := fmt.Sprintf("X%d", i-riscv.REG_X0)


@ -19,7 +19,7 @@ import (
func setArch(goarch string) (*arch.Arch, *obj.Link) {
buildcfg.GOOS = "linux" // obj can handle this OS for all architectures.
buildcfg.GOARCH = goarch
architecture := arch.Set(goarch)
architecture := arch.Set(goarch, false)
if architecture == nil {
panic("asm: unrecognized architecture " + goarch)
}


@ -342,14 +342,14 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0
NOP F2
NOP $4
CRAND CR1, CR2, CR3 // 4c620a02
CRANDN CR1, CR2, CR3 // 4c620902
CREQV CR1, CR2, CR3 // 4c620a42
CRNAND CR1, CR2, CR3 // 4c6209c2
CRNOR CR1, CR2, CR3 // 4c620842
CROR CR1, CR2, CR3 // 4c620b82
CRORN CR1, CR2, CR3 // 4c620b42
CRXOR CR1, CR2, CR3 // 4c620982
CRAND CR0GT, CR0EQ, CR0SO // 4c620a02
CRANDN CR0GT, CR0EQ, CR0SO // 4c620902
CREQV CR0GT, CR0EQ, CR0SO // 4c620a42
CRNAND CR0GT, CR0EQ, CR0SO // 4c6209c2
CRNOR CR0GT, CR0EQ, CR0SO // 4c620842
CROR CR0GT, CR0EQ, CR0SO // 4c620b82
CRORN CR0GT, CR0EQ, CR0SO // 4c620b42
CRXOR CR0GT, CR0EQ, CR0SO // 4c620982
ISEL $1, R3, R4, R5 // 7ca3205e
ISEL $0, R3, R4, R5 // 7ca3201e


@ -28,6 +28,10 @@ var (
CompilingRuntime = flag.Bool("compiling-runtime", false, "source to be compiled is part of the Go runtime")
)
var DebugFlags struct {
MayMoreStack string `help:"call named function before all stack growth checks"`
}
var (
D MultiFlag
I MultiFlag
@ -39,6 +43,7 @@ func init() {
flag.Var(&D, "D", "predefined symbol with optional simple value -D=identifier=value; can be set multiple times")
flag.Var(&I, "I", "include directory; can be set multiple times")
flag.BoolVar(&DebugV, "v", false, "print debug output")
flag.Var(objabi.NewDebugFlag(&DebugFlags, nil), "d", "enable debugging settings; try -d help")
objabi.AddVersionFlag() // -V
objabi.Flagcount("S", "print assembly and machine code", &PrintOut)
}


@ -29,19 +29,20 @@ func main() {
buildcfg.Check()
GOARCH := buildcfg.GOARCH
architecture := arch.Set(GOARCH)
flags.Parse()
architecture := arch.Set(GOARCH, *flags.Shared || *flags.Dynlink)
if architecture == nil {
log.Fatalf("unrecognized architecture %s", GOARCH)
}
flags.Parse()
ctxt := obj.Linknew(architecture.LinkArch)
ctxt.Debugasm = flags.PrintOut
ctxt.Debugvlog = flags.DebugV
ctxt.Flag_dynlink = *flags.Dynlink
ctxt.Flag_linkshared = *flags.Linkshared
ctxt.Flag_shared = *flags.Shared || *flags.Dynlink
ctxt.Flag_maymorestack = flags.DebugFlags.MayMoreStack
ctxt.IsAsm = true
ctxt.Pkgpath = *flags.Importpath
switch *flags.Spectre {


@ -29,7 +29,7 @@ import (
"unicode"
"unicode/utf8"
"cmd/internal/str"
"cmd/internal/quoted"
)
var debugDefine = flag.Bool("debug-define", false, "print relevant #defines")
@ -1568,7 +1568,7 @@ func checkGCCBaseCmd() ([]string, error) {
if value == "" {
value = defaultCC(goos, goarch)
}
args, err := str.SplitQuotedFields(value)
args, err := quoted.Split(value)
if err != nil {
return nil, err
}


@ -44,6 +44,8 @@ Flags:
Print compiler version and exit.
-asmhdr file
Write assembly header to file.
-asan
Insert calls to C/C++ address sanitizer.
-buildid id
Record id as the build id in the export metadata.
-blockprofile file


@ -780,11 +780,11 @@ func (state *assignState) assignParamOrReturn(pt *types.Type, n types.Object, is
}
// ComputePadding returns a list of "post element" padding values in
// the case where we have a structure being passed in registers. Give
// a param assignment corresponding to a struct, it returns a list of
// contaning padding values for each field, e.g. the Kth element in
// the case where we have a structure being passed in registers. Given
// a param assignment corresponding to a struct, it returns a list
// containing padding values for each field, e.g. the Kth element in
// the list is the amount of padding between field K and the following
// field. For things that are not struct (or structs without padding)
// field. For things that are not structs (or structs without padding)
// it returns a list of zeros. Example:
//
// type small struct {
@ -796,8 +796,8 @@ func (state *assignState) assignParamOrReturn(pt *types.Type, n types.Object, is
//
// For this struct we would return a list [0, 1, 0, 0], meaning that
// we have one byte of padding after the second field, and no bytes of
// padding after any of the other fields. Input parameter "storage"
// is with enough capacity to accommodate padding elements for
// padding after any of the other fields. Input parameter "storage" is
// a slice with enough capacity to accommodate padding elements for
// the architected register set in question.
func (pa *ABIParamAssignment) ComputePadding(storage []uint64) []uint64 {
nr := len(pa.Registers)


@ -772,7 +772,9 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.From.Val = math.Float64frombits(uint64(v.AuxInt))
p.To.Type = obj.TYPE_REG
p.To.Reg = x
case ssa.OpAMD64MOVQload, ssa.OpAMD64MOVSSload, ssa.OpAMD64MOVSDload, ssa.OpAMD64MOVLload, ssa.OpAMD64MOVWload, ssa.OpAMD64MOVBload, ssa.OpAMD64MOVBQSXload, ssa.OpAMD64MOVWQSXload, ssa.OpAMD64MOVLQSXload, ssa.OpAMD64MOVOload:
case ssa.OpAMD64MOVQload, ssa.OpAMD64MOVLload, ssa.OpAMD64MOVWload, ssa.OpAMD64MOVBload, ssa.OpAMD64MOVOload,
ssa.OpAMD64MOVSSload, ssa.OpAMD64MOVSDload, ssa.OpAMD64MOVBQSXload, ssa.OpAMD64MOVWQSXload, ssa.OpAMD64MOVLQSXload,
ssa.OpAMD64MOVBEQload, ssa.OpAMD64MOVBELload:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
@ -788,7 +790,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Reg = v.Reg()
case ssa.OpAMD64MOVQstore, ssa.OpAMD64MOVSSstore, ssa.OpAMD64MOVSDstore, ssa.OpAMD64MOVLstore, ssa.OpAMD64MOVWstore, ssa.OpAMD64MOVBstore, ssa.OpAMD64MOVOstore,
ssa.OpAMD64ADDQmodify, ssa.OpAMD64SUBQmodify, ssa.OpAMD64ANDQmodify, ssa.OpAMD64ORQmodify, ssa.OpAMD64XORQmodify,
ssa.OpAMD64ADDLmodify, ssa.OpAMD64SUBLmodify, ssa.OpAMD64ANDLmodify, ssa.OpAMD64ORLmodify, ssa.OpAMD64XORLmodify:
ssa.OpAMD64ADDLmodify, ssa.OpAMD64SUBLmodify, ssa.OpAMD64ANDLmodify, ssa.OpAMD64ORLmodify, ssa.OpAMD64XORLmodify,
ssa.OpAMD64MOVBEQstore, ssa.OpAMD64MOVBELstore:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()


@ -53,7 +53,9 @@ func TestGoAMD64v1(t *testing.T) {
opcodes := map[string]bool{}
var features []string
for feature, opcodeList := range featureToOpcodes {
features = append(features, fmt.Sprintf("cpu.%s=off", feature))
if runtimeFeatures[feature] {
features = append(features, fmt.Sprintf("cpu.%s=off", feature))
}
for _, op := range opcodeList {
opcodes[op] = true
}
@ -204,14 +206,28 @@ func clobber(t *testing.T, src string, dst *os.File, opcodes map[string]bool) {
f.Close()
}
func setOf(keys ...string) map[string]bool {
m := make(map[string]bool, len(keys))
for _, key := range keys {
m[key] = true
}
return m
}
var runtimeFeatures = setOf(
"adx", "aes", "avx", "avx2", "bmi1", "bmi2", "erms", "fma",
"pclmulqdq", "popcnt", "rdtscp", "sse3", "sse41", "sse42", "ssse3",
)
var featureToOpcodes = map[string][]string{
// Note: we include *q, *l, and plain opcodes here.
// go tool objdump doesn't include a [QL] on popcnt instructions, until CL 351889
// native objdump doesn't include [QL] on linux.
"popcnt": []string{"popcntq", "popcntl", "popcnt"},
"bmi1": []string{"andnq", "andnl", "andn", "blsiq", "blsil", "blsi", "blsmskq", "blsmskl", "blsmsk", "blsrq", "blsrl", "blsr", "tzcntq", "tzcntl", "tzcnt"},
"sse41": []string{"roundsd"},
"fma": []string{"vfmadd231sd"},
"popcnt": {"popcntq", "popcntl", "popcnt"},
"bmi1": {"andnq", "andnl", "andn", "blsiq", "blsil", "blsi", "blsmskq", "blsmskl", "blsmsk", "blsrq", "blsrl", "blsr", "tzcntq", "tzcntl", "tzcnt"},
"sse41": {"roundsd"},
"fma": {"vfmadd231sd"},
"movbe": {"movbeqq", "movbeq", "movbell", "movbel", "movbe"},
}
// Test to use POPCNT instruction, if available
@ -364,5 +380,4 @@ func TestFMA(t *testing.T) {
t.Errorf("FMA(%f,%f,%f) = %f, want %f", tt.x, tt.y, tt.z, got, tt.want)
}
}
}


@ -67,6 +67,7 @@ var NoInstrumentPkgs = []string{
"runtime",
"runtime/race",
"runtime/msan",
"runtime/asan",
"internal/cpu",
}


@ -6,15 +6,6 @@
package base
import (
"fmt"
"log"
"os"
"reflect"
"strconv"
"strings"
)
// Debug holds the parsed debugging configuration values.
var Debug DebugFlags
@ -26,7 +17,7 @@ var Debug DebugFlags
// Each setting is name=value; for ints, name is short for name=1.
type DebugFlags struct {
Append int `help:"print information about append compilation"`
Checkptr int `help:"instrument unsafe pointer conversions"`
Checkptr int `help:"instrument unsafe pointer conversions\n0: instrumentation disabled\n1: conversions involving unsafe.Pointer are instrumented\n2: conversions to unsafe.Pointer force heap allocation"`
Closure int `help:"print information about closure compilation"`
DclStack int `help:"run internal dclstack check"`
Defer int `help:"print information about defer compilation"`
@ -40,7 +31,7 @@ type DebugFlags struct {
LocationLists int `help:"print information about DWARF location list creation"`
Nil int `help:"print information about nil checks"`
NoOpenDefer int `help:"disable open-coded defers"`
PCTab string `help:"print named pc-value table"`
PCTab string `help:"print named pc-value table\nOne of: pctospadj, pctofile, pctoline, pctoinline, pctopcdata"`
Panic int `help:"show all compiler panics"`
Slice int `help:"print information about slice compilation"`
SoftFloat int `help:"force compiler to emit soft-float code"`
@ -51,142 +42,12 @@ type DebugFlags struct {
UnifiedQuirks int `help:"enable unified IR construction's quirks mode"`
WB int `help:"print information about write barriers"`
ABIWrap int `help:"print information about ABI wrapper generation"`
MayMoreStack string `help:"call named function before all stack growth checks"`
any bool // set when any of the values have been set
}
// Any reports whether any of the debug flags have been set.
func (d *DebugFlags) Any() bool { return d.any }
type debugField struct {
name string
help string
val interface{} // *int or *string
}
var debugTab []debugField
func init() {
v := reflect.ValueOf(&Debug).Elem()
t := v.Type()
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
if f.Name == "any" {
continue
}
name := strings.ToLower(f.Name)
help := f.Tag.Get("help")
if help == "" {
panic(fmt.Sprintf("base.Debug.%s is missing help text", f.Name))
}
ptr := v.Field(i).Addr().Interface()
switch ptr.(type) {
default:
panic(fmt.Sprintf("base.Debug.%s has invalid type %v (must be int or string)", f.Name, f.Type))
case *int, *string:
// ok
}
debugTab = append(debugTab, debugField{name, help, ptr})
}
Any bool // set when any of the debug flags have been set
}
// DebugSSA is called to set a -d ssa/... option.
// If nil, those options are reported as invalid options.
// If DebugSSA returns a non-empty string, that text is reported as a compiler error.
var DebugSSA func(phase, flag string, val int, valString string) string
// parseDebug parses the -d debug string argument.
func parseDebug(debugstr string) {
// parse -d argument
if debugstr == "" {
return
}
Debug.any = true
Split:
for _, name := range strings.Split(debugstr, ",") {
if name == "" {
continue
}
// display help about the -d option itself and quit
if name == "help" {
fmt.Print(debugHelpHeader)
maxLen := len("ssa/help")
for _, t := range debugTab {
if len(t.name) > maxLen {
maxLen = len(t.name)
}
}
for _, t := range debugTab {
fmt.Printf("\t%-*s\t%s\n", maxLen, t.name, t.help)
}
// ssa options have their own help
fmt.Printf("\t%-*s\t%s\n", maxLen, "ssa/help", "print help about SSA debugging")
fmt.Print(debugHelpFooter)
os.Exit(0)
}
val, valstring, haveInt := 1, "", true
if i := strings.IndexAny(name, "=:"); i >= 0 {
var err error
name, valstring = name[:i], name[i+1:]
val, err = strconv.Atoi(valstring)
if err != nil {
val, haveInt = 1, false
}
}
for _, t := range debugTab {
if t.name != name {
continue
}
switch vp := t.val.(type) {
case nil:
// Ignore
case *string:
*vp = valstring
case *int:
if !haveInt {
log.Fatalf("invalid debug value %v", name)
}
*vp = val
default:
panic("bad debugtab type")
}
continue Split
}
// special case for ssa for now
if DebugSSA != nil && strings.HasPrefix(name, "ssa/") {
// expect form ssa/phase/flag
// e.g. -d=ssa/generic_cse/time
// _ in phase name also matches space
phase := name[4:]
flag := "debug" // default flag is debug
if i := strings.Index(phase, "/"); i >= 0 {
flag = phase[i+1:]
phase = phase[:i]
}
err := DebugSSA(phase, flag, val, valstring)
if err != "" {
log.Fatalf(err)
}
continue Split
}
log.Fatalf("unknown debug key -d %s\n", name)
}
}
const debugHelpHeader = `usage: -d arg[,arg]* and arg is <key>[=<value>]
<key> is one of:
`
const debugHelpFooter = `
<value> is key-specific.
Key "checkptr" supports values:
"0": instrumentation disabled
"1": conversions involving unsafe.Pointer are instrumented
"2": conversions to unsafe.Pointer force heap allocation
Key "pctab" supports values:
"pctospadj", "pctofile", "pctoline", "pctoinline", "pctopcdata"
`
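For illustration, the -d parsing shown above (removed in this change) accepted arguments in the forms described by debugHelpHeader: a bare key such as -d=nil (for int keys, short for nil=1), a key=value pair such as -d=checkptr=2 or -d=pctab=pctoline, and ssa options of the form -d=ssa/<phase>/<flag>, e.g. -d=ssa/generic_cse/time. In this change that parsing moves to objabi.NewDebugFlag (see the ParseFlags hunk below).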

View File

@ -64,19 +64,19 @@ type CmdFlags struct {
// V is added by objabi.AddVersionFlag
W CountFlag "help:\"debug parse tree after type checking\""
LowerC int "help:\"concurrency during compilation (1 means no concurrency)\""
LowerD func(string) "help:\"enable debugging settings; try -d help\""
LowerE CountFlag "help:\"no limit on number of errors reported\""
LowerH CountFlag "help:\"halt on error\""
LowerJ CountFlag "help:\"debug runtime-initialized variables\""
LowerL CountFlag "help:\"disable inlining\""
LowerM CountFlag "help:\"print optimization decisions\""
LowerO string "help:\"write output to `file`\""
LowerP *string "help:\"set expected package import `path`\"" // &Ctxt.Pkgpath, set below
LowerR CountFlag "help:\"debug generated wrappers\""
LowerT bool "help:\"enable tracing for debugging the compiler\""
LowerW CountFlag "help:\"debug type checking\""
LowerV *bool "help:\"increase debug verbosity\""
LowerC int "help:\"concurrency during compilation (1 means no concurrency)\""
LowerD flag.Value "help:\"enable debugging settings; try -d help\""
LowerE CountFlag "help:\"no limit on number of errors reported\""
LowerH CountFlag "help:\"halt on error\""
LowerJ CountFlag "help:\"debug runtime-initialized variables\""
LowerL CountFlag "help:\"disable inlining\""
LowerM CountFlag "help:\"print optimization decisions\""
LowerO string "help:\"write output to `file`\""
LowerP *string "help:\"set expected package import `path`\"" // &Ctxt.Pkgpath, set below
LowerR CountFlag "help:\"debug generated wrappers\""
LowerT bool "help:\"enable tracing for debugging the compiler\""
LowerW CountFlag "help:\"debug type checking\""
LowerV *bool "help:\"increase debug verbosity\""
// Special characters
Percent int "flag:\"%\" help:\"debug non-static initializers\""
@ -84,6 +84,7 @@ type CmdFlags struct {
// Longer names
AsmHdr string "help:\"write assembly header to `file`\""
ASan bool "help:\"build code compatible with C/C++ address sanitizer\""
Bench string "help:\"append benchmark times to `file`\""
BlockProfile string "help:\"write block profile to `file`\""
BuildID string "help:\"record `id` as the build id in the export metadata\""
@ -108,7 +109,7 @@ type CmdFlags struct {
Live CountFlag "help:\"debug liveness analysis\""
MSan bool "help:\"build code compatible with C/C++ memory sanitizer\""
MemProfile string "help:\"write memory profile to `file`\""
MemProfileRate int64 "help:\"set runtime.MemProfileRate to `rate`\""
MemProfileRate int "help:\"set runtime.MemProfileRate to `rate`\""
MutexProfile string "help:\"write mutex profile to `file`\""
NoLocalImports bool "help:\"reject local (relative) imports\""
Pack bool "help:\"write to file.a instead of file.o\""
@ -144,7 +145,7 @@ func ParseFlags() {
Flag.I = addImportDir
Flag.LowerC = 1
Flag.LowerD = parseDebug
Flag.LowerD = objabi.NewDebugFlag(&Debug, DebugSSA)
Flag.LowerP = &Ctxt.Pkgpath
Flag.LowerV = &Ctxt.Debugvlog
@ -177,6 +178,9 @@ func ParseFlags() {
if Flag.MSan && !sys.MSanSupported(buildcfg.GOOS, buildcfg.GOARCH) {
log.Fatalf("%s/%s does not support -msan", buildcfg.GOOS, buildcfg.GOARCH)
}
if Flag.ASan && !sys.ASanSupported(buildcfg.GOOS, buildcfg.GOARCH) {
log.Fatalf("%s/%s does not support -asan", buildcfg.GOOS, buildcfg.GOARCH)
}
if Flag.Race && !sys.RaceDetectorSupported(buildcfg.GOOS, buildcfg.GOARCH) {
log.Fatalf("%s/%s does not support -race", buildcfg.GOOS, buildcfg.GOARCH)
}
@ -188,6 +192,7 @@ func ParseFlags() {
Ctxt.Flag_shared = Ctxt.Flag_dynlink || Ctxt.Flag_shared
Ctxt.Flag_optimize = Flag.N == 0
Ctxt.Debugasm = int(Flag.S)
Ctxt.Flag_maymorestack = Debug.MayMoreStack
if flag.NArg() < 1 {
usage()
@ -217,12 +222,16 @@ func ParseFlags() {
}
Flag.LowerO = p + suffix
}
if Flag.Race && Flag.MSan {
switch {
case Flag.Race && Flag.MSan:
log.Fatal("cannot use both -race and -msan")
case Flag.Race && Flag.ASan:
log.Fatal("cannot use both -race and -asan")
case Flag.MSan && Flag.ASan:
log.Fatal("cannot use both -msan and -asan")
}
if Flag.Race || Flag.MSan {
// -race and -msan imply -d=checkptr for now.
if Flag.Race || Flag.MSan || Flag.ASan {
// -race, -msan and -asan imply -d=checkptr for now.
if Debug.Checkptr == -1 { // if not set explicitly
Debug.Checkptr = 1
}
@ -322,6 +331,12 @@ func registerFlags() {
case funcType:
f := v.Field(i).Interface().(func(string))
objabi.Flagfn1(name, help, f)
default:
if val, ok := v.Field(i).Interface().(flag.Value); ok {
flag.Var(val, name, help)
} else {
panic(fmt.Sprintf("base.Flag.%s has unexpected type %s", f.Name, f.Type))
}
}
}
}
@ -349,7 +364,7 @@ func concurrentBackendAllowed() bool {
// while writing the object file, and that is non-concurrent.
// Adding Debug_vlog, however, causes Debug.S to also print
// while flushing the plist, which happens concurrently.
if Ctxt.Debugvlog || Debug.Any() || Flag.Live > 0 {
if Ctxt.Debugvlog || Debug.Any || Flag.Live > 0 {
return false
}
// TODO: Test and delete this condition.

View File

@ -128,10 +128,21 @@ func (bv BitVec) IsEmpty() bool {
return true
}
func (bv BitVec) Count() int {
n := 0
for _, x := range bv.B {
n += bits.OnesCount32(x)
}
return n
}
func (bv BitVec) Not() {
for i, x := range bv.B {
bv.B[i] = ^x
}
if bv.N%wordBits != 0 {
bv.B[len(bv.B)-1] &= 1<<uint(bv.N%wordBits) - 1 // clear bits past N in the last word
}
}
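As a small worked example (assuming 32-bit words, as the use of bits.OnesCount32 implies): for a BitVec with N = 5, Not flips all 32 bits of the single word and then masks it with 1<<5 - 1, so bits 5 through 31 stay clear and Count still reports at most 5.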
// union

View File

@ -333,11 +333,32 @@ func (e *escape) rewriteArgument(argp *ir.Node, init *ir.Nodes, call ir.Node, fn
}
}
// Peel away any slice lits.
// Peel away any slice literals so we can better escape-analyze
// them. For example:
//
// go F([]int{a, b})
//
// If F doesn't escape its arguments, then the slice can
// be allocated on the new goroutine's stack.
//
// For variadic functions, the compiler has already rewritten:
//
// f(a, b, c)
//
// to:
//
// f([]T{a, b, c}...)
//
// So we need to look into slice elements to handle uintptr(ptr)
// arguments to syscall-like functions correctly.
if arg := *argp; arg.Op() == ir.OSLICELIT {
list := arg.(*ir.CompLitExpr).List
for i := range list {
visit(arg.Pos(), &list[i])
el := &list[i]
if list[i].Op() == ir.OKEY {
el = &list[i].(*ir.KeyExpr).Value
}
visit(arg.Pos(), el)
}
} else {
visit(call.Pos(), argp)

View File

@ -31,7 +31,7 @@ func dumpasmhdr() {
if t == constant.Float || t == constant.Complex {
break
}
fmt.Fprintf(b, "#define const_%s %#v\n", n.Sym().Name, n.Val())
fmt.Fprintf(b, "#define const_%s %v\n", n.Sym().Name, n.Val())
case ir.OTYPE:
t := n.Type()

View File

@ -107,7 +107,7 @@ func Main(archInit func(*ssagen.ArchInfo)) {
// Record flags that affect the build result. (And don't
// record flags that don't, since that would cause spurious
// changes in the binary.)
dwarfgen.RecordFlags("B", "N", "l", "msan", "race", "shared", "dynlink", "dwarf", "dwarflocationlists", "dwarfbasentries", "smallframes", "spectre")
dwarfgen.RecordFlags("B", "N", "l", "msan", "race", "asan", "shared", "dynlink", "dwarf", "dwarflocationlists", "dwarfbasentries", "smallframes", "spectre")
if !base.EnableTrace && base.Flag.LowerT {
log.Fatalf("compiler not built with support for -t")
@ -149,11 +149,12 @@ func Main(archInit func(*ssagen.ArchInfo)) {
if base.Compiling(base.NoInstrumentPkgs) {
base.Flag.Race = false
base.Flag.MSan = false
base.Flag.ASan = false
}
ssagen.Arch.LinkArch.Init(base.Ctxt)
startProfile()
if base.Flag.Race || base.Flag.MSan {
if base.Flag.Race || base.Flag.MSan || base.Flag.ASan {
base.Flag.Cfg.Instrumenting = true
}
if base.Flag.Dwarf {
@ -244,6 +245,11 @@ func Main(archInit func(*ssagen.ArchInfo)) {
base.Timer.Start("fe", "inlining")
if base.Flag.LowerL != 0 {
inline.InlinePackage()
// If any new fully-instantiated types were referenced during
// inlining, we need to create needed instantiations.
if len(typecheck.GetInstTypeList()) > 0 {
noder.BuildInstantiations(false)
}
}
noder.MakeWrappers(typecheck.Target) // must happen after inlining

View File

@ -249,8 +249,7 @@ func addGCLocals() {
}
}
if x := fn.StackObjects; x != nil {
attr := int16(obj.RODATA)
objw.Global(x, int32(len(x.P)), attr)
objw.Global(x, int32(len(x.P)), obj.RODATA)
x.Set(obj.AttrStatic, true)
}
if x := fn.OpenCodedDeferInfo; x != nil {
@ -260,6 +259,10 @@ func addGCLocals() {
objw.Global(x, int32(len(x.P)), obj.RODATA|obj.DUPOK)
x.Set(obj.AttrStatic, true)
}
if x := fn.ArgLiveInfo; x != nil {
objw.Global(x, int32(len(x.P)), obj.RODATA|obj.DUPOK)
x.Set(obj.AttrStatic, true)
}
}
}

View File

@ -12,10 +12,7 @@ import (
"cmd/compile/internal/base"
)
var (
memprofilerate int64
traceHandler func(string)
)
var traceHandler func(string)
func startProfile() {
if base.Flag.CPUProfile != "" {
@ -29,8 +26,8 @@ func startProfile() {
base.AtExit(pprof.StopCPUProfile)
}
if base.Flag.MemProfile != "" {
if memprofilerate != 0 {
runtime.MemProfileRate = int(memprofilerate)
if base.Flag.MemProfileRate != 0 {
runtime.MemProfileRate = base.Flag.MemProfileRate
}
f, err := os.Create(base.Flag.MemProfile)
if err != nil {

View File

@ -43,12 +43,12 @@ func (r *intReader) uint64() uint64 {
// Keep this in sync with constants in iexport.go.
const (
iexportVersionGo1_11 = 0
iexportVersionPosCol = 1
// TODO: before release, change this back to 2.
iexportVersionGenerics = iexportVersionPosCol
iexportVersionGo1_11 = 0
iexportVersionPosCol = 1
iexportVersionGenerics = 1 // probably change to 2 before release
iexportVersionGo1_18 = 2
iexportVersionCurrent = iexportVersionGenerics
iexportVersionCurrent = 2
)
type ident struct {
@ -99,13 +99,9 @@ func ImportData(imports map[string]*types2.Package, data, path string) (pkg *typ
version = int64(r.uint64())
switch version {
case /* iexportVersionGenerics, */ iexportVersionPosCol, iexportVersionGo1_11:
case iexportVersionGo1_18, iexportVersionPosCol, iexportVersionGo1_11:
default:
if version > iexportVersionGenerics {
errorf("unstable iexport format version %d, just rebuild compiler and std library", version)
} else {
errorf("unknown iexport format version %d", version)
}
errorf("unknown iexport format version %d", version)
}
sLen := int64(r.uint64())
@ -374,7 +370,19 @@ func (r *importReader) obj(name string) {
id := ident{r.currPkg.Name(), name}
r.p.tparamIndex[id] = t
t.SetConstraint(r.typ())
var implicit bool
if r.p.exportVersion >= iexportVersionGo1_18 {
implicit = r.bool()
}
constraint := r.typ()
if implicit {
iface, _ := constraint.(*types2.Interface)
if iface == nil {
errorf("non-interface constraint marked implicit")
}
iface.MarkImplicit()
}
t.SetConstraint(constraint)
case 'V':
typ := r.typ()
@ -392,6 +400,10 @@ func (r *importReader) declare(obj types2.Object) {
func (r *importReader) value() (typ types2.Type, val constant.Value) {
typ = r.typ()
if r.p.exportVersion >= iexportVersionGo1_18 {
// TODO: add support for using the kind
_ = constant.Kind(r.int64())
}
switch b := typ.Underlying().(*types2.Basic); b.Info() & types2.IsConstType {
case types2.IsBoolean:

View File

@ -309,7 +309,7 @@ func (v *hairyVisitor) doNode(n ir.Node) bool {
break
}
if fn := inlCallee(n.X); fn != nil && fn.Inl != nil {
if fn := inlCallee(n.X); fn != nil && typecheck.HaveInlineBody(fn) {
v.budget -= fn.Inl.Cost
break
}
@ -358,8 +358,7 @@ func (v *hairyVisitor) doNode(n ir.Node) bool {
return true
}
case ir.ORANGE,
ir.OSELECT,
case ir.OSELECT,
ir.OGO,
ir.ODEFER,
ir.ODCLTYPE, // can't print yet
@ -390,27 +389,6 @@ func (v *hairyVisitor) doNode(n ir.Node) bool {
// These nodes don't produce code; omit from inlining budget.
return false
case ir.OFOR, ir.OFORUNTIL:
n := n.(*ir.ForStmt)
if n.Label != nil {
v.reason = "labeled control"
return true
}
case ir.OSWITCH:
n := n.(*ir.SwitchStmt)
if n.Label != nil {
v.reason = "labeled control"
return true
}
// case ir.ORANGE, ir.OSELECT in "unhandled" above
case ir.OBREAK, ir.OCONTINUE:
n := n.(*ir.BranchStmt)
if n.Label != nil {
// Should have short-circuited due to labeled control error above.
base.Fatalf("unexpected labeled break/continue: %v", n)
}
case ir.OIF:
n := n.(*ir.IfStmt)
if ir.IsConst(n.Cond, constant.Bool) {
@ -607,7 +585,7 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No
if ir.IsIntrinsicCall(call) {
break
}
if fn := inlCallee(call.X); fn != nil && fn.Inl != nil {
if fn := inlCallee(call.X); fn != nil && typecheck.HaveInlineBody(fn) {
n = mkinlcall(call, fn, maxCost, inlMap, edit)
}
}
@ -707,6 +685,27 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
return n
}
// Don't inline a function fn that has no shape parameters, but is passed at
// least one shape arg. This means we must be inlining a non-generic function
// fn that was passed into a generic function, and can be called with a shape
// arg because it matches an appropriate type parameter. But fn may include
// an interface conversion (that may be applied to a shape arg) that was not
// apparent when we first created the instantiation of the generic function.
// We can't handle this if we actually do the inlining, since we want to know
// all interface conversions immediately after stenciling. So, we avoid
// inlining in this case. See #49309.
if !fn.Type().HasShape() {
for _, arg := range n.Args {
if arg.Type().HasShape() {
if logopt.Enabled() {
logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(ir.CurFunc),
fmt.Sprintf("inlining non-shape function %v with shape args", ir.FuncName(fn)))
}
return n
}
}
}
if base.Flag.Cfg.Instrumenting && types.IsRuntimePkg(fn.Sym().Pkg) {
// Runtime package must not be instrumented.
// Instrument skips runtime package. However, some runtime code can be
@ -1244,7 +1243,7 @@ func (subst *inlsubst) node(n ir.Node) ir.Node {
// Don't do special substitutions if inside a closure
break
}
// Since we don't handle bodies with closures,
// Because of the above test for subst.newclofn,
// this return is guaranteed to belong to the current inlined function.
n := n.(*ir.ReturnStmt)
init := subst.list(n.Init())
@ -1272,7 +1271,7 @@ func (subst *inlsubst) node(n ir.Node) ir.Node {
typecheck.Stmts(init)
return ir.NewBlockStmt(base.Pos, init)
case ir.OGOTO:
case ir.OGOTO, ir.OBREAK, ir.OCONTINUE:
if subst.newclofn != nil {
// Don't do special substitutions if inside a closure
break
@ -1280,9 +1279,8 @@ func (subst *inlsubst) node(n ir.Node) ir.Node {
n := n.(*ir.BranchStmt)
m := ir.Copy(n).(*ir.BranchStmt)
m.SetPos(subst.updatedPos(m.Pos()))
*m.PtrInit() = nil
p := fmt.Sprintf("%s·%d", n.Label.Name, inlgen)
m.Label = typecheck.Lookup(p)
m.SetInit(nil)
m.Label = translateLabel(n.Label)
return m
case ir.OLABEL:
@ -1293,9 +1291,8 @@ func (subst *inlsubst) node(n ir.Node) ir.Node {
n := n.(*ir.LabelStmt)
m := ir.Copy(n).(*ir.LabelStmt)
m.SetPos(subst.updatedPos(m.Pos()))
*m.PtrInit() = nil
p := fmt.Sprintf("%s·%d", n.Label.Name, inlgen)
m.Label = typecheck.Lookup(p)
m.SetInit(nil)
m.Label = translateLabel(n.Label)
return m
case ir.OCLOSURE:
@ -1307,6 +1304,27 @@ func (subst *inlsubst) node(n ir.Node) ir.Node {
m.SetPos(subst.updatedPos(m.Pos()))
ir.EditChildren(m, subst.edit)
if subst.newclofn == nil {
// Translate any label on FOR, RANGE loops or SWITCH
switch m.Op() {
case ir.OFOR:
m := m.(*ir.ForStmt)
m.Label = translateLabel(m.Label)
return m
case ir.ORANGE:
m := m.(*ir.RangeStmt)
m.Label = translateLabel(m.Label)
return m
case ir.OSWITCH:
m := m.(*ir.SwitchStmt)
m.Label = translateLabel(m.Label)
return m
}
}
switch m := m.(type) {
case *ir.AssignStmt:
if lhs, ok := m.X.(*ir.Name); ok && lhs.Defn == &subst.defnMarker {
@ -1323,6 +1341,16 @@ func (subst *inlsubst) node(n ir.Node) ir.Node {
return m
}
// translateLabel makes a label from an inlined function (if non-nil) unique by
// appending "·inlgen".
func translateLabel(l *types.Sym) *types.Sym {
if l == nil {
return nil
}
p := fmt.Sprintf("%s·%d", l.Name, inlgen)
return typecheck.Lookup(p)
}
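For example, with inlgen == 1 a label named loop in an inlined body becomes loop·1, so labels from different inlined calls of the same function cannot collide with each other or with labels already present in the caller.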
func (subst *inlsubst) updatedPos(xpos src.XPos) src.XPos {
if subst.noPosUpdate {
return xpos

View File

@ -201,7 +201,7 @@ const (
funcNilCheckDisabled // disable nil checks when compiling this function
funcInlinabilityChecked // inliner has already determined whether the function is inlinable
funcExportInline // include inline body in export data
funcInstrumentBody // add race/msan instrumentation during SSA construction
funcInstrumentBody // add race/msan/asan instrumentation during SSA construction
funcOpenCodedDeferDisallowed // can't do open-coded defers
funcClosureCalled // closure is only immediately called; used by escape analysis
)

View File

@ -146,7 +146,10 @@ func (n *Name) editChildren(edit func(Node) Node) {}
// That is, given "type T Defn", it returns Defn.
// It is used by package types.
func (n *Name) TypeDefn() *types.Type {
return n.Ntype.Type()
if n.Ntype != nil {
return n.Ntype.Type()
}
return n.Type()
}
// RecordFrameOffset records the frame offset for the name.

View File

@ -15,6 +15,8 @@ var Syms struct {
AssertE2I2 *obj.LSym
AssertI2I *obj.LSym
AssertI2I2 *obj.LSym
Asanread *obj.LSym
Asanwrite *obj.LSym
CheckPtrAlignment *obj.LSym
Deferproc *obj.LSym
DeferprocStack *obj.LSym

View File

@ -0,0 +1,339 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package liveness
import (
"fmt"
"cmd/compile/internal/base"
"cmd/compile/internal/bitvec"
"cmd/compile/internal/ir"
"cmd/compile/internal/objw"
"cmd/compile/internal/ssa"
"cmd/internal/obj"
"cmd/internal/objabi"
)
// Argument liveness tracking.
//
// For arguments passed in registers, this file tracks if their spill slots
// are live for runtime traceback. An argument spill slot is live at a PC
// if we know that an actual value has been stored into it at or before this point.
//
// Stack args are always live and not tracked in this code. Stack args are
// laid out before register spill slots, so we emit the smallest offset that
// needs tracking. Slots before that offset are always live. That offset is
// usually the offset of the first spill slot. But if the first spill slot is
// always live (e.g. if it is address-taken), it will be the offset of a later
// one.
//
// The liveness information is emitted as a FUNCDATA and a PCDATA.
//
// FUNCDATA format:
// - start (smallest) offset that needs tracking (1 byte)
// - a list of bitmaps.
// In a bitmap bit i is set if the i-th spill slot is live.
//
// At a PC where the liveness info changes, a PCDATA indicates the
// byte offset of the liveness map in the FUNCDATA. PCDATA -1 is a
// special case indicating all slots are live (for binary size
// saving).
const allLiveIdx = -1
// name and offset
type nameOff struct {
n *ir.Name
off int64
}
func (a nameOff) FrameOffset() int64 { return a.n.FrameOffset() + a.off }
func (a nameOff) String() string { return fmt.Sprintf("%v+%d", a.n, a.off) }
type blockArgEffects struct {
livein bitvec.BitVec // variables live at block entry
liveout bitvec.BitVec // variables live at block exit
}
type argLiveness struct {
fn *ir.Func
f *ssa.Func
args []nameOff // name and offset of spill slots
idx map[nameOff]int32 // index in args
be []blockArgEffects // indexed by block ID
bvset bvecSet // Set of liveness bitmaps, used for uniquifying.
// Liveness map indices at each Value (where it changes) and Block entry.
// During the computation the indices temporarily refer to entries in bvset.
// At the end they become byte offsets into the output funcdata (changed
// in (*argLiveness).emit).
blockIdx map[ssa.ID]int
valueIdx map[ssa.ID]int
}
// ArgLiveness computes the liveness information of register argument spill slots.
// An argument's spill slot is "live" if we know it contains a meaningful value,
// that is, we have stored the register value to it.
// Returns the liveness map indices at each Block entry and at each Value (where
// it changes).
func ArgLiveness(fn *ir.Func, f *ssa.Func, pp *objw.Progs) (blockIdx, valueIdx map[ssa.ID]int) {
if f.OwnAux.ABIInfo().InRegistersUsed() == 0 || base.Flag.N != 0 {
// No register args. Nothing to emit.
// Or if -N is used we spill everything upfront so it is always live.
return nil, nil
}
lv := &argLiveness{
fn: fn,
f: f,
idx: make(map[nameOff]int32),
be: make([]blockArgEffects, f.NumBlocks()),
blockIdx: make(map[ssa.ID]int),
valueIdx: make(map[ssa.ID]int),
}
// Gather all register arg spill slots.
for _, a := range f.OwnAux.ABIInfo().InParams() {
n, ok := a.Name.(*ir.Name)
if !ok || len(a.Registers) == 0 {
continue
}
_, offs := a.RegisterTypesAndOffsets()
for _, off := range offs {
if n.FrameOffset()+off > 0xff {
// We only print a limited number of args, with stack
// offsets no larger than 255.
continue
}
lv.args = append(lv.args, nameOff{n, off})
}
}
if len(lv.args) > 10 {
lv.args = lv.args[:10] // We print no more than 10 args.
}
// We spill address-taken or non-SSA-able values upfront, so they are always live.
alwaysLive := func(n *ir.Name) bool { return n.Addrtaken() || !f.Frontend().CanSSA(n.Type()) }
// We'll emit the smallest offset for the slots that need liveness info.
// No need to include a slot with a lower offset if it is always live.
for len(lv.args) > 0 && alwaysLive(lv.args[0].n) {
lv.args = lv.args[1:]
}
if len(lv.args) == 0 {
return // everything is always live
}
for i, a := range lv.args {
lv.idx[a] = int32(i)
}
nargs := int32(len(lv.args))
bulk := bitvec.NewBulk(nargs, int32(len(f.Blocks)*2))
for _, b := range f.Blocks {
be := &lv.be[b.ID]
be.livein = bulk.Next()
be.liveout = bulk.Next()
// initialize to all 1s, so we can AND them
be.livein.Not()
be.liveout.Not()
}
entrybe := &lv.be[f.Entry.ID]
entrybe.livein.Clear()
for i, a := range lv.args {
if alwaysLive(a.n) {
entrybe.livein.Set(int32(i))
}
}
// Visit blocks in reverse-postorder, compute block effects.
po := f.Postorder()
for i := len(po) - 1; i >= 0; i-- {
b := po[i]
be := &lv.be[b.ID]
// A slot is live at block entry if it is live in all predecessors.
for _, pred := range b.Preds {
pb := pred.Block()
be.livein.And(be.livein, lv.be[pb.ID].liveout)
}
be.liveout.Copy(be.livein)
for _, v := range b.Values {
lv.valueEffect(v, be.liveout)
}
}
// Coalesce identical live vectors. Compute liveness indices at each PC
// where it changes.
live := bitvec.New(nargs)
addToSet := func(bv bitvec.BitVec) (int, bool) {
if bv.Count() == int(nargs) { // special case for all live
return allLiveIdx, false
}
return lv.bvset.add(bv)
}
for _, b := range lv.f.Blocks {
be := &lv.be[b.ID]
lv.blockIdx[b.ID], _ = addToSet(be.livein)
live.Copy(be.livein)
var lastv *ssa.Value
for i, v := range b.Values {
if lv.valueEffect(v, live) {
// Record that the liveness changes, but don't emit a map yet.
// For a sequence of StoreRegs we only need to emit one map,
// at the last StoreReg.
lastv = v
}
if lastv != nil && (mayFault(v) || i == len(b.Values)-1) {
// Emit the liveness map if it may fault or at the end of
// the block. We may need a traceback if the instruction
// may cause a panic.
var added bool
lv.valueIdx[lastv.ID], added = addToSet(live)
if added {
// live is added to bvset and we cannot modify it now.
// Make a copy.
t := live
live = bitvec.New(nargs)
live.Copy(t)
}
lastv = nil
}
}
// Sanity check.
if !live.Eq(be.liveout) {
panic("wrong arg liveness map at block end")
}
}
// Emit funcdata symbol, update indices to offsets in the symbol data.
lsym := lv.emit()
fn.LSym.Func().ArgLiveInfo = lsym
//lv.print()
p := pp.Prog(obj.AFUNCDATA)
p.From.SetConst(objabi.FUNCDATA_ArgLiveInfo)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = lsym
return lv.blockIdx, lv.valueIdx
}
// valueEffect applies the effect of v to live, return whether it is changed.
func (lv *argLiveness) valueEffect(v *ssa.Value, live bitvec.BitVec) bool {
if v.Op != ssa.OpStoreReg { // TODO: include other store instructions?
return false
}
n, off := ssa.AutoVar(v)
if n.Class != ir.PPARAM {
return false
}
i, ok := lv.idx[nameOff{n, off}]
if !ok || live.Get(i) {
return false
}
live.Set(i)
return true
}
func mayFault(v *ssa.Value) bool {
switch v.Op {
case ssa.OpLoadReg, ssa.OpStoreReg, ssa.OpCopy, ssa.OpPhi,
ssa.OpVarDef, ssa.OpVarKill, ssa.OpVarLive, ssa.OpKeepAlive,
ssa.OpSelect0, ssa.OpSelect1, ssa.OpSelectN, ssa.OpMakeResult,
ssa.OpConvert, ssa.OpInlMark, ssa.OpGetG:
return false
}
if len(v.Args) == 0 {
return false // assume constant op cannot fault
}
return true // conservatively assume all other ops could fault
}
func (lv *argLiveness) print() {
fmt.Println("argument liveness:", lv.f.Name)
live := bitvec.New(int32(len(lv.args)))
for _, b := range lv.f.Blocks {
be := &lv.be[b.ID]
fmt.Printf("%v: live in: ", b)
lv.printLivenessVec(be.livein)
if idx, ok := lv.blockIdx[b.ID]; ok {
fmt.Printf(" #%d", idx)
}
fmt.Println()
for _, v := range b.Values {
if lv.valueEffect(v, live) {
fmt.Printf(" %v: ", v)
lv.printLivenessVec(live)
if idx, ok := lv.valueIdx[v.ID]; ok {
fmt.Printf(" #%d", idx)
}
fmt.Println()
}
}
fmt.Printf("%v: live out: ", b)
lv.printLivenessVec(be.liveout)
fmt.Println()
}
fmt.Println("liveness maps data:", lv.fn.LSym.Func().ArgLiveInfo.P)
}
func (lv *argLiveness) printLivenessVec(bv bitvec.BitVec) {
for i, a := range lv.args {
if bv.Get(int32(i)) {
fmt.Printf("%v ", a)
}
}
}
func (lv *argLiveness) emit() *obj.LSym {
livenessMaps := lv.bvset.extractUnique()
// stack offsets of register arg spill slots
argOffsets := make([]uint8, len(lv.args))
for i, a := range lv.args {
off := a.FrameOffset()
if off > 0xff {
panic("offset too large")
}
argOffsets[i] = uint8(off)
}
idx2off := make([]int, len(livenessMaps))
lsym := base.Ctxt.Lookup(lv.fn.LSym.Name + ".argliveinfo")
lsym.Set(obj.AttrContentAddressable, true)
off := objw.Uint8(lsym, 0, argOffsets[0]) // smallest offset that needs liveness info.
for idx, live := range livenessMaps {
idx2off[idx] = off
off = objw.BitVec(lsym, off, live)
}
// Update liveness indices to offsets.
for i, x := range lv.blockIdx {
if x != allLiveIdx {
lv.blockIdx[i] = idx2off[x]
}
}
for i, x := range lv.valueIdx {
if x != allLiveIdx {
lv.valueIdx[i] = idx2off[x]
}
}
return lsym
}
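As a rough sketch of the emitted symbol's layout, per the header comment above (the exact bitmap packing is whatever objw.BitVec writes):

	byte 0:    smallest frame offset that needs liveness info (argOffsets[0])
	bytes 1..: the deduplicated liveness bitmaps, bit i set once the i-th
	           tracked spill slot has been stored to

A PCDATA value at an instruction is then the byte offset of the current bitmap within this symbol, with allLiveIdx (-1) as the special case meaning every tracked slot is live.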

View File

@ -47,9 +47,10 @@ func (m *bvecSet) grow() {
m.index = newIndex
}
// add adds bv to the set and returns its index in m.extractUnique.
// The caller must not modify bv after this.
func (m *bvecSet) add(bv bitvec.BitVec) int {
// add adds bv to the set and returns its index in m.extractUnique,
// and whether it is newly added.
// If it is newly added, the caller must not modify bv after this.
func (m *bvecSet) add(bv bitvec.BitVec) (int, bool) {
if len(m.uniq)*4 >= len(m.index) {
m.grow()
}
@ -62,12 +63,12 @@ func (m *bvecSet) add(bv bitvec.BitVec) int {
// New bvec.
index[h] = len(m.uniq)
m.uniq = append(m.uniq, bv)
return len(m.uniq) - 1
return len(m.uniq) - 1, true
}
jlive := m.uniq[j]
if bv.Eq(jlive) {
// Existing bvec.
return j
return j, false
}
h++

View File

@ -854,8 +854,9 @@ func (lv *liveness) epilogue() {
if lv.fn.OpenCodedDeferDisallowed() {
lv.livenessMap.DeferReturn = objw.LivenessDontCare
} else {
idx, _ := lv.stackMapSet.add(livedefer)
lv.livenessMap.DeferReturn = objw.LivenessIndex{
StackMapIndex: lv.stackMapSet.add(livedefer),
StackMapIndex: idx,
IsUnsafePoint: false,
}
}
@ -902,7 +903,7 @@ func (lv *liveness) compact(b *ssa.Block) {
isUnsafePoint := lv.allUnsafe || v.Op != ssa.OpClobber && lv.unsafePoints.Get(int32(v.ID))
idx := objw.LivenessIndex{StackMapIndex: objw.StackMapDontCare, IsUnsafePoint: isUnsafePoint}
if hasStackMap {
idx.StackMapIndex = lv.stackMapSet.add(lv.livevars[pos])
idx.StackMapIndex, _ = lv.stackMapSet.add(lv.livevars[pos])
pos++
}
if hasStackMap || isUnsafePoint {

View File

@ -132,7 +132,11 @@ func (g *irgen) funcDecl(out *ir.Nodes, decl *syntax.FuncDecl) {
g.target.Inits = append(g.target.Inits, fn)
}
haveEmbed := g.haveEmbed
g.later(func() {
defer func(b bool) { g.haveEmbed = b }(g.haveEmbed)
g.haveEmbed = haveEmbed
if fn.Type().HasTParam() {
g.topFuncIsGeneric = true
}
@ -241,12 +245,15 @@ func (g *irgen) varDecl(out *ir.Nodes, decl *syntax.VarDecl) {
if decl.Pragma != nil {
pragma := decl.Pragma.(*pragmas)
// TODO(mdempsky): Plumb noder.importedEmbed through to here.
varEmbed(g.makeXPos, names[0], decl, pragma, true)
varEmbed(g.makeXPos, names[0], decl, pragma, g.haveEmbed)
g.reportUnused(pragma)
}
haveEmbed := g.haveEmbed
do := func() {
defer func(b bool) { g.haveEmbed = b }(g.haveEmbed)
g.haveEmbed = haveEmbed
values := g.exprList(decl.Values)
var as2 *ir.AssignListStmt

View File

@ -235,12 +235,6 @@ func (g *irgen) selectorExpr(pos src.XPos, typ types2.Type, expr *syntax.Selecto
return DotField(pos, x, last)
}
// TODO(danscales,mdempsky): Interface method sets are not sorted the
// same between types and types2. In particular, using "last" here
// without conversion will likely fail if an interface contains
// unexported methods from two different packages (due to cross-package
// interface embedding).
var n ir.Node
method2 := selinfo.Obj().(*types2.Func)
@ -344,7 +338,7 @@ func (g *irgen) compLit(typ types2.Type, lit *syntax.CompositeLit) ir.Node {
return typed(g.typ(typ), n)
}
_, isStruct := typ.Underlying().(*types2.Struct)
_, isStruct := types2.Structure(typ).(*types2.Struct)
exprs := make([]ir.Node, len(lit.ElemList))
for i, elem := range lit.ElemList {

View File

@ -183,7 +183,7 @@ func Call(pos src.XPos, typ *types.Type, fun ir.Node, args []ir.Node, dots bool)
// If no type params, do the normal call transformations. This
// will convert OCALL to OCALLFUNC.
typed(typ, n)
transformCall(n, nil)
transformCall(n)
return n
}

View File

@ -127,6 +127,8 @@ func openPackage(path string) (*os.File, error) {
suffix = "_race"
} else if base.Flag.MSan {
suffix = "_msan"
} else if base.Flag.ASan {
suffix = "_asan"
}
if file, err := os.Open(fmt.Sprintf("%s/pkg/%s_%s%s/%s.a", buildcfg.GOROOT, buildcfg.GOOS, buildcfg.GOARCH, suffix, path)); err == nil {

View File

@ -44,7 +44,6 @@ func checkFiles(noders []*noder) (posMap, *types2.Package, *types2.Info) {
GoVersion: base.Flag.Lang,
IgnoreLabels: true, // parser already checked via syntax.CheckBranches mode
CompilerErrorMessages: true, // use error strings matching existing compiler errors
AllowTypeLists: true, // remove this line once all tests use type set syntax
Error: func(err error) {
terr := err.(types2.Error)
base.ErrorfAt(m.makeXPos(terr.Pos), "%s", terr.Msg)
@ -148,6 +147,9 @@ type irgen struct {
// laterFuncs records tasks that need to run after all declarations
// are processed.
laterFuncs []func()
// haveEmbed indicates whether the current node belongs to a file that
// imports the "embed" package.
haveEmbed bool
// exprStmtOK indicates whether it's safe to generate expressions or
// statements yet.
@ -156,16 +158,6 @@ type irgen struct {
// types which we need to finish, by doing g.fillinMethods.
typesToFinalize []*typeDelayInfo
dnum int // for generating unique dictionary variables
// Map from a name of function that been instantiated to information about
// its instantiated function (including dictionary format).
instInfoMap map[*types.Sym]*instInfo
// dictionary syms which we need to finish, by writing out any itabconv
// entries.
dictSymsToFinalize []*delayInfo
// True when we are compiling a top-level generic function or method. Use to
// avoid adding closures of generic functions/methods to the target.Decls
// list.
@ -178,6 +170,23 @@ type irgen struct {
curDecl string
}
// genInst has the information for creating needed instantiations and modifying
// functions to use instantiations.
type genInst struct {
dnum int // for generating unique dictionary variables
// Map from the names of all instantiations to information about the
// instantiations.
instInfoMap map[*types.Sym]*instInfo
// Dictionary syms which we need to finish, by writing out any itabconv
// entries.
dictSymsToFinalize []*delayInfo
// New instantiations created during this round of buildInstantiations().
newInsts []ir.Node
}
func (g *irgen) later(fn func()) {
g.laterFuncs = append(g.laterFuncs, fn)
}
@ -255,8 +264,11 @@ Outer:
types.ResumeCheckSize()
// 3. Process all remaining declarations.
for _, declList := range declLists {
for i, declList := range declLists {
old := g.haveEmbed
g.haveEmbed = noders[i].importedEmbed
g.decls((*ir.Nodes)(&g.target.Decls), declList)
g.haveEmbed = old
}
g.exprStmtOK = true
@ -303,8 +315,9 @@ Outer:
typecheck.DeclareUniverse()
// Create any needed stencils of generic functions
g.stencil()
// Create any needed instantiations of generic functions and transform
// existing and new functions to use those instantiations.
BuildInstantiations(true)
// Remove all generic functions from g.target.Decl, since they have been
// used for stenciling, but don't compile. Generic functions will already

View File

@ -36,7 +36,7 @@ func LoadPackage(filenames []string) {
mode := syntax.CheckBranches
if supportsGenerics {
mode |= syntax.AllowGenerics | syntax.AllowTypeSets
mode |= syntax.AllowGenerics
}
// Limit the number of simultaneously open files.
@ -323,8 +323,7 @@ func (p *noder) processPragmas() {
}
n := ir.AsNode(typecheck.Lookup(l.local).Def)
if n == nil || n.Op() != ir.ONAME {
// TODO(mdempsky): Change to p.errorAt before Go 1.17 release.
// base.WarnfAt(p.makeXPos(l.pos), "//go:linkname must refer to declared function or variable (will be an error in Go 1.17)")
p.errorAt(l.pos, "//go:linkname must refer to declared function or variable")
continue
}
if n.Sym().Linkname != "" {
@ -1238,7 +1237,7 @@ func (p *noder) ifStmt(stmt *syntax.IfStmt) ir.Node {
init := p.stmt(stmt.Init)
n := ir.NewIfStmt(p.pos(stmt), p.expr(stmt.Cond), p.blockStmt(stmt.Then), nil)
if init != nil {
*n.PtrInit() = []ir.Node{init}
n.SetInit([]ir.Node{init})
}
if stmt.Else != nil {
e := p.stmt(stmt.Else)
@ -1285,7 +1284,7 @@ func (p *noder) switchStmt(stmt *syntax.SwitchStmt) ir.Node {
init := p.stmt(stmt.Init)
n := ir.NewSwitchStmt(p.pos(stmt), p.expr(stmt.Tag), nil)
if init != nil {
*n.PtrInit() = []ir.Node{init}
n.SetInit([]ir.Node{init})
}
var tswitch *ir.TypeSwitchGuard

View File

@ -466,7 +466,7 @@ func (r *reader) interfaceType() *types.Type {
if len(fields) == 0 {
return types.Types[types.TINTER] // empty interface
}
return types.NewInterface(tpkg, fields)
return types.NewInterface(tpkg, fields, false)
}
func (r *reader) structType() *types.Type {

File diff suppressed because it is too large

View File

@ -132,9 +132,7 @@ func transformConvCall(n *ir.CallExpr) ir.Node {
// transformCall transforms a normal function/method call. Corresponds to last half
// (non-conversion, non-builtin part) of typecheck.tcCall. This code should work even
// in the case of OCALL/OFUNCINST.
// The dict parameter is used for OCALLINTER nodes to ensure that the called method
// is retained by the linker.
func transformCall(n *ir.CallExpr, dict *ir.Name) {
func transformCall(n *ir.CallExpr) {
// n.Type() can be nil for calls with no return value
assert(n.Typecheck() == 1)
transformArgs(n)
@ -144,17 +142,6 @@ func transformCall(n *ir.CallExpr, dict *ir.Name) {
switch l.Op() {
case ir.ODOTINTER:
n.SetOp(ir.OCALLINTER)
if n.X.(*ir.SelectorExpr).X.Type().HasShape() {
if dict == nil {
base.Fatalf("calls on shape interfaces need a dictionary reference")
}
dict.SetAddrtaken(true)
// KeepAlive isn't exactly the right thing here, as we only
// need to keep the dictionary live in the linker-deadcode
// sense, not the at-runtime sense. But the at-runtime sense
// is stronger, so it works. See issue 48047.
n.KeepAlive = append(n.KeepAlive, dict)
}
case ir.ODOTMETH:
l := l.(*ir.SelectorExpr)

View File

@ -213,7 +213,7 @@ func (g *irgen) typ0(typ types2.Type) *types.Type {
methods[i] = types.NewField(g.pos(m), g.selector(m), mtyp)
}
return types.NewInterface(g.tpkg(typ), append(embeddeds, methods...))
return types.NewInterface(g.tpkg(typ), append(embeddeds, methods...), typ.IsImplicit())
case *types2.TypeParam:
// Save the name of the type parameter in the sym of the type.

View File

@ -18,6 +18,7 @@ import (
"cmd/compile/internal/inline"
"cmd/compile/internal/ir"
"cmd/compile/internal/objw"
"cmd/compile/internal/staticdata"
"cmd/compile/internal/typebits"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
@ -958,11 +959,6 @@ func writeType(t *types.Type) *obj.LSym {
base.Fatalf("unresolved defined type: %v", tbase)
}
dupok := 0
if tbase.Sym() == nil || tbase.HasShape() { // TODO(mdempsky): Probably need DUPOK for instantiated types too.
dupok = obj.DUPOK
}
if !NeedEmit(tbase) {
if i := typecheck.BaseTypeIndex(t); i >= 0 {
lsym.Pkg = tbase.Sym().Pkg.Prefix
@ -1195,7 +1191,9 @@ func writeType(t *types.Type) *obj.LSym {
}
ot = dextratypeData(lsym, ot, t)
objw.Global(lsym, int32(ot), int16(dupok|obj.RODATA))
objw.Global(lsym, int32(ot), int16(obj.DUPOK|obj.RODATA))
// Note: DUPOK is required to ensure that we don't end up with more
// than one type descriptor for a given type.
// The linker will leave a table of all the typelinks for
// types in the binary, so the runtime can find them.
@ -1412,6 +1410,9 @@ func WriteBasicTypes() {
if base.Flag.MSan {
dimportpath(types.NewPkg("runtime/msan", ""))
}
if base.Flag.ASan {
dimportpath(types.NewPkg("runtime/asan", ""))
}
dimportpath(types.NewPkg("main", ""))
}
@ -1995,8 +1996,33 @@ func MarkUsedIfaceMethod(n *ir.CallExpr) {
dot := n.X.(*ir.SelectorExpr)
ityp := dot.X.Type()
if ityp.HasShape() {
base.Fatalf("marking method of shape type used %+v %s", ityp, dot.Sel.Name)
// Here we're calling a method on a generic interface. Something like:
//
// type I[T any] interface { foo() T }
// func f[T any](x I[T]) {
// ... = x.foo()
// }
// f[int](...)
// f[string](...)
//
// In this case, in f we're calling foo on a generic interface.
// Which method could that be? Normally we could match the method
// both by name and by type. But in this case we don't really know
// the type of the method we're calling. It could be func()int
// or func()string. So we match on just the function name, instead
// of both the name and the type used for the non-generic case below.
// TODO: instantiations at least know the shape of the instantiated
// type, and the linker could do more complicated matching using
// some sort of fuzzy shape matching. For now, only use the name
// of the method for matching.
r := obj.Addrel(ir.CurFunc.LSym)
// We use a separate symbol just to tell the linker the method name.
// (The symbol itself is not needed in the final binary.)
r.Sym = staticdata.StringSym(src.NoXPos, dot.Sel.Name)
r.Type = objabi.R_USEGENERICIFACEMETHOD
return
}
tsym := TypeLinksym(ityp)
r := obj.Addrel(ir.CurFunc.LSym)
r.Sym = tsym
@ -2007,16 +2033,6 @@ func MarkUsedIfaceMethod(n *ir.CallExpr) {
r.Type = objabi.R_USEIFACEMETHOD
}
// MarkUsedIfaceMethodIndex marks that method number ix (in the AllMethods list)
// of interface type ityp is used, and should be attached to lsym.
func MarkUsedIfaceMethodIndex(lsym *obj.LSym, ityp *types.Type, ix int) {
tsym := TypeLinksym(ityp)
r := obj.Addrel(lsym)
r.Sym = tsym
r.Add = InterfaceMethodOffset(ityp, int64(ix))
r.Type = objabi.R_USEIFACEMETHOD
}
// getDictionary returns the dictionary for the given named generic function
// or method, with the given type arguments.
func getDictionary(gf *types.Sym, targs []*types.Type) ir.Node {

View File

@ -279,7 +279,8 @@ func (b *Block) AddEdgeTo(c *Block) {
// removePred removes the ith input edge from b.
// It is the responsibility of the caller to remove
// the corresponding successor edge.
// the corresponding successor edge, and adjust any
// phi values by calling b.removePhiArg(v, i).
func (b *Block) removePred(i int) {
n := len(b.Preds) - 1
if i != n {
@ -322,6 +323,28 @@ func (b *Block) swapSuccessors() {
b.Likely *= -1
}
// removePhiArg removes the ith arg from phi.
// It must be called after calling b.removePred(i) to
// adjust the corresponding phi value of the block:
//
// b.removePred(i)
// for _, v := range b.Values {
// if v.Op != OpPhi {
// continue
// }
// b.removePhiArg(v, i)
// }
func (b *Block) removePhiArg(phi *Value, i int) {
n := len(b.Preds)
if numPhiArgs := len(phi.Args); numPhiArgs-1 != n {
b.Fatalf("inconsistent state, num predecessors: %d, num phi args: %d", n, numPhiArgs)
}
phi.Args[i].Uses--
phi.Args[i] = phi.Args[n]
phi.Args[n] = nil
phi.Args = phi.Args[:n]
}
// LackingPos indicates whether b is a block whose position should be inherited
// from its successors. This is true if all the values within it have unreliable positions
// and if it is "plain", meaning that there is no control flow that is also very likely

View File

@ -22,7 +22,7 @@ import "cmd/internal/src"
func branchelim(f *Func) {
// FIXME: add support for lowering CondSelects on more architectures
switch f.Config.arch {
case "arm64", "amd64", "wasm":
case "arm64", "ppc64le", "ppc64", "amd64", "wasm":
// implemented
default:
return

View File

@ -91,14 +91,13 @@ func critical(f *Func) {
b.removePred(i)
// Update corresponding phi args
n := len(b.Preds)
phi.Args[i].Uses--
phi.Args[i] = phi.Args[n]
phi.Args[n] = nil
phi.Args = phi.Args[:n]
b.removePhiArg(phi, i)
// splitting occasionally leads to a phi having
// a single argument (occurs with -N)
if n == 1 {
// TODO(cuonglm,khr): replace this with phielimValue, and
// make removePhiArg incorporate that.
if len(b.Preds) == 1 {
phi.Op = OpCopy
}
// Don't increment i in this case because we moved

View File

@ -348,15 +348,11 @@ func (b *Block) removeEdge(i int) {
c.removePred(j)
// Remove phi args from c's phis.
n := len(c.Preds)
for _, v := range c.Values {
if v.Op != OpPhi {
continue
}
v.Args[j].Uses--
v.Args[j] = v.Args[n]
v.Args[n] = nil
v.Args = v.Args[:n]
c.removePhiArg(v, j)
phielimValue(v)
// Note: this is trickier than it looks. Replacing
// a Phi with a Copy can in general cause problems because

View File

@ -958,6 +958,7 @@ func (x *expandState) storeArgOrLoad(pos src.XPos, b *Block, source, mem *Value,
return x.storeArgOrLoad(pos, b, source, mem, t, storeOffset, loadRegOffset, storeRc)
}
eltRO := x.regWidth(elt)
source.Type = t
for i := int64(0); i < t.NumElem(); i++ {
sel := source.Block.NewValue1I(pos, OpArraySelect, elt, i, source)
mem = x.storeArgOrLoad(pos, b, sel, mem, elt, storeOffset+i*elt.Size(), loadRegOffset, storeRc.at(t, 0))
@ -991,6 +992,7 @@ func (x *expandState) storeArgOrLoad(pos src.XPos, b *Block, source, mem *Value,
return x.storeArgOrLoad(pos, b, source, mem, t, storeOffset, loadRegOffset, storeRc)
}
source.Type = t
for i := 0; i < t.NumFields(); i++ {
fld := t.Field(i)
sel := source.Block.NewValue1I(pos, OpStructSelect, fld.Type, int64(i), source)

View File

@ -78,7 +78,7 @@ func fuseBranchRedirect(f *Func) bool {
if v.Op != OpPhi {
continue
}
v.RemoveArg(k)
b.removePhiArg(v, k)
phielimValue(v)
}
// Fix up child to have one more predecessor.

View File

@ -455,7 +455,7 @@ func init() {
},
{name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "CALLtail", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "CALLtail", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("DX"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem

View File

@ -2219,3 +2219,29 @@
(AND(Q|L) x (ADD(Q|L)const [-1] x)) && buildcfg.GOAMD64 >= 3 => (BLSR(Q|L) x)
(BSWAP(Q|L) (BSWAP(Q|L) p)) => p
// CPUID feature: MOVBE.
(MOV(Q|L)store [i] {s} p x:(BSWAP(Q|L) w) mem) && x.Uses == 1 && buildcfg.GOAMD64 >= 3 => (MOVBE(Q|L)store [i] {s} p w mem)
(BSWAP(Q|L) x:(MOV(Q|L)load [i] {s} p mem)) && x.Uses == 1 && buildcfg.GOAMD64 >= 3 => (MOVBE(Q|L)load [i] {s} p mem)
(BSWAP(Q|L) (MOVBE(Q|L)load [i] {s} p m)) => (MOV(Q|L)load [i] {s} p m)
(MOVBE(Q|L)store [i] {s} p (BSWAP(Q|L) x) m) => (MOV(Q|L)store [i] {s} p x m)
(ORQ x0:(MOVBELload [i0] {s} p mem)
sh:(SHLQconst [32] x1:(MOVBELload [i1] {s} p mem)))
&& i0 == i1+4
&& x0.Uses == 1
&& x1.Uses == 1
&& sh.Uses == 1
&& mergePoint(b,x0,x1) != nil
&& clobber(x0, x1, sh)
=> @mergePoint(b,x0,x1) (MOVBEQload [i1] {s} p mem)
(ORQ x0:(MOVBELload [i] {s} p0 mem)
sh:(SHLQconst [32] x1:(MOVBELload [i] {s} p1 mem)))
&& x0.Uses == 1
&& x1.Uses == 1
&& sh.Uses == 1
&& sequentialAddresses(p1, p0, 4)
&& mergePoint(b,x0,x1) != nil
&& clobber(x0, x1, sh)
=> @mergePoint(b,x0,x1) (MOVBEQload [i] {s} p1 mem)
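For illustration, with these rules a byte-swapped load, such as the MOVQload plus BSWAPQ sequence typically generated for binary.BigEndian.Uint64 on amd64, is folded into a single MOVBEQload when buildcfg.GOAMD64 >= 3, and a BSWAP feeding a store is likewise folded into a MOVBE store.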

View File

@ -765,7 +765,7 @@ func init() {
// With a register ABI, the actual register info for these instructions (i.e., what is used in regalloc) is augmented with per-call-site bindings of additional arguments to specific in and out registers.
{name: "CALLstatic", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). last arg=mem, auxint=argsize, returns mem
{name: "CALLtail", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // tail call static function aux.(*obj.LSym). last arg=mem, auxint=argsize, returns mem
{name: "CALLtail", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). last arg=mem, auxint=argsize, returns mem
{name: "CALLclosure", argLength: -1, reg: regInfo{inputs: []regMask{gpsp, buildReg("DX"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, last arg=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: -1, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, last arg=mem, auxint=argsize, returns mem
@ -922,6 +922,12 @@ func init() {
// and BSFQ(0) is undefined. Same for TZCNTL(0)==32
{name: "TZCNTQ", argLength: 1, reg: gp11, asm: "TZCNTQ", clobberFlags: true},
{name: "TZCNTL", argLength: 1, reg: gp11, asm: "TZCNTL", clobberFlags: true},
// CPUID feature: MOVBE
{name: "MOVBELload", argLength: 2, reg: gpload, asm: "MOVBEL", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load and swap 4 bytes from arg0+auxint+aux. arg1=mem. Zero extend.
{name: "MOVBELstore", argLength: 3, reg: gpstore, asm: "MOVBEL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // swap and store 4 bytes in arg1 to arg0+auxint+aux. arg2=mem
{name: "MOVBEQload", argLength: 2, reg: gpload, asm: "MOVBEQ", aux: "SymOff", typ: "UInt64", faultOnNilArg0: true, symEffect: "Read"}, // load and swap 8 bytes from arg0+auxint+aux. arg1=mem
{name: "MOVBEQstore", argLength: 3, reg: gpstore, asm: "MOVBEQ", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // swap and store 8 bytes in arg1 to arg0+auxint+aux. arg2=mem
}
var AMD64blocks = []blockData{

View File

@ -492,7 +492,7 @@ func init() {
// function calls
{name: "CALLstatic", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). last arg=mem, auxint=argsize, returns mem
{name: "CALLtail", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // tail call static function aux.(*obj.LSym). last arg=mem, auxint=argsize, returns mem
{name: "CALLtail", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). last arg=mem, auxint=argsize, returns mem
{name: "CALLclosure", argLength: -1, reg: regInfo{inputs: []regMask{gpsp, buildReg("R26"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, last arg=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: -1, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, last arg=mem, auxint=argsize, returns mem

View File

@ -432,7 +432,7 @@ func init() {
// function calls
{name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "CALLtail", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "CALLtail", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R7"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem

View File

@ -276,7 +276,7 @@ func init() {
// function calls
{name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "CALLtail", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "CALLtail", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R22"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem

View File

@ -258,7 +258,7 @@ func init() {
// function calls
{name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "CALLtail", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "CALLtail", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R22"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem

View File

@ -561,8 +561,10 @@
((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(OR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (ORCC x y) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(XOR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (XORCC x y) yes no)
(CondSelect x y bool) && flagArg(bool) != nil => (ISEL [2] x y bool)
(CondSelect x y bool) && flagArg(bool) == nil => (ISEL [2] x y (CMPWconst [0] bool))
// Only lower after bool is lowered. It should always lower. This helps ensure the folding below happens reliably.
(CondSelect x y bool) && flagArg(bool) == nil => (ISEL [6] x y (CMPWconst [0] bool))
// Fold any CR -> GPR -> CR transfers when applying the above rule.
(ISEL [6] x y (CMPWconst [0] (ISELB [c] one cmp))) => (ISEL [c] x y cmp)
// Lowering loads
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem)
@ -848,6 +850,8 @@
(ADDconst [c] (SUBFCconst [d] x)) && is32Bit(c+d) => (SUBFCconst [c+d] x)
(NEG (ADDconst [c] x)) && is32Bit(-c) => (SUBFCconst [-c] x)
(NEG (SUBFCconst [c] x)) && is32Bit(-c) => (ADDconst [-c] x)
(NEG (SUB x y)) => (SUB y x)
(NEG (NEG x)) => x
// Use register moves instead of stores and loads to move int<=>float values
// Common with math Float64bits, Float64frombits
@ -1087,7 +1091,7 @@
((CMP|CMPW|CMPU|CMPWU) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))
// ISEL auxInt values 0=LT 1=GT 2=EQ arg2 ? arg0 : arg1
// ISEL auxInt values 4=GE 5=LE 6=NE arg2 ? arg1 : arg0
// ISEL auxInt values 4=GE 5=LE 6=NE !arg2 ? arg1 : arg0
// ISELB special case where arg0, arg1 values are 0, 1
(Equal cmp) => (ISELB [2] (MOVDconst [1]) cmp)
@ -1138,6 +1142,9 @@
(ISEL [n] x y (InvertFlags bool)) && n%4 == 0 => (ISEL [n+1] x y bool)
(ISEL [n] x y (InvertFlags bool)) && n%4 == 1 => (ISEL [n-1] x y bool)
(ISEL [n] x y (InvertFlags bool)) && n%4 == 2 => (ISEL [n] x y bool)
(XORconst [1] (ISELB [6] (MOVDconst [1]) cmp)) => (ISELB [2] (MOVDconst [1]) cmp)
(XORconst [1] (ISELB [5] (MOVDconst [1]) cmp)) => (ISELB [1] (MOVDconst [1]) cmp)
(XORconst [1] (ISELB [4] (MOVDconst [1]) cmp)) => (ISELB [0] (MOVDconst [1]) cmp)
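These three folds rewrite a negated boolean (XORconst [1]) of a comparison result as the ISELB for the opposite condition, using the auxInt encoding documented above: NE (6) becomes EQ (2), LE (5) becomes GT (1), and GE (4) becomes LT (0).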
// A particular pattern seen in cgo code:
(AND (MOVDconst [c]) x:(MOVBZload _ _)) => (ANDconst [c&0xFF] x)

View File

@ -396,7 +396,7 @@ func init() {
{name: "CMPWUconst", argLength: 1, reg: gp1cr, asm: "CMPWU", aux: "Int32", typ: "Flags"},
// ISEL auxInt values 0=LT 1=GT 2=EQ arg2 ? arg0 : arg1
// ISEL auxInt values 4=GE 5=LE 6=NE arg2 ? arg1 : arg0
// ISEL auxInt values 4=GE 5=LE 6=NE !arg2 ? arg1 : arg0
// ISELB special case where arg0, arg1 values are 0, 1 for boolean result
{name: "ISEL", argLength: 3, reg: crgp21, asm: "ISEL", aux: "Int32", typ: "Int32"}, // see above
{name: "ISELB", argLength: 2, reg: crgp11, asm: "ISEL", aux: "Int32", typ: "Int32"}, // see above
@ -434,7 +434,7 @@ func init() {
{name: "LoweredRound64F", argLength: 1, reg: fp11, resultInArg0: true, zeroWidth: true},
{name: "CALLstatic", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "CALLtail", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "CALLtail", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "CALLclosure", argLength: -1, reg: regInfo{inputs: []regMask{callptr, ctxt, 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: -1, reg: regInfo{inputs: []regMask{callptr}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem

View File

@ -29,6 +29,7 @@ const (
riscv64REG_CTXT = 20
riscv64REG_LR = 1
riscv64REG_SP = 2
riscv64REG_GP = 3
riscv64REG_TP = 4
riscv64REG_TMP = 31
riscv64REG_ZERO = 0
@ -80,8 +81,8 @@ func init() {
// Add general purpose registers to gpMask.
switch r {
// ZERO, TP and TMP are not in any gp mask.
case riscv64REG_ZERO, riscv64REG_TP, riscv64REG_TMP:
// ZERO, GP, TP and TMP are not in any gp mask.
case riscv64REG_ZERO, riscv64REG_GP, riscv64REG_TP, riscv64REG_TMP:
case riscv64REG_G:
gpgMask |= mask
gpspsbgMask |= mask
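
In the RISC-V convention x3 is the global pointer and x4 the thread pointer; leaving them (together with ZERO and the assembler temporary) out of the general-purpose mask means the register allocator can never hand them out. A minimal sketch of the mask idea, with made-up names and one bit per register rather than the compiler's actual regMask plumbing:

package main

import "fmt"

// Register numbers follow the constants above (x0=zero, x3=gp, x4=tp,
// x31=tmp). A register omitted from the mask is never offered to the
// allocator. Sketch only; the real masks are built from the port's ordered
// register list.
const (
	regZERO = 0
	regGP   = 3
	regTP   = 4
	regTMP  = 31
)

func buildGPMask() (mask uint64) {
	for r := 0; r < 32; r++ {
		switch r {
		case regZERO, regGP, regTP, regTMP:
			// reserved: excluded from the general-purpose mask
		default:
			mask |= 1 << uint(r)
		}
	}
	return
}

func main() { fmt.Printf("%#x\n", buildGPMask()) }
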
@ -240,10 +241,10 @@ func init() {
{name: "MOVconvert", argLength: 2, reg: gp11, asm: "MOV"}, // arg0, but converted to int/ptr as appropriate; arg1=mem
// Calls
{name: "CALLstatic", argLength: 1, reg: call, aux: "CallOff", call: true}, // call static function aux.(*gc.Sym). arg0=mem, auxint=argsize, returns mem
{name: "CALLtail", argLength: 1, reg: call, aux: "CallOff", call: true}, // tail call static function aux.(*gc.Sym). arg0=mem, auxint=argsize, returns mem
{name: "CALLclosure", argLength: 3, reg: callClosure, aux: "CallOff", call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: 2, reg: callInter, aux: "CallOff", call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
{name: "CALLstatic", argLength: 1, reg: call, aux: "CallOff", call: true}, // call static function aux.(*gc.Sym). arg0=mem, auxint=argsize, returns mem
{name: "CALLtail", argLength: 1, reg: call, aux: "CallOff", call: true, tailCall: true}, // tail call static function aux.(*gc.Sym). arg0=mem, auxint=argsize, returns mem
{name: "CALLclosure", argLength: 3, reg: callClosure, aux: "CallOff", call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: 2, reg: callInter, aux: "CallOff", call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
// duffzero
// arg0 = address of memory to zero (in X10, changed as side effect)

View File

@ -480,7 +480,7 @@ func init() {
{name: "CLEAR", argLength: 2, reg: regInfo{inputs: []regMask{ptr, 0}}, asm: "CLEAR", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Write"},
{name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "CALLtail", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "CALLtail", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{ptrsp, buildReg("R12"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{ptr}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem

View File

@ -124,7 +124,7 @@ func init() {
var WasmOps = []opData{
{name: "LoweredStaticCall", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "LoweredTailCall", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", call: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "LoweredTailCall", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "LoweredClosureCall", argLength: 3, reg: regInfo{inputs: []regMask{gp, gp, 0}, clobbers: callerSave}, aux: "CallOff", call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
{name: "LoweredInterCall", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem

View File

@ -63,6 +63,7 @@ type opData struct {
resultNotInArgs bool // outputs must not be allocated to the same registers as inputs
clobberFlags bool // this op clobbers flags register
call bool // is a function call
tailCall bool // is a tail call
nilCheck bool // this op is a nil check on arg0
faultOnNilArg0 bool // this op will fault if arg0 is nil (and aux encodes a small offset)
faultOnNilArg1 bool // this op will fault if arg1 is nil (and aux encodes a small offset)
@ -307,6 +308,9 @@ func genOp() {
if v.call {
fmt.Fprintln(w, "call: true,")
}
if v.tailCall {
fmt.Fprintln(w, "tailCall: true,")
}
if v.nilCheck {
fmt.Fprintln(w, "nilCheck: true,")
}
@ -405,6 +409,7 @@ func genOp() {
fmt.Fprintln(w, "func (o Op) SymEffect() SymEffect { return opcodeTable[o].symEffect }")
fmt.Fprintln(w, "func (o Op) IsCall() bool { return opcodeTable[o].call }")
fmt.Fprintln(w, "func (o Op) IsTailCall() bool { return opcodeTable[o].tailCall }")
fmt.Fprintln(w, "func (o Op) HasSideEffects() bool { return opcodeTable[o].hasSideEffects }")
fmt.Fprintln(w, "func (o Op) UnsafePoint() bool { return opcodeTable[o].unsafePoint }")
fmt.Fprintln(w, "func (o Op) ResultInArg0() bool { return opcodeTable[o].resultInArg0 }")

View File

@ -34,6 +34,7 @@ type opInfo struct {
resultNotInArgs bool // outputs must not be allocated to the same registers as inputs
clobberFlags bool // this op clobbers flags register
call bool // is a function call
tailCall bool // is a tail call
nilCheck bool // this op is a nil check on arg0
faultOnNilArg0 bool // this op will fault if arg0 is nil (and aux encodes a small offset)
faultOnNilArg1 bool // this op will fault if arg1 is nil (and aux encodes a small offset)
@ -102,6 +103,10 @@ func (a *AuxNameOffset) String() string {
return fmt.Sprintf("%s+%d", a.Name.Sym().Name, a.Offset)
}
func (a *AuxNameOffset) FrameOffset() int64 {
return a.Name.FrameOffset() + a.Offset
}
type AuxCall struct {
Fn *obj.LSym
reg *regInfo // regInfo for this call

File diff suppressed because it is too large

View File

@ -559,7 +559,8 @@ func (s *regAllocState) allocValToReg(v *Value, mask regMask, nospill bool, pos
func isLeaf(f *Func) bool {
for _, b := range f.Blocks {
for _, v := range b.Values {
if opcodeTable[v.Op].call {
if v.Op.IsCall() && !v.Op.IsTailCall() {
// tail call is not counted as it does not save the return PC or need a frame
return false
}
}
@ -634,6 +635,8 @@ func (s *regAllocState) init(f *Func) {
// nothing to do
case "ppc64le": // R2 already reserved.
// nothing to do
case "riscv64": // X3 (aka GP) and X4 (aka TP) already reserved.
// nothing to do
case "s390x":
s.allocatable &^= 1 << 11 // R11
default:
@ -1840,7 +1843,7 @@ func (s *regAllocState) regalloc(f *Func) {
if s.f.pass.debug > regDebug {
fmt.Printf("delete copied value %s\n", c.LongString())
}
c.RemoveArg(0)
c.resetArgs()
f.freeValue(c)
delete(s.copies, c)
progress = true

View File

@ -816,7 +816,11 @@ func devirtLECall(v *Value, sym *obj.LSym) *Value {
v.Op = OpStaticLECall
auxcall := v.Aux.(*AuxCall)
auxcall.Fn = sym
v.RemoveArg(0)
// Remove first arg
v.Args[0].Uses--
copy(v.Args[0:], v.Args[1:])
v.Args[len(v.Args)-1] = nil // aid GC
v.Args = v.Args[:len(v.Args)-1]
return v
}
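
The added lines above open-code the argument removal that Value.RemoveArg(0) used to perform (that helper is deleted from value.go later in this diff); the Uses decrement is compiler bookkeeping and is not part of the slice idiom itself. The same idiom in isolation, with a made-up generic helper for illustration:

package main

import "fmt"

// removeFirst drops s[0] in place: shift the remaining elements left, nil
// the vacated tail slot so the garbage collector can reclaim what it pointed
// to, then shrink the slice. Illustrative helper, not compiler API.
func removeFirst[T any](s []*T) []*T {
	copy(s, s[1:])
	s[len(s)-1] = nil // aid GC
	return s[:len(s)-1]
}

func main() {
	a, b, c := 1, 2, 3
	s := []*int{&a, &b, &c}
	s = removeFirst(s)
	fmt.Println(*s[0], *s[1]) // 2 3
}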

View File

@ -222,6 +222,10 @@ func rewriteValueAMD64(v *Value) bool {
return rewriteValueAMD64_OpAMD64LEAQ4(v)
case OpAMD64LEAQ8:
return rewriteValueAMD64_OpAMD64LEAQ8(v)
case OpAMD64MOVBELstore:
return rewriteValueAMD64_OpAMD64MOVBELstore(v)
case OpAMD64MOVBEQstore:
return rewriteValueAMD64_OpAMD64MOVBEQstore(v)
case OpAMD64MOVBQSX:
return rewriteValueAMD64_OpAMD64MOVBQSX(v)
case OpAMD64MOVBQSXload:
@ -3623,6 +3627,43 @@ func rewriteValueAMD64_OpAMD64BSWAPL(v *Value) bool {
v.copyOf(p)
return true
}
// match: (BSWAPL x:(MOVLload [i] {s} p mem))
// cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
// result: (MOVBELload [i] {s} p mem)
for {
x := v_0
if x.Op != OpAMD64MOVLload {
break
}
i := auxIntToInt32(x.AuxInt)
s := auxToSym(x.Aux)
mem := x.Args[1]
p := x.Args[0]
if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
break
}
v.reset(OpAMD64MOVBELload)
v.AuxInt = int32ToAuxInt(i)
v.Aux = symToAux(s)
v.AddArg2(p, mem)
return true
}
// match: (BSWAPL (MOVBELload [i] {s} p m))
// result: (MOVLload [i] {s} p m)
for {
if v_0.Op != OpAMD64MOVBELload {
break
}
i := auxIntToInt32(v_0.AuxInt)
s := auxToSym(v_0.Aux)
m := v_0.Args[1]
p := v_0.Args[0]
v.reset(OpAMD64MOVLload)
v.AuxInt = int32ToAuxInt(i)
v.Aux = symToAux(s)
v.AddArg2(p, m)
return true
}
return false
}
func rewriteValueAMD64_OpAMD64BSWAPQ(v *Value) bool {
@ -3637,6 +3678,43 @@ func rewriteValueAMD64_OpAMD64BSWAPQ(v *Value) bool {
v.copyOf(p)
return true
}
// match: (BSWAPQ x:(MOVQload [i] {s} p mem))
// cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
// result: (MOVBEQload [i] {s} p mem)
for {
x := v_0
if x.Op != OpAMD64MOVQload {
break
}
i := auxIntToInt32(x.AuxInt)
s := auxToSym(x.Aux)
mem := x.Args[1]
p := x.Args[0]
if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
break
}
v.reset(OpAMD64MOVBEQload)
v.AuxInt = int32ToAuxInt(i)
v.Aux = symToAux(s)
v.AddArg2(p, mem)
return true
}
// match: (BSWAPQ (MOVBEQload [i] {s} p m))
// result: (MOVQload [i] {s} p m)
for {
if v_0.Op != OpAMD64MOVBEQload {
break
}
i := auxIntToInt32(v_0.AuxInt)
s := auxToSym(v_0.Aux)
m := v_0.Args[1]
p := v_0.Args[0]
v.reset(OpAMD64MOVQload)
v.AuxInt = int32ToAuxInt(i)
v.Aux = symToAux(s)
v.AddArg2(p, m)
return true
}
return false
}
func rewriteValueAMD64_OpAMD64BTCLconst(v *Value) bool {
@ -9395,6 +9473,52 @@ func rewriteValueAMD64_OpAMD64LEAQ8(v *Value) bool {
}
return false
}
func rewriteValueAMD64_OpAMD64MOVBELstore(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVBELstore [i] {s} p (BSWAPL x) m)
// result: (MOVLstore [i] {s} p x m)
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
p := v_0
if v_1.Op != OpAMD64BSWAPL {
break
}
x := v_1.Args[0]
m := v_2
v.reset(OpAMD64MOVLstore)
v.AuxInt = int32ToAuxInt(i)
v.Aux = symToAux(s)
v.AddArg3(p, x, m)
return true
}
return false
}
func rewriteValueAMD64_OpAMD64MOVBEQstore(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVBEQstore [i] {s} p (BSWAPQ x) m)
// result: (MOVQstore [i] {s} p x m)
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
p := v_0
if v_1.Op != OpAMD64BSWAPQ {
break
}
x := v_1.Args[0]
m := v_2
v.reset(OpAMD64MOVQstore)
v.AuxInt = int32ToAuxInt(i)
v.Aux = symToAux(s)
v.AddArg3(p, x, m)
return true
}
return false
}
func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
@ -12225,6 +12349,28 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVLstore [i] {s} p x:(BSWAPL w) mem)
// cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
// result: (MOVBELstore [i] {s} p w mem)
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
p := v_0
x := v_1
if x.Op != OpAMD64BSWAPL {
break
}
w := x.Args[0]
mem := v_2
if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
break
}
v.reset(OpAMD64MOVBELstore)
v.AuxInt = int32ToAuxInt(i)
v.Aux = symToAux(s)
v.AddArg3(p, w, mem)
return true
}
return false
}
func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool {
@ -13164,6 +13310,28 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVQstore [i] {s} p x:(BSWAPQ w) mem)
// cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
// result: (MOVBEQstore [i] {s} p w mem)
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
p := v_0
x := v_1
if x.Op != OpAMD64BSWAPQ {
break
}
w := x.Args[0]
mem := v_2
if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
break
}
v.reset(OpAMD64MOVBEQstore)
v.AuxInt = int32ToAuxInt(i)
v.Aux = symToAux(s)
v.AddArg3(p, w, mem)
return true
}
return false
}
func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool {
@ -18657,6 +18825,81 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
}
break
}
// match: (ORQ x0:(MOVBELload [i0] {s} p mem) sh:(SHLQconst [32] x1:(MOVBELload [i1] {s} p mem)))
// cond: i0 == i1+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
// result: @mergePoint(b,x0,x1) (MOVBEQload [i1] {s} p mem)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x0 := v_0
if x0.Op != OpAMD64MOVBELload {
continue
}
i0 := auxIntToInt32(x0.AuxInt)
s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
sh := v_1
if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 {
continue
}
x1 := sh.Args[0]
if x1.Op != OpAMD64MOVBELload {
continue
}
i1 := auxIntToInt32(x1.AuxInt)
if auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
if p != x1.Args[0] || mem != x1.Args[1] || !(i0 == i1+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
continue
}
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(x1.Pos, OpAMD64MOVBEQload, typ.UInt64)
v.copyOf(v0)
v0.AuxInt = int32ToAuxInt(i1)
v0.Aux = symToAux(s)
v0.AddArg2(p, mem)
return true
}
break
}
// match: (ORQ x0:(MOVBELload [i] {s} p0 mem) sh:(SHLQconst [32] x1:(MOVBELload [i] {s} p1 mem)))
// cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p1, p0, 4) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
// result: @mergePoint(b,x0,x1) (MOVBEQload [i] {s} p1 mem)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x0 := v_0
if x0.Op != OpAMD64MOVBELload {
continue
}
i := auxIntToInt32(x0.AuxInt)
s := auxToSym(x0.Aux)
mem := x0.Args[1]
p0 := x0.Args[0]
sh := v_1
if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 {
continue
}
x1 := sh.Args[0]
if x1.Op != OpAMD64MOVBELload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
p1 := x1.Args[0]
if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p1, p0, 4) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
continue
}
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(x1.Pos, OpAMD64MOVBEQload, typ.UInt64)
v.copyOf(v0)
v0.AuxInt = int32ToAuxInt(i)
v0.Aux = symToAux(s)
v0.AddArg2(p1, mem)
return true
}
break
}
return false
}
func rewriteValueAMD64_OpAMD64ORQconst(v *Value) bool {
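
The rules added above fuse a BSWAPL/BSWAPQ with an adjacent load or store into the MOVBE forms when the target allows it (buildcfg.GOAMD64 >= 3, i.e. GOAMD64=v3 or higher). A Go-level pattern that can benefit, as a hedged illustration; the exact instructions emitted depend on the compiler version and build flags:

package main

import (
	"encoding/binary"
	"fmt"
)

// readBE32 is recognized by the compiler as a 32-bit load plus byte swap;
// with the rules above and GOAMD64=v3 that pair can fold into a single
// MOVBEL load instead of MOVL followed by BSWAPL.
func readBE32(b []byte) uint32 {
	return binary.BigEndian.Uint32(b)
}

func main() {
	fmt.Println(readBE32([]byte{0x01, 0x02, 0x03, 0x04})) // 16909060 (0x01020304)
}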

View File

@ -1168,23 +1168,8 @@ func rewriteValuePPC64_OpCondSelect(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
// match: (CondSelect x y bool)
// cond: flagArg(bool) != nil
// result: (ISEL [2] x y bool)
for {
x := v_0
y := v_1
bool := v_2
if !(flagArg(bool) != nil) {
break
}
v.reset(OpPPC64ISEL)
v.AuxInt = int32ToAuxInt(2)
v.AddArg3(x, y, bool)
return true
}
// match: (CondSelect x y bool)
// cond: flagArg(bool) == nil
// result: (ISEL [2] x y (CMPWconst [0] bool))
// result: (ISEL [6] x y (CMPWconst [0] bool))
for {
x := v_0
y := v_1
@ -1193,7 +1178,7 @@ func rewriteValuePPC64_OpCondSelect(v *Value) bool {
break
}
v.reset(OpPPC64ISEL)
v.AuxInt = int32ToAuxInt(2)
v.AuxInt = int32ToAuxInt(6)
v0 := b.NewValue0(v.Pos, OpPPC64CMPWconst, types.TypeFlags)
v0.AuxInt = int32ToAuxInt(0)
v0.AddArg(bool)
@ -5910,6 +5895,28 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool {
v.AddArg(y)
return true
}
// match: (ISEL [6] x y (CMPWconst [0] (ISELB [c] one cmp)))
// result: (ISEL [c] x y cmp)
for {
if auxIntToInt32(v.AuxInt) != 6 {
break
}
x := v_0
y := v_1
if v_2.Op != OpPPC64CMPWconst || auxIntToInt32(v_2.AuxInt) != 0 {
break
}
v_2_0 := v_2.Args[0]
if v_2_0.Op != OpPPC64ISELB {
break
}
c := auxIntToInt32(v_2_0.AuxInt)
cmp := v_2_0.Args[1]
v.reset(OpPPC64ISEL)
v.AuxInt = int32ToAuxInt(c)
v.AddArg3(x, y, cmp)
return true
}
// match: (ISEL [2] x _ (FlagEQ))
// result: x
for {
@ -11372,6 +11379,28 @@ func rewriteValuePPC64_OpPPC64NEG(v *Value) bool {
v.AddArg(x)
return true
}
// match: (NEG (SUB x y))
// result: (SUB y x)
for {
if v_0.Op != OpPPC64SUB {
break
}
y := v_0.Args[1]
x := v_0.Args[0]
v.reset(OpPPC64SUB)
v.AddArg2(y, x)
return true
}
// match: (NEG (NEG x))
// result: x
for {
if v_0.Op != OpPPC64NEG {
break
}
x := v_0.Args[0]
v.copyOf(x)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64NOR(v *Value) bool {
@ -13912,6 +13941,8 @@ func rewriteValuePPC64_OpPPC64XOR(v *Value) bool {
}
func rewriteValuePPC64_OpPPC64XORconst(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
// match: (XORconst [c] (XORconst [d] x))
// result: (XORconst [c^d] x)
for {
@ -13936,6 +13967,60 @@ func rewriteValuePPC64_OpPPC64XORconst(v *Value) bool {
v.copyOf(x)
return true
}
// match: (XORconst [1] (ISELB [6] (MOVDconst [1]) cmp))
// result: (ISELB [2] (MOVDconst [1]) cmp)
for {
if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpPPC64ISELB || auxIntToInt32(v_0.AuxInt) != 6 {
break
}
cmp := v_0.Args[1]
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
break
}
v.reset(OpPPC64ISELB)
v.AuxInt = int32ToAuxInt(2)
v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
v0.AuxInt = int64ToAuxInt(1)
v.AddArg2(v0, cmp)
return true
}
// match: (XORconst [1] (ISELB [5] (MOVDconst [1]) cmp))
// result: (ISELB [1] (MOVDconst [1]) cmp)
for {
if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpPPC64ISELB || auxIntToInt32(v_0.AuxInt) != 5 {
break
}
cmp := v_0.Args[1]
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
break
}
v.reset(OpPPC64ISELB)
v.AuxInt = int32ToAuxInt(1)
v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
v0.AuxInt = int64ToAuxInt(1)
v.AddArg2(v0, cmp)
return true
}
// match: (XORconst [1] (ISELB [4] (MOVDconst [1]) cmp))
// result: (ISELB [0] (MOVDconst [1]) cmp)
for {
if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpPPC64ISELB || auxIntToInt32(v_0.AuxInt) != 4 {
break
}
cmp := v_0.Args[1]
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
break
}
v.reset(OpPPC64ISELB)
v.AuxInt = int32ToAuxInt(0)
v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
v0.AuxInt = int64ToAuxInt(1)
v.AddArg2(v0, cmp)
return true
}
return false
}
func rewriteValuePPC64_OpPanicBounds(v *Value) bool {
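
The new ISEL case above implements the "CR -> GPR -> CR" fold noted in the rules file: when the selected-on bool is itself a materialized comparison (an ISELB), the outer ISEL can consume the original condition-register bits directly instead of re-comparing the 0/1 value with zero. A Go-level shape where this can apply, assuming the branch is first converted into a CondSelect (hedged illustration; names are made up):

package main

import "fmt"

// selLess picks a or b depending on x < y. If the comparison result is
// materialized as a 0/1 value (ISELB) and the branch becomes a CondSelect,
// the fold above lets the select reuse the compare's flags directly.
func selLess(x, y, a, b int64) int64 {
	if x < y {
		return a
	}
	return b
}

func main() { fmt.Println(selLess(1, 2, 10, 20)) } // 10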

View File

@ -196,11 +196,7 @@ func shortcircuitBlock(b *Block) bool {
// Remove b's incoming edge from p.
b.removePred(cidx)
n := len(b.Preds)
ctl.Args[cidx].Uses--
ctl.Args[cidx] = ctl.Args[n]
ctl.Args[n] = nil
ctl.Args = ctl.Args[:n]
b.removePhiArg(ctl, cidx)
// Redirect p's outgoing edge to t.
p.Succs[pi] = Edge{t, len(t.Preds)}

View File

@ -2,7 +2,7 @@ package ssa_test
import (
cmddwarf "cmd/internal/dwarf"
"cmd/internal/str"
"cmd/internal/quoted"
"debug/dwarf"
"debug/elf"
"debug/macho"
@ -58,7 +58,7 @@ func TestStmtLines(t *testing.T) {
if extld == "" {
extld = "gcc"
}
extldArgs, err := str.SplitQuotedFields(extld)
extldArgs, err := quoted.Split(extld)
if err != nil {
t.Fatal(err)
}

View File

@ -302,12 +302,6 @@ func (v *Value) SetArg(i int, w *Value) {
v.Args[i] = w
w.Uses++
}
func (v *Value) RemoveArg(i int) {
v.Args[i].Uses--
copy(v.Args[i:], v.Args[i+1:])
v.Args[len(v.Args)-1] = nil // aid GC
v.Args = v.Args[:len(v.Args)-1]
}
func (v *Value) SetArgs1(a *Value) {
v.resetArgs()
v.AddArg(a)

View File

@ -108,6 +108,8 @@ func InitConfig() {
ir.Syms.Msanread = typecheck.LookupRuntimeFunc("msanread")
ir.Syms.Msanwrite = typecheck.LookupRuntimeFunc("msanwrite")
ir.Syms.Msanmove = typecheck.LookupRuntimeFunc("msanmove")
ir.Syms.Asanread = typecheck.LookupRuntimeFunc("asanread")
ir.Syms.Asanwrite = typecheck.LookupRuntimeFunc("asanwrite")
ir.Syms.Newobject = typecheck.LookupRuntimeFunc("newobject")
ir.Syms.Newproc = typecheck.LookupRuntimeFunc("newproc")
ir.Syms.Panicdivide = typecheck.LookupRuntimeFunc("panicdivide")
@ -1245,10 +1247,10 @@ func (s *state) instrument(t *types.Type, addr *ssa.Value, kind instrumentKind)
}
// instrumentFields instruments a read/write operation on addr.
// If it is instrumenting for MSAN and t is a struct type, it instruments
// If it is instrumenting for MSAN or ASAN and t is a struct type, it instruments
// operation for each field, instead of for the whole struct.
func (s *state) instrumentFields(t *types.Type, addr *ssa.Value, kind instrumentKind) {
if !base.Flag.MSan || !t.IsStruct() {
if !(base.Flag.MSan || base.Flag.ASan) || !t.IsStruct() {
s.instrument(t, addr, kind)
return
}
@ -1327,6 +1329,16 @@ func (s *state) instrument2(t *types.Type, addr, addr2 *ssa.Value, kind instrume
default:
panic("unreachable")
}
} else if base.Flag.ASan {
switch kind {
case instrumentRead:
fn = ir.Syms.Asanread
case instrumentWrite:
fn = ir.Syms.Asanwrite
default:
panic("unreachable")
}
needWidth = true
} else {
panic("unreachable")
}
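
With the change to instrumentFields above, -asan now mirrors -msan for struct accesses: each field of a struct read or write gets its own runtime call (asanread/asanwrite, wired up in InitConfig earlier in this diff) with that field's width, rather than one call covering the whole struct. A source-level illustration; the comment describes the rough effect, not exact generated code:

package main

type pair struct {
	a int32
	b int64
}

// Under -asan, the whole-struct write below is instrumented per field:
// roughly asanwrite(&p.a, 4) and asanwrite(&p.b, 8), instead of a single
// call spanning the entire struct (hedged description of the effect above).
func store(p *pair) {
	*p = pair{a: 1, b: 2}
}

func main() {
	var p pair
	store(&p)
	_ = p
}
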
@ -3002,7 +3014,7 @@ func (s *state) exprCheckPtr(n ir.Node, checkPtrOK bool) *ssa.Value {
}
// If n is addressable and can't be represented in
// SSA, then load just the selected field. This
// prevents false memory dependencies in race/msan
// prevents false memory dependencies in race/msan/asan
// instrumentation.
if ir.IsAddressable(n) && !s.canSSA(n) {
p := s.addr(n)
@ -4421,7 +4433,7 @@ func InitTables() {
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64)
sys.AMD64, sys.ARM64, sys.PPC64)
addF("math/bits", "Len32",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
@ -4430,7 +4442,7 @@ func InitTables() {
x := s.newValue1(ssa.OpZeroExt32to64, types.Types[types.TUINT64], args[0])
return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
},
sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
sys.ARM, sys.S390X, sys.MIPS, sys.Wasm)
addF("math/bits", "Len16",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
@ -6717,6 +6729,7 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
s.livenessMap, s.partLiveArgs = liveness.Compute(e.curfn, f, e.stkptrsize, pp)
emitArgInfo(e, f, pp)
argLiveBlockMap, argLiveValueMap := liveness.ArgLiveness(e.curfn, f, pp)
openDeferInfo := e.curfn.LSym.Func().OpenCodedDeferInfo
if openDeferInfo != nil {
@ -6774,6 +6787,8 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
// Progs that are in the set above and have that source position.
var inlMarksByPos map[src.XPos][]*obj.Prog
var argLiveIdx int = -1 // argument liveness info index
// Emit basic blocks
for i, b := range f.Blocks {
s.bstart[b.ID] = s.pp.Next
@ -6787,6 +6802,13 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
// preemptible, unless this function is "all unsafe".
s.pp.NextLive = objw.LivenessIndex{StackMapIndex: -1, IsUnsafePoint: liveness.IsUnsafe(f)}
if idx, ok := argLiveBlockMap[b.ID]; ok && idx != argLiveIdx {
argLiveIdx = idx
p := s.pp.Prog(obj.APCDATA)
p.From.SetConst(objabi.PCDATA_ArgLiveIndex)
p.To.SetConst(int64(idx))
}
// Emit values in block
Arch.SSAMarkMoves(&s, b)
for _, v := range b.Values {
@ -6843,6 +6865,13 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
Arch.SSAGenValue(&s, v)
}
if idx, ok := argLiveValueMap[v.ID]; ok && idx != argLiveIdx {
argLiveIdx = idx
p := s.pp.Prog(obj.APCDATA)
p.From.SetConst(objabi.PCDATA_ArgLiveIndex)
p.To.SetConst(int64(idx))
}
if base.Ctxt.Flag_locationlists {
valueToProgAfter[v.ID] = s.pp.Next
}
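
The intrinsics change above registers math/bits.Len32 as a direct BitLen32 op on ppc64 as well; previously ppc64 took the generic path that zero-extends to 64 bits and uses BitLen64. A call that now hits the dedicated intrinsic, as an illustration:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// With the table change above, this lowers to the BitLen32 SSA op on
	// ppc64 too, instead of going through ZeroExt32to64 + BitLen64.
	fmt.Println(bits.Len32(0b1011)) // 4
}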

View File

@ -130,7 +130,7 @@ func testSyntaxErrors(t *testing.T, filename string) {
var mode Mode
if strings.HasSuffix(filename, ".go2") {
mode = AllowGenerics | AllowTypeSets
mode = AllowGenerics
}
ParseFile(filename, func(err error) {
e, ok := err.(Error)

View File

@ -586,35 +586,54 @@ func (p *parser) typeDecl(group *Group) Decl {
d.Pragma = p.takePragma()
d.Name = p.name()
if p.tok == _Lbrack {
// array/slice or generic type
if p.allowGenerics() && p.tok == _Lbrack {
// d.Name "[" ...
// array/slice or type parameter list
pos := p.pos()
p.next()
switch p.tok {
case _Rbrack:
p.next()
d.Type = p.sliceType(pos)
case _Name:
// array or generic type
p.xnest++
x := p.expr()
p.xnest--
if name0, ok := x.(*Name); p.allowGenerics() && ok && p.tok != _Rbrack {
// generic type
d.TParamList = p.paramList(name0, _Rbrack, true)
pos := p.pos()
if p.gotAssign() {
p.syntaxErrorAt(pos, "generic type cannot be alias")
// d.Name "[" name ...
// array or type parameter list
name := p.name()
// Index or slice expressions are never constant and thus invalid
// array length expressions. Thus, if we see a "[" following name
// we can safely assume that "[" name starts a type parameter list.
var x Expr // x != nil means x is the array length expression
if p.tok != _Lbrack {
// d.Name "[" name ...
// If we reach here, the next token is not a "[", and we need to
// parse the expression starting with name. If that expression is
// just that name, not followed by a "]" (in which case we might
// have the array length "[" name "]"), we can also safely assume
// a type parameter list.
p.xnest++
// To parse the expression starting with name, expand the call
// sequence we would get by passing in name to parser.expr, and
// pass in name to parser.pexpr.
x = p.binaryExpr(p.pexpr(name, false), 0)
p.xnest--
if x == name && p.tok != _Rbrack {
x = nil
}
}
if x == nil {
// d.Name "[" name ...
// type parameter list
d.TParamList = p.paramList(name, _Rbrack, true)
d.Alias = p.gotAssign()
d.Type = p.typeOrNil()
} else {
// d.Name "[" x "]" ...
// x is the array length expression
if debug && x == nil {
panic("length expression is nil")
}
d.Type = p.arrayType(pos, x)
}
case _Rbrack:
// d.Name "[" "]" ...
p.next()
d.Type = p.sliceType(pos)
default:
// d.Name "[" ...
d.Type = p.arrayType(pos, nil)
}
} else {
@ -689,15 +708,7 @@ func (p *parser) funcDeclOrNil() *FuncDecl {
}
f.Name = p.name()
if p.allowGenerics() && p.got(_Lbrack) {
if p.tok == _Rbrack {
p.syntaxError("empty type parameter list")
p.next()
} else {
f.TParamList = p.paramList(nil, _Rbrack, true)
}
}
f.Type = p.funcType()
f.TParamList, f.Type = p.funcType("")
if p.tok == _Lbrace {
f.Body = p.funcBody()
}
@ -729,14 +740,16 @@ func (p *parser) expr() Expr {
defer p.trace("expr")()
}
return p.binaryExpr(0)
return p.binaryExpr(nil, 0)
}
// Expression = UnaryExpr | Expression binary_op Expression .
func (p *parser) binaryExpr(prec int) Expr {
func (p *parser) binaryExpr(x Expr, prec int) Expr {
// don't trace binaryExpr - only leads to overly nested trace output
x := p.unaryExpr()
if x == nil {
x = p.unaryExpr()
}
for (p.tok == _Operator || p.tok == _Star) && p.prec > prec {
t := new(Operation)
t.pos = p.pos()
@ -744,7 +757,7 @@ func (p *parser) binaryExpr(prec int) Expr {
tprec := p.prec
p.next()
t.X = x
t.Y = p.binaryExpr(tprec)
t.Y = p.binaryExpr(nil, tprec)
x = t
}
return x
@ -839,7 +852,7 @@ func (p *parser) unaryExpr() Expr {
// TODO(mdempsky): We need parens here so we can report an
// error for "(x) := true". It should be possible to detect
// and reject that more efficiently though.
return p.pexpr(true)
return p.pexpr(nil, true)
}
// callStmt parses call-like statements that can be preceded by 'defer' and 'go'.
@ -853,7 +866,7 @@ func (p *parser) callStmt() *CallStmt {
s.Tok = p.tok // _Defer or _Go
p.next()
x := p.pexpr(p.tok == _Lparen) // keep_parens so we can report error below
x := p.pexpr(nil, p.tok == _Lparen) // keep_parens so we can report error below
if t := unparen(x); t != x {
p.errorAt(x.Pos(), fmt.Sprintf("expression in %s must not be parenthesized", s.Tok))
// already progressed, no need to advance
@ -923,7 +936,7 @@ func (p *parser) operand(keep_parens bool) Expr {
case _Func:
pos := p.pos()
p.next()
ftyp := p.funcType()
_, ftyp := p.funcType("function literal")
if p.tok == _Lbrace {
p.xnest++
@ -969,12 +982,14 @@ func (p *parser) operand(keep_parens bool) Expr {
// "]" .
// TypeAssertion = "." "(" Type ")" .
// Arguments = "(" [ ( ExpressionList | Type [ "," ExpressionList ] ) [ "..." ] [ "," ] ] ")" .
func (p *parser) pexpr(keep_parens bool) Expr {
func (p *parser) pexpr(x Expr, keep_parens bool) Expr {
if trace {
defer p.trace("pexpr")()
}
x := p.operand(keep_parens)
if x == nil {
x = p.operand(keep_parens)
}
loop:
for {
@ -1261,7 +1276,8 @@ func (p *parser) typeOrNil() Expr {
case _Func:
// fntype
p.next()
return p.funcType()
_, t := p.funcType("function type")
return t
case _Lbrack:
// '[' oexpr ']' ntype
@ -1334,18 +1350,34 @@ func (p *parser) typeInstance(typ Expr) Expr {
return x
}
func (p *parser) funcType() *FuncType {
// If context != "", type parameters are not permitted.
func (p *parser) funcType(context string) ([]*Field, *FuncType) {
if trace {
defer p.trace("funcType")()
}
typ := new(FuncType)
typ.pos = p.pos()
var tparamList []*Field
if p.allowGenerics() && p.got(_Lbrack) {
if context != "" {
// accept but complain
p.syntaxErrorAt(typ.pos, context+" cannot have type parameters")
}
if p.tok == _Rbrack {
p.syntaxError("empty type parameter list")
p.next()
} else {
tparamList = p.paramList(nil, _Rbrack, true)
}
}
p.want(_Lparen)
typ.ParamList = p.paramList(nil, _Rparen, false)
typ.ResultList = p.funcResult()
return typ
return tparamList, typ
}
// "[" has already been consumed, and pos is its position.
@ -1674,11 +1706,13 @@ func (p *parser) methodDecl() *Field {
// already progressed, no need to advance
}
const context = "interface method"
switch p.tok {
case _Lparen:
// method
f.Name = name
f.Type = p.funcType()
_, f.Type = p.funcType(context)
case _Lbrack:
if p.allowGenerics() {
@ -1698,7 +1732,7 @@ func (p *parser) methodDecl() *Field {
// name[](
p.errorAt(pos, "empty type parameter list")
f.Name = name
f.Type = p.funcType()
_, f.Type = p.funcType(context)
} else {
p.errorAt(pos, "empty type argument list")
f.Type = name
@ -1715,7 +1749,7 @@ func (p *parser) methodDecl() *Field {
// as if [] were absent.
if p.tok == _Lparen {
f.Name = name
f.Type = p.funcType()
_, f.Type = p.funcType(context)
} else {
f.Type = name
}
@ -1726,7 +1760,7 @@ func (p *parser) methodDecl() *Field {
if list[0].Name != nil {
// generic method
f.Name = name
f.Type = p.funcType()
_, f.Type = p.funcType(context)
// TODO(gri) Record list as type parameter list with f.Type
// if we want to type-check the generic method.
// For now, report an error so this is not a silent event.
@ -1816,11 +1850,11 @@ func (p *parser) embeddedTerm() Expr {
// ParameterDecl = [ IdentifierList ] [ "..." ] Type .
func (p *parser) paramDeclOrNil(name *Name, follow token) *Field {
if trace {
defer p.trace("paramDecl")()
defer p.trace("paramDeclOrNil")()
}
// type set notation is ok in type parameter lists
typeSetsOk := p.mode&AllowTypeSets != 0 && follow == _Rbrack
typeSetsOk := follow == _Rbrack
pos := p.pos()
if name != nil {
@ -1849,6 +1883,11 @@ func (p *parser) paramDeclOrNil(name *Name, follow token) *Field {
// name "[" n "]" E
f.Name = name
}
if typeSetsOk && p.tok == _Operator && p.op == Or {
// name "[" ... "]" "|" ...
// name "[" n "]" E "|" ...
f = p.embeddedElem(f)
}
return f
}
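
For the type-declaration change above, these are the two source shapes the parser has to tell apart after `type` Name `[` followed by a name: an array type whose length is a single name, and a generic type whose type parameter list starts with a name. Illustrative Go 1.18-style source with made-up identifiers; the last declaration also shows the union-element notation that paramDeclOrNil now accepts in type parameter lists:

package main

const n = 4

type vec [n]int      // "[" name "]" ...  -> array type, n is the length
type list[T any] []T // "[" name ...      -> type parameter list

type numeric[T int | float64] struct{ v T } // union element in the constraint

func main() {}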

View File

@ -26,11 +26,11 @@ var (
)
func TestParse(t *testing.T) {
ParseFile(*src_, func(err error) { t.Error(err) }, nil, AllowGenerics|AllowTypeSets)
ParseFile(*src_, func(err error) { t.Error(err) }, nil, AllowGenerics)
}
func TestVerify(t *testing.T) {
ast, err := ParseFile(*src_, func(err error) { t.Error(err) }, nil, AllowGenerics|AllowTypeSets)
ast, err := ParseFile(*src_, func(err error) { t.Error(err) }, nil, AllowGenerics)
if err != nil {
return // error already reported
}
@ -46,7 +46,7 @@ func TestParseGo2(t *testing.T) {
for _, fi := range list {
name := fi.Name()
if !fi.IsDir() && !strings.HasPrefix(name, ".") {
ParseFile(filepath.Join(dir, name), func(err error) { t.Error(err) }, nil, AllowGenerics|AllowTypeSets)
ParseFile(filepath.Join(dir, name), func(err error) { t.Error(err) }, nil, AllowGenerics)
}
}
}

Some files were not shown because too many files have changed in this diff