Mirror of https://github.com/golang/go (synced 2024-11-11 23:50:22 -07:00)
[dev.typeparams] all: merge master (2725522) into dev.typeparams

Merge List:

+ 2021-06-01 272552275f A+C: update name
+ 2021-06-01 2bec019fb5 doc/go1.17: add release notes for register ABI
+ 2021-06-01 2e59cc5fb4 cmd/go: add [-src] to documentation
+ 2021-06-01 0b80cf1136 cmd/go: make 'go get' save sums for incidentally updated modules
+ 2021-05-30 3b770f2ccb go/types: don't declare 'comparable' when typeparams are disabled
+ 2021-05-30 1607c28172 go/types: unexport the GoVersion configuration option for Go 1.17
+ 2021-05-29 79bda65041 doc/go1.17: mention time.Layout
+ 2021-05-29 f6cc392d1d doc/go1.17: document text/template/parse.SkipFuncCheck
+ 2021-05-28 1419ca7cea doc/go1.17: mention new definitions of MSG_CMSG_CLOEXEC
+ 2021-05-28 6624771c83 doc/go1.17: mention testing.[TB].Setenv methods
+ 2021-05-28 bbda923592 doc/go1.17: mention new Windows SysProcAttr fields
+ 2021-05-28 6f58088bd8 doc/go1.17: document new go/build/BuildContext.ToolTags field
+ 2021-05-28 c295107708 doc/go1.17: mention new encoding/csv/Reader.FieldPos method
+ 2021-05-28 ccd9784edf doc/go1.17: document new debug/elf constant
+ 2021-05-28 3de3440fb9 go/ast: remove FuncDecl.IsMethod for Go 1.17
+ 2021-05-27 639acdc833 doc/go1.17: clarify that compress/lzw Reader and Writer types are new
+ 2021-05-27 193d514131 net/http: correct Client.Do doc about context cancelation
+ 2021-05-27 ab2ef4aaa7 doc/go1.17: document reflect changes
+ 2021-05-27 0ece95a0fe cmd/go: don't let 'go mod download' save sums for inconsistent requirements
+ 2021-05-27 cdcd02842d net: verify results from Lookup* are valid domain names
+ 2021-05-27 8bf5bf5173 cmd/compile: improve debug locations for partially live in-params
+ 2021-05-27 56af34f875 cmd/compile: place reg spills after OpArg{Int,Float}Reg ops
+ 2021-05-27 db66e9e15d cmd/link: accept Windows line-ending in TestTrampolineCgo
+ 2021-05-27 6b8c94b6c5 go/types: guard against check==nil in newNamed
+ 2021-05-27 fca7b8f3e6 Revert "net: verify results from Lookup* are valid domain names"
+ 2021-05-27 950fa11c4c net/http/httputil: always remove hop-by-hop headers
+ 2021-05-27 9bc52686da cmd/go,cmd/link: do not check for staleness in most tests
+ 2021-05-27 6ff0ae2aa4 crypto/elliptic: fix typo in p521Point type name
+ 2021-05-26 3075ffc93e os: deflake TestFdReadRace
+ 2021-05-26 a62c08734f src/os: revert accidentally submitted change
+ 2021-05-26 1d5298d46a doc/go1.17: document net/... changes
+ 2021-05-26 0fbecece98 doc/go1.17: document syscall changes
+ 2021-05-26 02beecb397 mime: document use of the Shared MIME-Info Database
+ 2021-05-26 a92460fd2f doc/go1.17: add release notes for runtime/metrics package
+ 2021-05-26 55aefbb268 doc/go1.17: mention enabling frame pointer on all ARM64
+ 2021-05-26 39da9ae513 go/types: ensure that Named.check is nilled out once it is expanded
+ 2021-05-26 bfd7798a6c runtime,cmd/link/internal/ld: fix typos
+ 2021-05-26 e4615ad74d math/big: move division into natdiv.go
+ 2021-05-26 d050238bb6 doc/go1.17: fix formatting for time changes
+ 2021-05-26 74242baa41 archive/zip: only preallocate File slice if reasonably sized

Change-Id: I8a02edee1a6889547c52aa28c53cf8250766ab2c
commit c9d1a2bdd2

AUTHORS | 2

@@ -41,7 +41,7 @@ Aeneas Rekkas (arekkas) <aeneas@ory.am>
 Afanasev Stanislav <phpprogger@gmail.com>
 Agis Anastasopoulos <agis.anast@gmail.com>
 Agniva De Sarker <agnivade@yahoo.co.in>
-Ahmed Wahed <oneofone@gmail.com>
+Ahmed W. Mones <oneofone@gmail.com>
 Ahmet Soormally <ahmet@mangomm.co.uk>
 Ahmy Yulrizka <yulrizka@gmail.com>
 Aiden Scandella <ai@uber.com>
@@ -67,7 +67,7 @@ Aeneas Rekkas (arekkas) <aeneas@ory.am>
 Afanasev Stanislav <phpprogger@gmail.com>
 Agis Anastasopoulos <agis.anast@gmail.com>
 Agniva De Sarker <agnivade@yahoo.co.in>
-Ahmed Wahed <oneofone@gmail.com>
+Ahmed W. Mones <oneofone@gmail.com>
 Ahmet Alp Balkan <ahmetb@google.com>
 Ahmet Soormally <ahmet@mangomm.co.uk>
 Ahmy Yulrizka <yulrizka@gmail.com>
doc/go1.17.html | 196

@@ -68,6 +68,14 @@ Do not send CLs removing the interior tags from such phrases.
   OpenBSD.
 </p>
 
+<h3 id="arm64">ARM64</h3>
+
+<p><!-- CL 288814 -->
+  Go programs now maintain stack frame pointers on the 64-bit ARM
+  architecture on all operating systems. Previously it maintained
+  stack frame pointers only on Linux, macOS, and iOS.
+</p>
+
 <p>
   TODO: complete the Ports section
 </p>
@@ -218,31 +226,57 @@ Do not send CLs removing the interior tags from such phrases.
 
 <h2 id="runtime">Runtime</h2>
 
-<p><!-- CL 304470 -->
-  TODO: <a href="https://golang.org/cl/304470">https://golang.org/cl/304470</a>: cmd/compile, runtime: add metadata for argument printing in traceback
-</p>
-
 <p>
   TODO: complete the Runtime section
 </p>
 
 <h2 id="compiler">Compiler</h2>
 
-<p><!-- CL 283112, golang.org/issue/28727 -->
+<p><!-- golang.org/issue/40724 -->
+  Go 1.17 implements a new way of passing function arguments and results using
+  registers instead of the stack. This work is enabled for Linux, MacOS, and
+  Windows on the 64-bit x86 architecture (the <code>linux/amd64</code>,
+  <code>darwin/amd64</code>, <code>windows/amd64</code> ports). For a
+  representative set of Go packages and programs, benchmarking has shown
+  performance improvements of about 5%, and a typical reduction in binary size
+  of about 2%.
+</p>
+
+<p>
+  This change does not affect the functionality of any safe Go code. It can affect
+  code outside the <a href="/doc/go1compat">compatibility guidelines</a> with
+  minimal impact. To maintain compatibility with existing assembly functions,
+  adapter functions converting between the new register-based calling convention
+  and the previous stack-based calling convention (also known as ABI wrappers)
+  are sometimes used. This is mostly invisible to users, except for assembly
+  functions that have their addresses taken in Go. Using <code>reflect.ValueOf(fn).Pointer()</code>
+  (or similar approaches such as via <code>unsafe.Pointer</code>) to get the address
+  of an assembly function will now return the address of the ABI wrapper. This is
+  mostly harmless, except for special-purpose assembly code (such as accessing
+  thread-local storage or requiring a special stack alignment). Assembly functions
+  called indirectly from Go via <code>func</code> values will now be made through
+  ABI wrappers, which may cause a very small performance overhead. Also, calling
+  Go functions from assembly may now go through ABI wrappers, with a very small
+  performance overhead.
+</p>
+
+<p><!-- CL 304470 -->
+  The format of stack traces from the runtime (printed when an uncaught panic
+  occurs, or when <code>runtime.Stack</code> is called) is improved. Previously,
+  the function arguments were printed as hexadecimal words based on the memory
+  layout. Now each argument in the source code is printed separately, separated
+  by commas. Aggregate-typed (struct, array, string, slice, interface, and complex)
+  arguments are delimited by curly braces. A caveat is that the value of an
+  argument that only lives in a register and is not stored to memory may be
+  inaccurate. Results (which were usually inaccurate) are no longer printed.
+</p>
+
+<p><!-- CL 283112, golang.org/issue/28727 -->
   Functions containing closures can now be inlined. One effect of this change is
   that a function with a closure may actually produce a distinct closure function
   for each place that the function is inlined. Hence, this change could reveal
   bugs where Go functions are compared (incorrectly) by pointer value. Go
   functions are by definition not comparable.
-
-  TODO: complete the Compiler section, or delete if not needed
-</p>
-
-<h2 id="linker">Linker</h2>
-
-<p>
-  TODO: complete the Linker section, or delete if not needed
 </p>
 
 <h2 id="library">Core library</h2>
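For illustration only (not part of this commit): a minimal sketch of the improved traceback format described above. The program, addresses, and exact output are hypothetical; register-held arguments may still print inaccurately.

package main

func divide(a, b int, label string) int {
	return a / b // panics with a run-time divide-by-zero error when b == 0
}

func main() {
	divide(10, 0, "demo")
}

// With Go 1.17 the panic traceback prints each source-level argument
// separately, roughly like:
//     main.divide(0xa, 0x0, {0x4b7a5e, 0x4})
// The string argument appears as an aggregate in curly braces ({pointer, length})
// instead of the older flat hexadecimal memory words, and results are no longer printed.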
@@ -323,11 +357,16 @@ Do not send CLs removing the interior tags from such phrases.
 <dl id="compress/lzw"><dt><a href="/pkg/compress/lzw/">compress/lzw</a></dt>
   <dd>
     <p><!-- CL 273667 -->
-      The new
-      <a href="/pkg/compress/lzw/#Reader.Reset"><code>Reader.Reset</code></a>
-      and
-      <a href="/pkg/compress/lzw/#Writer.Reset"><code>Writer.Reset</code></a>
-      methods allow reuse of a <code>Reader</code> or <code>Writer</code>.
+      The <a href="/pkg/compress/lzw/#NewReader"><code>NewReader</code></a>
+      function is guaranteed to return a value of the new
+      type <a href="/pkg/compress/lzw/#Reader"><code>Reader</code></a>,
+      and similarly <a href="/pkg/compress/lzw/#NewWriter"><code>NewWriter</code></a>
+      is guaranteed to return a value of the new
+      type <a href="/pkg/compress/lzw/#Writer"><code>Writer</code></a>.
+      These new types both implement a <code>Reset</code> method
+      (<a href="/pkg/compress/lzw/#Reader.Reset"><code>Reader.Reset</code></a>,
+      <a href="/pkg/compress/lzw/#Writer.Reset"><code>Writer.Reset</code></a>)
+      that allows reuse of the <code>Reader</code> or <code>Writer</code>.
     </p>
   </dd>
 </dl><!-- compress/lzw -->
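For illustration only (not part of this commit): a small sketch of reusing the concrete compress/lzw types described above, assuming Go 1.17 semantics where NewReader/NewWriter return *lzw.Reader and *lzw.Writer dynamically.

package main

import (
	"bytes"
	"compress/lzw"
	"io"
	"os"
)

func main() {
	var compressed bytes.Buffer

	// NewWriter still returns io.WriteCloser, but the dynamic type is now *lzw.Writer.
	w := lzw.NewWriter(&compressed, lzw.LSB, 8).(*lzw.Writer)
	w.Write([]byte("hello, hello, hello"))
	w.Close()

	// Reuse the same Writer for a second stream instead of allocating a new one.
	var second bytes.Buffer
	w.Reset(&second, lzw.LSB, 8)
	w.Write([]byte("another payload"))
	w.Close()

	// The same pattern works on the reader side.
	r := lzw.NewReader(bytes.NewReader(compressed.Bytes()), lzw.LSB, 8).(*lzw.Reader)
	io.Copy(os.Stdout, r)
	r.Close()
}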
@@ -360,6 +399,15 @@ Do not send CLs removing the interior tags from such phrases.
   </dd>
 </dl><!-- database/sql -->
 
+<dl id="debug/elf"><dt><a href="/pkg/debug/elf/">debug/elf</a></dt>
+  <dd>
+    <p><!-- CL 239217 -->
+      The <a href="/pkg/debug/elf/#SHT_MIPS_ABIFLAGS"><code>SHT_MIPS_ABIFLAGS</code></a>
+      constant has been added.
+    </p>
+  </dd>
+</dl><!-- debug/elf -->
+
 <dl id="encoding/binary"><dt><a href="/pkg/encoding/binary/">encoding/binary</a></dt>
   <dd>
     <p><!-- CL 299531 -->
@@ -371,6 +419,18 @@ Do not send CLs removing the interior tags from such phrases.
   </dd>
 </dl><!-- encoding/binary -->
 
+<dl id="encoding/csv"><dt><a href="/pkg/encoding/csv/">encoding/csv</a></dt>
+  <dd>
+    <p><!-- CL 291290 -->
+      The new
+      <a href="/pkg/encoding/csv/#Reader.FieldPos"><code>Reader.FieldPos</code></a>
+      method returns the line and column corresponding to the start of
+      a given field in the record most recently returned by
+      <a href="/pkg/encoding/csv/#Reader.Read"><code>Read</code></a>.
+    </p>
+  </dd>
+</dl><!-- encoding/csv -->
+
 <dl id="flag"><dt><a href="/pkg/flag/">flag</a></dt>
   <dd>
     <p><!-- CL 271788 -->
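For illustration only (not part of this commit): a minimal sketch of the new Reader.FieldPos method, assuming Go 1.17. The input data is invented.

package main

import (
	"encoding/csv"
	"fmt"
	"strings"
)

func main() {
	in := "name,age\ngopher,10\n"
	r := csv.NewReader(strings.NewReader(in))
	for {
		rec, err := r.Read()
		if err != nil {
			break // io.EOF ends the loop; real code should distinguish other errors
		}
		// FieldPos reports the line and column of the start of field 1
		// in the record most recently returned by Read.
		line, col := r.FieldPos(1)
		fmt.Printf("field %q starts at line %d, column %d\n", rec[1], line, col)
	}
}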
@@ -379,6 +439,17 @@ Do not send CLs removing the interior tags from such phrases.
   </dd>
 </dl><!-- flag -->
 
+<dl id="go/build"><dt><a href="/pkg/go/build/">go/build</a></dt>
+  <dd>
+    <p><!-- CL 310732 -->
+      The new
+      <a href="/pkg/go/build/#Context.ToolTags"><code>Context.ToolTags</code></a>
+      field holds the build tags appropriate to the current Go
+      toolchain configuration.
+    </p>
+  </dd>
+</dl><!-- go/build -->
+
 <dl id="io/fs"><dt><a href="/pkg/io/fs/">io/fs</a></dt>
   <dd>
     <p><!-- CL 293649 -->
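For illustration only (not part of this commit): a one-liner sketch of reading the new field from the default build context, assuming Go 1.17.

package main

import (
	"fmt"
	"go/build"
)

func main() {
	// ToolTags lists the build tags implied by the current toolchain
	// configuration (for example, tags for enabled GOEXPERIMENTs).
	fmt.Println(build.Default.ToolTags)
}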
@@ -400,7 +471,9 @@ Do not send CLs removing the interior tags from such phrases.
 <dl id="mime"><dt><a href="/pkg/mime/">mime</a></dt>
   <dd>
     <p><!-- CL 305230 -->
-      TODO: <a href="https://golang.org/cl/305230">https://golang.org/cl/305230</a>: support reading shared mime-info database on unix systems
+      On Unix systems, the table of MIME types is now read from the local system's
+      <a href="https://specifications.freedesktop.org/shared-mime-info-spec/shared-mime-info-spec-0.21.html">Shared MIME-info Database</a>
+      when available.
     </p>
   </dd>
 </dl><!-- mime -->
@@ -408,15 +481,20 @@ Do not send CLs removing the interior tags from such phrases.
 <dl id="net"><dt><a href="/pkg/net/">net</a></dt>
   <dd>
     <p><!-- CL 272668 -->
-      TODO: <a href="https://golang.org/cl/272668">https://golang.org/cl/272668</a>: add IP.IsPrivate
+      The new method <a href="/pkg/net/#IP.IsPrivate"><code>IP.IsPrivate</code></a> reports whether an address is
+      a private IPv4 address according to <a href="http://tools.ietf.org/html/rfc1918">RFC 1918</a>
+      or a local IPv6 address according <a href="http://tools.ietf.org/html/rfc4193">RFC 4193</a>.
     </p>
 
     <p><!-- CL 301709 -->
-      TODO: <a href="https://golang.org/cl/301709">https://golang.org/cl/301709</a>: make go resolver aware of network parameter
+      The Go DNS resolver now only sends one DNS query when resolving an address for an IPv4-only or IPv6-only network,
+      rather than querying for both address families.
     </p>
 
     <p><!-- CL 307030 -->
-      TODO: <a href="https://golang.org/cl/307030">https://golang.org/cl/307030</a>: make ErrClosed and ParseError implement net.Error
+      The <a href="/pkg/net/#ErrClosed"><code>ErrClosed</code></a> sentinel error and
+      <a href="/pkg/net/#ParseError"><code>ParseError</code></a> error type now implement
+      the <a href="/pkg/net/#Error"><code>net.Error</code></a> interface.
     </p>
   </dd>
 </dl><!-- net -->
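For illustration only (not part of this commit): a short sketch of IP.IsPrivate, assuming Go 1.17. The sample addresses are arbitrary.

package main

import (
	"fmt"
	"net"
)

func main() {
	for _, s := range []string{"10.1.2.3", "192.168.0.7", "8.8.8.8", "fd00::1"} {
		ip := net.ParseIP(s)
		fmt.Printf("%-12s private=%v\n", s, ip.IsPrivate())
	}
	// 10.1.2.3 and 192.168.0.7 are RFC 1918 addresses and fd00::1 is an
	// RFC 4193 unique local address, so those report true; 8.8.8.8 reports false.
}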
@@ -431,7 +509,9 @@ Do not send CLs removing the interior tags from such phrases.
     </p>
 
     <p><!-- CL 235437 -->
-      TODO: <a href="https://golang.org/cl/235437">https://golang.org/cl/235437</a>: add to deadlines only when positive
+      Setting the <a href="/pkg/net/http/#Server"><code>Server</code></a>
+      <code>ReadTimeout</code> or <code>WriteTimeout</code> fields to a negative value now indicates no timeout
+      rather than an immediate timeout.
     </p>
 
     <p><!-- CL 308952 -->
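For illustration only (not part of this commit): a sketch of the negative-timeout behaviour described above, assuming Go 1.17. The address is arbitrary.

package main

import "net/http"

func main() {
	srv := &http.Server{
		Addr: ":8080",
		// With this change a negative value explicitly means "no timeout"
		// rather than an immediate timeout; zero continues to mean no timeout.
		ReadTimeout:  -1,
		WriteTimeout: -1,
	}
	_ = srv.ListenAndServe()
}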
@@ -444,7 +524,10 @@ Do not send CLs removing the interior tags from such phrases.
 <dl id="net/http/httptest"><dt><a href="/pkg/net/http/httptest/">net/http/httptest</a></dt>
   <dd>
     <p><!-- CL 308950 -->
-      TODO: <a href="https://golang.org/cl/308950">https://golang.org/cl/308950</a>: panic on non-3 digit (XXX) status code in Recorder.WriteHeader
+      <a href="/pkg/net/http/httptest/#ResponseRecorder.WriteHeader"><code>ResponseRecorder.WriteHeader></code></a>
+      now panics when the provided code is not a valid three-digit HTTP status code.
+      This matches the behavior of <a href="/pkg/net/http/#ResponseWriter"><code>ResponseWriter></code></a>
+      implementations in the <a href="/pkg/net/http/"><code>net/http</code></a> package.
     </p>
   </dd>
 </dl><!-- net/http/httptest -->
@@ -452,7 +535,8 @@ Do not send CLs removing the interior tags from such phrases.
 <dl id="net/url"><dt><a href="/pkg/net/url/">net/url</a></dt>
   <dd>
     <p><!-- CL 314850 -->
-      TODO: <a href="https://golang.org/cl/314850">https://golang.org/cl/314850</a>: add Values.Has
+      The new method <a href="/pkg/net/url/#Values.Has"><code>Values.Has</code></a>
+      reports whether a query parameter is set.
    </p>
   </dd>
 </dl><!-- net/url -->
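For illustration only (not part of this commit): a sketch of Values.Has, assuming Go 1.17. The query string is invented.

package main

import (
	"fmt"
	"net/url"
)

func main() {
	v, _ := url.ParseQuery("debug=&name=gopher")
	// Has reports whether the key is present at all, even when its value is
	// empty, which Get alone cannot distinguish from a missing key.
	fmt.Println(v.Has("debug"))   // true
	fmt.Println(v.Get("debug"))   // "" (empty value)
	fmt.Println(v.Has("missing")) // false
}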
@@ -479,15 +563,27 @@ Do not send CLs removing the interior tags from such phrases.
     </p>
 
     <p><!-- CL 281233 -->
-      TODO: <a href="https://golang.org/cl/281233">https://golang.org/cl/281233</a>: add VisibleFields function
+      The new <a href="/pkg/reflect/#VisibleFields"><code>VisibleFields</code></a> function
+      returns all the visible fields in a struct type, including fields inside anonymous struct members.
     </p>
 
     <p><!-- CL 284136 -->
-      TODO: <a href="https://golang.org/cl/284136">https://golang.org/cl/284136</a>: panic if ArrayOf is called with negative length
+      The <a href="/pkg/reflect/#ArrayOf"><code>ArrayOf</code></a> function now panics when
+      called with a negative length.
     </p>
   </dd>
 </dl><!-- reflect -->
 
+<dl id="runtime/metrics"><dt><a href="/pkg/runtime/metrics">runtime/metrics</a></dt>
+  <dd>
+    <p><!-- CL 308933, CL 312431, CL 312909 -->
+      New metrics were added that track total bytes and objects allocated and freed.
+      A new metric tracking the distribution of goroutine scheduling latencies was
+      also added.
+    </p>
+  </dd>
+</dl><!-- runtime/metrics -->
+
 <dl id="strconv"><dt><a href="/pkg/strconv/">strconv</a></dt>
   <dd>
     <p><!-- CL 170079 -->
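For illustration only (not part of this commit): a sketch of reflect.VisibleFields including promoted fields, assuming Go 1.17. The types are invented.

package main

import (
	"fmt"
	"reflect"
)

type Base struct{ ID int }

type User struct {
	Base // embedded: its fields are promoted
	Name string
}

func main() {
	// VisibleFields flattens fields of anonymous struct members,
	// which iterating with reflect.Type.Field alone does not.
	for _, f := range reflect.VisibleFields(reflect.TypeOf(User{})) {
		fmt.Println(f.Name)
	}
	// Prints: Base, ID, Name
}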
@@ -530,11 +626,30 @@ Do not send CLs removing the interior tags from such phrases.
 <dl id="syscall"><dt><a href="/pkg/syscall/">syscall</a></dt>
   <dd>
     <p><!-- CL 295371 -->
-      TODO: <a href="https://golang.org/cl/295371">https://golang.org/cl/295371</a>: do not overflow key memory in GetQueuedCompletionStatus
+    <p>
+      The <a href="/pkg/syscall/#GetQueuedCompletionStatus"><code>GetQueuedCompletionStatus</code></a> and
+      <a href="/pkg/syscall/#PostQueuedCompletionStatus"><code>PostQueuedCompletionStatus</code></a>
+      functions are now deprecated. These functions have incorrect signatures and are superseded by
+      equivalents in the <a href="https://godoc.org/golang.org/x/sys/windows"><code>golang.org/x/sys/windows</code></a> package.
     </p>
 
     <p><!-- CL 313653 -->
-      TODO: <a href="https://golang.org/cl/313653">https://golang.org/cl/313653</a>: restore signal mask after setting foreground process group
+      On Unix-like systems, the process group of a child process is now set with signals blocked.
+      This avoids sending a <code>SIGTTOU</code> to the child when the parent is in a background process group.
+    </p>
+
+    <p><!-- CL 288298, CL 288300 -->
+      The Windows version of
+      <a href="/pkg/syscall/#SysProcAttr"><code>SysProcAttr</code></a>
+      has two new fields. <code>AdditionalInheritedHandles</code> is
+      a list of additional handles to be inherited by the new child
+      process. <code>ParentProcess</code> permits specifying the
+      parent process of the new process.
+
+    <p><!-- CL 311570 -->
+      The constant <code>MSG_CMSG_CLOEXEC</code> is now defined on
+      DragonFly and all OpenBSD systems (it was already defined on
+      some OpenBSD systems and all FreeBSD, NetBSD, and Linux systems).
     </p>
   </dd>
 </dl><!-- syscall -->
@@ -544,13 +659,22 @@ Do not send CLs removing the interior tags from such phrases.
     <p><!-- CL 310033 -->
       TODO: <a href="https://golang.org/cl/310033">https://golang.org/cl/310033</a>: add -shuffle=off|on|N to alter the execution order of tests and benchmarks
     </p>
 
+    <p><!-- CL 260577 -->
+      The new
+      <a href="/pkg/testing/#T.Setenv"><code>T.Setenv</code></a>
+      and <a href="/pkg/testing/#B.Setenv"><code>B.Setenv</code></a>
+      methods support setting an environment variable for the duration
+      of the test or benchmark.
+    </p>
   </dd>
 </dl><!-- testing -->
 
 <dl id="text/template/parse"><dt><a href="/pkg/text/template/parse/">text/template/parse</a></dt>
   <dd>
     <p><!-- CL 301493 -->
-      TODO: <a href="https://golang.org/cl/301493">https://golang.org/cl/301493</a>: add a mode to skip func-check on parsing
+      The new <a href="/pkg/text/template/parse/#Mode"><code>SkipFuncCheck</code></a> <a href=><code>Mode</code></a>
+      value changes the template parser to not verify that functions are defined.
     </p>
   </dd>
 </dl><!-- text/template/parse -->
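For illustration only (not part of this commit): a sketch of T.Setenv, assuming Go 1.17. The test name and variable are invented.

package example

import (
	"os"
	"testing"
)

func TestLoadsConfigFromEnv(t *testing.T) {
	// Setenv sets the variable for the duration of this test and restores
	// the previous value via Cleanup. It panics if the test uses t.Parallel.
	t.Setenv("APP_CONFIG", "/tmp/test-config.json")

	if got := os.Getenv("APP_CONFIG"); got != "/tmp/test-config.json" {
		t.Fatalf("APP_CONFIG = %q, want test value", got)
	}
}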
@@ -558,9 +682,10 @@ Do not send CLs removing the interior tags from such phrases.
 <dl id="time"><dt><a href="/pkg/time/">time</a></dt>
   <dd>
     <p><!-- CL 260858 -->
-      time.Time now has a <a href="/pkg/time/#Time.GoString">GoString</a>
-      method that will return a more useful value for times when printed with
-      the <code>"%#v"</code> format specifier in the fmt package.
+      The <a href="/pkg/time/#Time"><code>Time</code></a> type now has a
+      <a href="/pkg/time/#Time.GoString"><code>GoString</code></a> method that
+      will return a more useful value for times when printed with the
+      <code>%#v</code> format specifier in the <code>fmt</code> package.
     </p>
 
     <p><!-- CL 264077 -->
@@ -574,6 +699,11 @@ Do not send CLs removing the interior tags from such phrases.
     <p><!-- CL 300996 -->
       TODO: <a href="https://golang.org/cl/300996">https://golang.org/cl/300996</a>: support "," as separator for fractional seconds
     </p>
 
+    <p><!-- CL 320252 -->
+      The new constant <a href="/pkg/time/#Layout"><code>Layout</code></a>
+      defines the reference time.
+    </p>
   </dd>
 </dl><!-- time -->
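For illustration only (not part of this commit): a sketch of the GoString method and the Layout constant, assuming Go 1.17.

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2021, time.May, 26, 12, 0, 0, 0, time.UTC)

	// %#v now uses the new GoString method, producing a Go expression such as
	// time.Date(2021, time.May, 26, 12, 0, 0, 0, time.UTC)
	// instead of dumping unexported struct fields.
	fmt.Printf("%#v\n", t)

	// Layout is the new named constant for the reference time.
	fmt.Println(time.Layout)
	fmt.Println(t.Format(time.Layout))
}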
@@ -96,7 +96,15 @@ func (z *Reader) init(r io.ReaderAt, size int64) error {
 		return err
 	}
 	z.r = r
-	z.File = make([]*File, 0, end.directoryRecords)
+	// Since the number of directory records is not validated, it is not
+	// safe to preallocate z.File without first checking that the specified
+	// number of files is reasonable, since a malformed archive may
+	// indicate it contains up to 1 << 128 - 1 files. Since each file has a
+	// header which will be _at least_ 30 bytes we can safely preallocate
+	// if (data size / 30) >= end.directoryRecords.
+	if (uint64(size)-end.directorySize)/30 >= end.directoryRecords {
+		z.File = make([]*File, 0, end.directoryRecords)
+	}
 	z.Comment = end.comment
 	rs := io.NewSectionReader(r, 0, size)
 	if _, err = rs.Seek(int64(end.directoryOffset), io.SeekStart); err != nil {
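For illustration only (not part of this commit): a worked sketch of the plausibility check above. Each per-file header occupies at least 30 bytes, so an archive of a given size can contain at most size/30 real entries; a claimed count above that bound is not trusted for preallocation. The numbers below are hypothetical.

package main

import "fmt"

func main() {
	const size, directorySize = 1024, 100 // hypothetical archive and central-directory sizes, in bytes
	const claimedRecords = 1 << 40        // attacker-controlled count from the end-of-directory record

	maxPlausible := (uint64(size) - directorySize) / 30
	fmt.Println("plausible records:", maxPlausible)                     // 30
	fmt.Println("safe to preallocate:", maxPlausible >= claimedRecords) // false
}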
@@ -1325,3 +1325,62 @@ func TestReadDataDescriptor(t *testing.T) {
 		})
 	}
 }
+
+func TestCVE202133196(t *testing.T) {
+	// Archive that indicates it has 1 << 128 -1 files,
+	// this would previously cause a panic due to attempting
+	// to allocate a slice with 1 << 128 -1 elements.
+	data := []byte{
+		0x50, 0x4b, 0x03, 0x04, 0x14, 0x00, 0x08, 0x08,
+		0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x02,
+		0x03, 0x62, 0x61, 0x65, 0x03, 0x04, 0x00, 0x00,
+		0xff, 0xff, 0x50, 0x4b, 0x07, 0x08, 0xbe, 0x20,
+		0x5c, 0x6c, 0x09, 0x00, 0x00, 0x00, 0x03, 0x00,
+		0x00, 0x00, 0x50, 0x4b, 0x01, 0x02, 0x14, 0x00,
+		0x14, 0x00, 0x08, 0x08, 0x08, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0xbe, 0x20, 0x5c, 0x6c, 0x09, 0x00,
+		0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x03, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x01, 0x02, 0x03, 0x50, 0x4b, 0x06, 0x06, 0x2c,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2d,
+		0x00, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff,
+		0xff, 0xff, 0xff, 0x31, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x3a, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x50, 0x4b, 0x06, 0x07, 0x00,
+		0x00, 0x00, 0x00, 0x6b, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x50,
+		0x4b, 0x05, 0x06, 0x00, 0x00, 0x00, 0x00, 0xff,
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+		0xff, 0xff, 0xff, 0x00, 0x00,
+	}
+	_, err := NewReader(bytes.NewReader(data), int64(len(data)))
+	if err != ErrFormat {
+		t.Fatalf("unexpected error, got: %v, want: %v", err, ErrFormat)
+	}
+
+	// Also check that an archive containing a handful of empty
+	// files doesn't cause an issue
+	b := bytes.NewBuffer(nil)
+	w := NewWriter(b)
+	for i := 0; i < 5; i++ {
+		_, err := w.Create("")
+		if err != nil {
+			t.Fatalf("Writer.Create failed: %s", err)
+		}
+	}
+	if err := w.Close(); err != nil {
+		t.Fatalf("Writer.Close failed: %s", err)
+	}
+	r, err := NewReader(bytes.NewReader(b.Bytes()), int64(b.Len()))
+	if err != nil {
+		t.Fatalf("NewReader failed: %s", err)
+	}
+	if len(r.File) != 5 {
+		t.Errorf("Archive has unexpected number of files, got %d, want 5", len(r.File))
+	}
+}
@@ -7,10 +7,13 @@ package ssa
 import (
 	"cmd/compile/internal/abi"
 	"cmd/compile/internal/ir"
+	"cmd/compile/internal/types"
 	"cmd/internal/dwarf"
 	"cmd/internal/obj"
+	"cmd/internal/src"
 	"encoding/hex"
 	"fmt"
+	"internal/buildcfg"
 	"math/bits"
 	"sort"
 	"strings"
@@ -335,6 +338,216 @@ func (s *debugState) stateString(state stateAtPC) string {
 	return strings.Join(strs, "")
 }
 
+// slotCanonicalizer is a table used to lookup and canonicalize
+// LocalSlot's in a type insensitive way (e.g. taking into account the
+// base name, offset, and width of the slot, but ignoring the slot
+// type).
+type slotCanonicalizer struct {
+	slmap  map[slotKey]SlKeyIdx
+	slkeys []LocalSlot
+}
+
+func newSlotCanonicalizer() *slotCanonicalizer {
+	return &slotCanonicalizer{
+		slmap:  make(map[slotKey]SlKeyIdx),
+		slkeys: []LocalSlot{LocalSlot{N: nil}},
+	}
+}
+
+type SlKeyIdx uint32
+
+const noSlot = SlKeyIdx(0)
+
+// slotKey is a type-insensitive encapsulation of a LocalSlot; it
+// is used to key a map within slotCanonicalizer.
+type slotKey struct {
+	name        *ir.Name
+	offset      int64
+	width       int64
+	splitOf     SlKeyIdx // idx in slkeys slice in slotCanonicalizer
+	splitOffset int64
+}
+
+// lookup looks up a LocalSlot in the slot canonicalizer "sc", returning
+// a canonical index for the slot, and adding it to the table if need
+// be. Return value is the canonical slot index, and a boolean indicating
+// whether the slot was found in the table already (TRUE => found).
+func (sc *slotCanonicalizer) lookup(ls LocalSlot) (SlKeyIdx, bool) {
+	split := noSlot
+	if ls.SplitOf != nil {
+		split, _ = sc.lookup(*ls.SplitOf)
+	}
+	k := slotKey{
+		name: ls.N, offset: ls.Off, width: ls.Type.Width,
+		splitOf: split, splitOffset: ls.SplitOffset,
+	}
+	if idx, ok := sc.slmap[k]; ok {
+		return idx, true
+	}
+	rv := SlKeyIdx(len(sc.slkeys))
+	sc.slkeys = append(sc.slkeys, ls)
+	sc.slmap[k] = rv
+	return rv, false
+}
+
+func (sc *slotCanonicalizer) canonSlot(idx SlKeyIdx) LocalSlot {
+	return sc.slkeys[idx]
+}
+
+// PopulateABIInRegArgOps examines the entry block of the function
+// and looks for incoming parameters that have missing or partial
+// OpArg{Int,Float}Reg values, inserting additional values in
+// cases where they are missing. Example:
+//
+//      func foo(s string, used int, notused int) int {
+//        return len(s) + used
+//      }
+//
+// In the function above, the incoming parameter "used" is fully live,
+// "notused" is not live, and "s" is partially live (only the length
+// field of the string is used). At the point where debug value
+// analysis runs, we might expect to see an entry block with:
+//
+//   b1:
+//     v4 = ArgIntReg <uintptr> {s+8} [0] : BX
+//     v5 = ArgIntReg <int> {used} [0] : CX
+//
+// While this is an accurate picture of the live incoming params,
+// we also want to have debug locations for non-live params (or
+// their non-live pieces), e.g. something like
+//
+//   b1:
+//     v9 = ArgIntReg <*uint8> {s+0} [0] : AX
+//     v4 = ArgIntReg <uintptr> {s+8} [0] : BX
+//     v5 = ArgIntReg <int> {used} [0] : CX
+//     v10 = ArgIntReg <int> {unused} [0] : DI
+//
+// This function examines the live OpArg{Int,Float}Reg values and
+// synthesizes new (dead) values for the non-live params or the
+// non-live pieces of partially live params.
+//
+func PopulateABIInRegArgOps(f *Func) {
+	pri := f.ABISelf.ABIAnalyzeFuncType(f.Type.FuncType())
+
+	// When manufacturing new slots that correspond to splits of
+	// composite parameters, we want to avoid creating a new sub-slot
+	// that differs from some existing sub-slot only by type, since
+	// the debug location analysis will treat that slot as a separate
+	// entity. To achieve this, create a lookup table of existing
+	// slots that is type-insenstitive.
+	sc := newSlotCanonicalizer()
+	for _, sl := range f.Names {
+		sc.lookup(*sl)
+	}
+
+	// Add slot -> value entry to f.NamedValues if not already present.
+	addToNV := func(v *Value, sl LocalSlot) {
+		values, ok := f.NamedValues[sl]
+		if !ok {
+			// Haven't seen this slot yet.
+			sla := f.localSlotAddr(sl)
+			f.Names = append(f.Names, sla)
+		} else {
+			for _, ev := range values {
+				if v == ev {
+					return
+				}
+			}
+		}
+		values = append(values, v)
+		f.NamedValues[sl] = values
+	}
+
+	newValues := []*Value{}
+
+	abiRegIndexToRegister := func(reg abi.RegIndex) int8 {
+		i := f.ABISelf.FloatIndexFor(reg)
+		if i >= 0 { // float PR
+			return f.Config.floatParamRegs[i]
+		} else {
+			return f.Config.intParamRegs[reg]
+		}
+	}
+
+	// Helper to construct a new OpArg{Float,Int}Reg op value.
+	var pos src.XPos
+	if len(f.Entry.Values) != 0 {
+		pos = f.Entry.Values[0].Pos
+	}
+	synthesizeOpIntFloatArg := func(n *ir.Name, t *types.Type, reg abi.RegIndex, sl LocalSlot) *Value {
+		aux := &AuxNameOffset{n, sl.Off}
+		op, auxInt := ArgOpAndRegisterFor(reg, f.ABISelf)
+		v := f.newValueNoBlock(op, t, pos)
+		v.AuxInt = auxInt
+		v.Aux = aux
+		v.Args = nil
+		v.Block = f.Entry
+		newValues = append(newValues, v)
+		addToNV(v, sl)
+		f.setHome(v, &f.Config.registers[abiRegIndexToRegister(reg)])
+		return v
+	}
+
+	// Make a pass through the entry block looking for
+	// OpArg{Int,Float}Reg ops. Record the slots they use in a table
+	// ("sc"). We use a type-insensitive lookup for the slot table,
+	// since the type we get from the ABI analyzer won't always match
+	// what the compiler uses when creating OpArg{Int,Float}Reg ops.
+	for _, v := range f.Entry.Values {
+		if v.Op == OpArgIntReg || v.Op == OpArgFloatReg {
+			aux := v.Aux.(*AuxNameOffset)
+			sl := LocalSlot{N: aux.Name, Type: v.Type, Off: aux.Offset}
+			// install slot in lookup table
+			idx, _ := sc.lookup(sl)
+			// add to f.NamedValues if not already present
+			addToNV(v, sc.canonSlot(idx))
+		} else if v.Op.IsCall() {
+			// if we hit a call, we've gone too far.
+			break
+		}
+	}
+
+	// Now make a pass through the ABI in-params, looking for params
+	// or pieces of params that we didn't encounter in the loop above.
+	for _, inp := range pri.InParams() {
+		if !isNamedRegParam(inp) {
+			continue
+		}
+		n := inp.Name.(*ir.Name)
+
+		// Param is spread across one or more registers. Walk through
+		// each piece to see whether we've seen an arg reg op for it.
+		types, offsets := inp.RegisterTypesAndOffsets()
+		for k, t := range types {
+			// Note: this recipe for creating a LocalSlot is designed
+			// to be compatible with the one used in expand_calls.go
+			// as opposed to decompose.go. The expand calls code just
+			// takes the base name and creates an offset into it,
+			// without using the SplitOf/SplitOffset fields. The code
+			// in decompose.go does the opposite -- it creates a
+			// LocalSlot object with "Off" set to zero, but with
+			// SplitOf pointing to a parent slot, and SplitOffset
+			// holding the offset into the parent object.
+			pieceSlot := LocalSlot{N: n, Type: t, Off: offsets[k]}
+
+			// Look up this piece to see if we've seen a reg op
+			// for it. If not, create one.
+			_, found := sc.lookup(pieceSlot)
+			if !found {
+				// This slot doesn't appear in the map, meaning it
+				// corresponds to an in-param that is not live, or
+				// a portion of an in-param that is not live/used.
+				// Add a new dummy OpArg{Int,Float}Reg for it.
+				synthesizeOpIntFloatArg(n, t, inp.Registers[k],
+					pieceSlot)
+			}
+		}
+	}
+
+	// Insert the new values into the head of the block.
+	f.Entry.Values = append(newValues, f.Entry.Values...)
+}
+
 // BuildFuncDebug returns debug information for f.
 // f must be fully processed, so that each Value is where it will be when
 // machine code is emitted.
@@ -349,6 +562,10 @@ func BuildFuncDebug(ctxt *obj.Link, f *Func, loggingEnabled bool, stackOffset fu
 	state.stackOffset = stackOffset
 	state.ctxt = ctxt
 
+	if buildcfg.Experiment.RegabiArgs {
+		PopulateABIInRegArgOps(f)
+	}
+
 	if state.loggingEnabled {
 		state.logf("Generating location lists for function %q\n", f.Name)
 	}
@@ -1882,6 +1882,10 @@ func (s *regAllocState) placeSpills() {
 		phiRegs[b.ID] = m
 	}
 
+	mustBeFirst := func(op Op) bool {
+		return op.isLoweredGetClosurePtr() || op == OpPhi || op == OpArgIntReg || op == OpArgFloatReg
+	}
+
 	// Start maps block IDs to the list of spills
 	// that go at the start of the block (but after any phis).
 	start := map[ID][]*Value{}
@@ -1971,7 +1975,7 @@ func (s *regAllocState) placeSpills() {
 		// Put the spill in the best block we found.
 		spill.Block = best
 		spill.AddArg(bestArg)
-		if best == v.Block && v.Op != OpPhi {
+		if best == v.Block && !mustBeFirst(v.Op) {
 			// Place immediately after v.
 			after[v.ID] = append(after[v.ID], spill)
 		} else {
@@ -1983,15 +1987,15 @@ func (s *regAllocState) placeSpills() {
 	// Insert spill instructions into the block schedules.
 	var oldSched []*Value
 	for _, b := range s.visitOrder {
-		nphi := 0
+		nfirst := 0
 		for _, v := range b.Values {
-			if v.Op != OpPhi {
+			if !mustBeFirst(v.Op) {
 				break
 			}
-			nphi++
+			nfirst++
 		}
-		oldSched = append(oldSched[:0], b.Values[nphi:]...)
-		b.Values = b.Values[:nphi]
+		oldSched = append(oldSched[:0], b.Values[nfirst:]...)
+		b.Values = b.Values[:nfirst]
 		b.Values = append(b.Values, start[b.ID]...)
 		for _, v := range oldSched {
 			b.Values = append(b.Values, v)
@@ -293,7 +293,7 @@
 //
 // Usage:
 //
-//	go doc [-u] [-c] [package|[package.]symbol[.methodOrField]]
+//	go doc [doc flags] [package|[package.]symbol[.methodOrField]]
 //
 // Doc prints the documentation comments associated with the item identified by its
 // arguments (a package, const, func, type, var, method, or struct field)
@@ -13,7 +13,7 @@ import (
 
 var CmdDoc = &base.Command{
 	Run:         runDoc,
-	UsageLine:   "go doc [-u] [-c] [package|[package.]symbol[.methodOrField]]",
+	UsageLine:   "go doc [doc flags] [package|[package.]symbol[.methodOrField]]",
 	CustomFlags: true,
 	Short:       "show documentation for package or symbol",
 	Long: `
@@ -138,14 +138,14 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) {
 	sem := make(chan token, runtime.GOMAXPROCS(0))
 	infos, infosErr := modload.ListModules(ctx, args, 0)
 	if !haveExplicitArgs {
-		// 'go mod download' is sometimes run without arguments to pre-populate
-		// the module cache. It may fetch modules that aren't needed to build
-		// packages in the main mdoule. This is usually not intended, so don't save
-		// sums for downloaded modules (golang.org/issue/45332).
-		// TODO(golang.org/issue/45551): For now, save sums needed to load the
-		// build list (same as 1.15 behavior). In the future, report an error if
-		// go.mod or go.sum need to be updated after loading the build list.
-		modload.WriteGoMod(ctx)
+		// 'go mod download' is sometimes run without arguments to pre-populate the
+		// module cache. It may fetch modules that aren't needed to build packages
+		// in the main mdoule. This is usually not intended, so don't save sums for
+		// downloaded modules (golang.org/issue/45332).
+		// TODO(golang.org/issue/45551): For now, in ListModules, save sums needed
+		// to load the build list (same as 1.15 behavior). In the future, report an
+		// error if go.mod or go.sum need to be updated after loading the build
+		// list.
 		modload.DisallowWriteGoMod()
 	}
 
@@ -38,6 +38,7 @@ import (
 	"cmd/go/internal/base"
 	"cmd/go/internal/imports"
 	"cmd/go/internal/load"
+	"cmd/go/internal/modfetch"
 	"cmd/go/internal/modload"
 	"cmd/go/internal/par"
 	"cmd/go/internal/search"
@@ -1466,6 +1467,8 @@ func (r *resolver) chooseArbitrarily(cs pathSet) (isPackage bool, m module.Versi
 // checkPackageProblems reloads packages for the given patterns and reports
 // missing and ambiguous package errors. It also reports retractions and
 // deprecations for resolved modules and modules needed to build named packages.
+// It also adds a sum for each updated module in the build list if we had one
+// before and didn't get one while loading packages.
 //
 // We skip missing-package errors earlier in the process, since we want to
 // resolve pathSets ourselves, but at that point, we don't have enough context
@@ -1593,9 +1596,52 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin
 		})
 	}
 
+	// Load sums for updated modules that had sums before. When we update a
+	// module, we may update another module in the build list that provides a
+	// package in 'all' that wasn't loaded as part of this 'go get' command.
+	// If we don't add a sum for that module, builds may fail later.
+	// Note that an incidentally updated package could still import packages
+	// from unknown modules or from modules in the build list that we didn't
+	// need previously. We can't handle that case without loading 'all'.
+	sumErrs := make([]error, len(r.buildList))
+	for i := range r.buildList {
+		i := i
+		m := r.buildList[i]
+		mActual := m
+		if mRepl := modload.Replacement(m); mRepl.Path != "" {
+			mActual = mRepl
+		}
+		old := module.Version{Path: m.Path, Version: r.initialVersion[m.Path]}
+		if old.Version == "" {
+			continue
+		}
+		oldActual := old
+		if oldRepl := modload.Replacement(old); oldRepl.Path != "" {
+			oldActual = oldRepl
+		}
+		if mActual == oldActual || mActual.Version == "" || !modfetch.HaveSum(oldActual) {
+			continue
+		}
+		r.work.Add(func() {
+			if _, err := modfetch.DownloadZip(ctx, mActual); err != nil {
+				verb := "upgraded"
+				if semver.Compare(m.Version, old.Version) < 0 {
+					verb = "downgraded"
+				}
+				replaced := ""
+				if mActual != m {
+					replaced = fmt.Sprintf(" (replaced by %s)", mActual)
+				}
+				err = fmt.Errorf("%s %s %s => %s%s: error finding sum for %s: %v", verb, m.Path, old.Version, m.Version, replaced, mActual, err)
+				sumErrs[i] = err
+			}
+		})
+	}
+
 	<-r.work.Idle()
 
-	// Report deprecations, then retractions.
+	// Report deprecations, then retractions, then errors fetching sums.
+	// Only errors fetching sums are hard errors.
 	for _, mm := range deprecations {
 		if mm.message != "" {
 			fmt.Fprintf(os.Stderr, "go: module %s is deprecated: %s\n", mm.m.Path, mm.message)
@@ -1615,6 +1661,12 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin
 	if retractPath != "" {
 		fmt.Fprintf(os.Stderr, "go: to switch to the latest unretracted version, run:\n\tgo get %s@latest\n", retractPath)
 	}
+	for _, err := range sumErrs {
+		if err != nil {
+			base.Errorf("go: %v", err)
+		}
+	}
+	base.ExitIfErrors()
 }
 
 // reportChanges logs version changes to os.Stderr.
@@ -1122,12 +1122,11 @@ func keepSums(ctx context.Context, ld *loader, rs *Requirements, which whichSums
 		}
 	}
 
-	if rs.depth == lazy && rs.graph.Load() == nil {
-		// The main module is lazy and we haven't needed to load the module graph so
-		// far. Don't incur the cost of loading it now — since we haven't loaded the
-		// graph, we probably don't have any checksums to contribute to the distant
-		// parts of the graph anyway. Instead, just request sums for the roots that
-		// we know about.
+	if rs.graph.Load() == nil {
+		// The module graph was not loaded, possibly because the main module is lazy
+		// or possibly because we haven't needed to load the graph yet.
+		// Save sums for the root modules (or their replacements), but don't
+		// incur the cost of loading the graph just to find and retain the sums.
 		for _, m := range rs.rootModules {
 			r := resolveReplacement(m)
 			keep[modkey(r)] = true
(deleted test script)

@@ -1,13 +0,0 @@
-# Tests Issue #12690
-
-[gccgo] skip 'gccgo does not have GOROOT'
-
-! stale runtime
-! stale os
-! stale io
-
-env GOROOT=$GOROOT'/'
-
-! stale runtime
-! stale os
-! stale io
src/cmd/go/testdata/script/cgo_stale.txt (new file) | 39

@@ -0,0 +1,39 @@
+# golang.org/issue/46347: a stale runtime/cgo should only force a single rebuild
+
+[!cgo] skip
+[short] skip
+
+
+# If we set a unique CGO_CFLAGS, the installed copy of runtime/cgo
+# should be reported as stale.
+
+env CGO_CFLAGS=-DTestScript_cgo_stale=true
+stale runtime/cgo
+
+
+# If we then build a package that uses cgo, runtime/cgo should be rebuilt and
+# cached with the new flag, but not installed to GOROOT (and thus still stale).
+
+env GOCACHE=$WORK/cache  # Use a fresh cache to avoid interference between runs.
+
+go build -x .
+stderr '[/\\]cgo'$GOEXE'["]? .* -importpath runtime/cgo'
+stale runtime/cgo
+
+
+# After runtime/cgo has been rebuilt and cached, it should not be rebuilt again
+# even though it is still reported as stale.
+
+go build -x .
+! stderr '[/\\]cgo'$GOEXE'["]? .* -importpath runtime/cgo'
+stale runtime/cgo
+
+
+-- go.mod --
+module example.com/m
+
+go 1.17
+-- m.go --
+package m
+
+import "C"
src/cmd/go/testdata/script/list_std_stale.txt (deleted) | 31

@@ -1,31 +0,0 @@
-# https://golang.org/issue/44725: packages in std should not be reported as stale,
-# regardless of whether they are listed from within or outside GOROOT/src.
-
-# Control case: net should not be stale at the start of the test,
-# and should depend on vendor/golang.org/… instead of golang.org/….
-
-! stale net
-
-go list -deps net
-stdout '^vendor/golang.org/x/net'
-! stdout '^golang.org/x/net'
-
-# Net should also not be stale when viewed from within GOROOT/src,
-# and should still report the same package dependencies.
-
-cd $GOROOT/src
-! stale net
-
-go list -deps net
-stdout '^vendor/golang.org/x/net'
-! stdout '^golang.org/x/net'
-
-
-# However, 'go mod' and 'go get' subcommands should report the original module
-# dependencies, not the vendored packages.
-
-[!net] stop
-
-env GOPROXY=
-go mod why -m golang.org/x/net
-stdout '^# golang.org/x/net\nnet\ngolang.org/x/net'
src/cmd/go/testdata/script/list_std_vendor.txt (new file) | 32

@@ -0,0 +1,32 @@
+# https://golang.org/issue/44725: packages in std should have the same
+# dependencies regardless of whether they are listed from within or outside
+# GOROOT/src.
+
+# Control case: net, viewed from outside the 'std' module,
+# should depend on vendor/golang.org/… instead of golang.org/….
+
+go list -deps net
+stdout '^vendor/golang.org/x/net'
+! stdout '^golang.org/x/net'
+cp stdout $WORK/net-deps.txt
+
+
+# It should still report the same package dependencies when viewed from
+# within GOROOT/src.
+
+cd $GOROOT/src
+
+go list -deps net
+stdout '^vendor/golang.org/x/net'
+! stdout '^golang.org/x/net'
+cmp stdout $WORK/net-deps.txt
+
+
+# However, 'go mod' and 'go get' subcommands should report the original module
+# dependencies, not the vendored packages.
+
+[!net] stop
+
+env GOPROXY=
+go mod why -m golang.org/x/net
+stdout '^# golang.org/x/net\nnet\ngolang.org/x/net'
src/cmd/go/testdata/script/mod_download.txt | 1

@@ -167,5 +167,4 @@ require (
 -- update/go.sum.update --
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 rsc.io/quote v1.5.2/go.mod h1:LzX7hefJvL54yjefDEDHNONDjII0t9xZLPXsUe+TKr0=
-rsc.io/sampler v1.2.1/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
 rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
120
src/cmd/go/testdata/script/mod_get_update_unrelated_sum.txt
vendored
Normal file
120
src/cmd/go/testdata/script/mod_get_update_unrelated_sum.txt
vendored
Normal file
@ -0,0 +1,120 @@
# Check that 'go get' adds sums for updated modules if we had sums before,
# even if we didn't load packages from them.
# Verifies #44129.

env fmt='{{.ImportPath}}: {{if .Error}}{{.Error.Err}}{{else}}ok{{end}}'

# Control case: before upgrading, we have the sums we need.
# go list -deps -e -f $fmt .
# stdout '^rsc.io/quote: ok$'
# ! stdout rsc.io/sampler # not imported by quote in this version
cp go.mod.orig go.mod
cp go.sum.orig go.sum
go mod tidy
cmp go.mod.orig go.mod
cmp go.sum.orig go.sum

# Upgrade a module. This also upgrades rsc.io/quote, and though we didn't load
# a package from it, we had the sum for its old version, so we need the
# sum for the new version, too.
go get -d example.com/upgrade@v0.0.2
grep '^rsc.io/quote v1.5.2 ' go.sum

# The upgrade still breaks the build because the new version of quote imports
# rsc.io/sampler, and we don't have its zip sum.
go list -deps -e -f $fmt
stdout 'rsc.io/quote: ok'
stdout 'rsc.io/sampler: missing go.sum entry for module providing package rsc.io/sampler'
cp go.mod.orig go.mod
cp go.sum.orig go.sum

# Replace the old version with a directory before upgrading.
# We didn't need a sum for it before (even though we had one), so we won't
# fetch a new sum.
go mod edit -replace rsc.io/quote@v1.0.0=./dummy
go get -d example.com/upgrade@v0.0.2
! grep '^rsc.io/quote v1.5.2 ' go.sum
cp go.mod.orig go.mod
cp go.sum.orig go.sum

# Replace the new version with a directory before upgrading.
# We can't get a sum for a directory.
go mod edit -replace rsc.io/quote@v1.5.2=./dummy
go get -d example.com/upgrade@v0.0.2
! grep '^rsc.io/quote v1.5.2 ' go.sum
cp go.mod.orig go.mod
cp go.sum.orig go.sum

# Replace the new version with a different version.
# We should get a sum for that version.
go mod edit -replace rsc.io/quote@v1.5.2=rsc.io/quote@v1.5.1
go get -d example.com/upgrade@v0.0.2
! grep '^rsc.io/quote v1.5.2 ' go.sum
grep '^rsc.io/quote v1.5.1 ' go.sum
cp go.mod.orig go.mod
cp go.sum.orig go.sum

# Delete the new version's zip (but not mod) from the cache and go offline.
# 'go get' should fail when fetching the zip.
rm $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.zip
env GOPROXY=off
! go get -d example.com/upgrade@v0.0.2
stderr '^go: upgraded rsc.io/quote v1.0.0 => v1.5.2: error finding sum for rsc.io/quote@v1.5.2: module lookup disabled by GOPROXY=off$'

-- go.mod.orig --
module m

go 1.16

require (
	example.com/upgrade v0.0.1
	rsc.io/quote v1.0.0
)

replace (
	example.com/upgrade v0.0.1 => ./upgrade1
	example.com/upgrade v0.0.2 => ./upgrade2
)
-- go.sum.orig --
rsc.io/quote v1.0.0 h1:kQ3IZQzPTiDJxSZI98YaWgxFEhlNdYASHvh+MplbViw=
rsc.io/quote v1.0.0/go.mod h1:v83Ri/njykPcgJltBc/gEkJTmjTsNgtO1Y7vyIK1CQA=
-- go.sum.want --
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
rsc.io/quote v1.0.0 h1:kQ3IZQzPTiDJxSZI98YaWgxFEhlNdYASHvh+MplbViw=
rsc.io/quote v1.0.0/go.mod h1:v83Ri/njykPcgJltBc/gEkJTmjTsNgtO1Y7vyIK1CQA=
rsc.io/quote v1.5.2 h1:3fEykkD9k7lYzXqCYrwGAf7iNhbk4yCjHmKBN9td4L0=
rsc.io/quote v1.5.2/go.mod h1:LzX7hefJvL54yjefDEDHNONDjII0t9xZLPXsUe+TKr0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-- use.go --
package use

import (
	_ "example.com/upgrade"
	_ "rsc.io/quote"
)
-- upgrade1/go.mod --
module example.com/upgrade

go 1.16
-- upgrade1/upgrade.go --
package upgrade
-- upgrade2/go.mod --
module example.com/upgrade

go 1.16

require rsc.io/quote v1.5.2 // indirect
-- upgrade2/upgrade.go --
package upgrade
-- dummy/go.mod --
module rsc.io/quote

go 1.16
-- dummy/quote.go --
package quote
@ -2,8 +2,6 @@
[!race] skip

[!darwin] ! stale cmd/cgo # The darwin builders are spuriously stale; see #33598.

env GOBIN=$WORK/bin
go install m/mtime m/sametime
6 src/cmd/go/testdata/script/toolexec.txt vendored
@ -3,6 +3,12 @@
# Build our simple toolexec program.
go build ./cmd/mytool

# Use an ephemeral build cache so that our toolexec output is not cached
# for any stale standard-library dependencies.
#
# TODO(#27628): This should not be necessary.
env GOCACHE=$WORK/gocache

# Build the main package with our toolexec program. For each action, it will
# print the tool's name and the TOOLEXEC_IMPORTPATH value. We expect to compile
# each package once, and link the main package once.
@ -19,6 +19,36 @@ import (
	"testing"
)

// TestMain allows this test binary to run as a -toolexec wrapper for the 'go'
// command. If LINK_TEST_TOOLEXEC is set, TestMain runs the binary as if it were
// cmd/link, and otherwise runs the requested tool as a subprocess.
//
// This allows the test to verify the behavior of the current contents of the
// cmd/link package even if the installed cmd/link binary is stale.
func TestMain(m *testing.M) {
	if os.Getenv("LINK_TEST_TOOLEXEC") == "" {
		// Not running as a -toolexec wrapper. Just run the tests.
		os.Exit(m.Run())
	}

	if strings.TrimSuffix(filepath.Base(os.Args[1]), ".exe") == "link" {
		// Running as a -toolexec linker, and the tool is cmd/link.
		// Substitute this test binary for the linker.
		os.Args = os.Args[1:]
		main()
		os.Exit(0)
	}

	cmd := exec.Command(os.Args[1], os.Args[2:]...)
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		os.Exit(1)
	}
	os.Exit(0)
}

func testDWARF(t *testing.T, buildmode string, expectDWARF bool, env ...string) {
	testenv.MustHaveCGO(t)
	testenv.MustHaveGoBuild(t)
@ -29,17 +59,6 @@ func testDWARF(t *testing.T, buildmode string, expectDWARF bool, env ...string)
	t.Parallel()

	out, err := exec.Command(testenv.GoToolPath(t), "list", "-f", "{{.Stale}}", "cmd/link").CombinedOutput()
	if err != nil {
		t.Fatalf("go list: %v\n%s", err, out)
	}
	if string(out) != "false\n" {
		if strings.HasPrefix(testenv.Builder(), "darwin-") {
			t.Skipf("cmd/link is spuriously stale on Darwin builders - see #33598")
		}
		t.Fatalf("cmd/link is stale - run go install cmd/link")
	}

	for _, prog := range []string{"testprog", "testprogcgo"} {
		prog := prog
		expectDWARF := expectDWARF
@ -48,11 +67,11 @@ func testDWARF(t *testing.T, buildmode string, expectDWARF bool, env ...string)
		if extld == "" {
			extld = "gcc"
		}
		var err error
		expectDWARF, err = cmddwarf.IsDWARFEnabledOnAIXLd(extld)
		if err != nil {
			t.Fatal(err)
		}
	}

	t.Run(prog, func(t *testing.T) {
@ -62,15 +81,14 @@ func testDWARF(t *testing.T, buildmode string, expectDWARF bool, env ...string)

		exe := filepath.Join(tmpDir, prog+".exe")
		dir := "../../runtime/testdata/" + prog
		cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", exe)
		cmd := exec.Command(testenv.GoToolPath(t), "build", "-toolexec", os.Args[0], "-o", exe)
		if buildmode != "" {
			cmd.Args = append(cmd.Args, "-buildmode", buildmode)
		}
		cmd.Args = append(cmd.Args, dir)
		if env != nil {
		cmd.Env = append(os.Environ(), env...)
		cmd.Env = append(cmd.Env, "CGO_CFLAGS=") // ensure CGO_CFLAGS does not contain any flags. Issue #35459
		cmd.Env = append(cmd.Env, "LINK_TEST_TOOLEXEC=1")
		}
		out, err := cmd.CombinedOutput()
		if err != nil {
			t.Fatalf("go build -o %v %v: %v\n%s", exe, dir, err, out)
@ -1550,7 +1550,7 @@ func (ctxt *Link) dodata(symGroupType []sym.SymKind) {
	if ctxt.HeadType == objabi.Haix && ctxt.LinkMode == LinkExternal {
		// These symbols must have the same alignment as their section.
		// Otherwize, ld might change the layout of Go sections.
		// Otherwise, ld might change the layout of Go sections.
		ldr.SetSymAlign(ldr.Lookup("runtime.data", 0), state.dataMaxAlign[sym.SDATA])
		ldr.SetSymAlign(ldr.Lookup("runtime.bss", 0), state.dataMaxAlign[sym.SBSS])
	}
|
@ -698,7 +698,7 @@ func TestTrampolineCgo(t *testing.T) {
	if err != nil {
		t.Errorf("executable failed to run: %v\n%s", err, out)
	}
	if string(out) != "hello\n" {
	if string(out) != "hello\n" && string(out) != "hello\r\n" {
		t.Errorf("unexpected output:\n%s", out)
	}

@ -717,7 +717,7 @@ func TestTrampolineCgo(t *testing.T) {
	if err != nil {
		t.Errorf("executable failed to run: %v\n%s", err, out)
	}
	if string(out) != "hello\n" {
	if string(out) != "hello\n" && string(out) != "hello\r\n" {
		t.Errorf("unexpected output:\n%s", out)
	}
}
|
@ -52,7 +52,7 @@ func (curve p521Curve) IsOnCurve(x, y *big.Int) bool {
	return x3.Equal(y2) == 1
}

type p512Point struct {
type p521Point struct {
	x, y, z *fiat.P521Element
}

@ -67,7 +67,7 @@ func fiatP521ToBigInt(x *fiat.P521Element) *big.Int {
// affineFromJacobian brings a point in Jacobian coordinates back to affine
// coordinates, with (0, 0) representing infinity by convention. It also goes
// back to big.Int values to match the exposed API.
func (curve p521Curve) affineFromJacobian(p *p512Point) (x, y *big.Int) {
func (curve p521Curve) affineFromJacobian(p *p521Point) (x, y *big.Int) {
	if p.z.IsZero() == 1 {
		return new(big.Int), new(big.Int)
	}
@ -99,17 +99,17 @@ func bigIntToFiatP521(x *big.Int) *fiat.P521Element {
// jacobianFromAffine converts (x, y) affine coordinates into (x, y, z) Jacobian
// coordinates. It also converts from big.Int to fiat, which is necessarily a
// messy and variable-time operation, which we can't avoid due to the exposed API.
func (curve p521Curve) jacobianFromAffine(x, y *big.Int) *p512Point {
func (curve p521Curve) jacobianFromAffine(x, y *big.Int) *p521Point {
	// (0, 0) is by convention the point at infinity, which can't be represented
	// in affine coordinates, but is (0, 0, 0) in Jacobian.
	if x.Sign() == 0 && y.Sign() == 0 {
		return &p512Point{
		return &p521Point{
			x: new(fiat.P521Element),
			y: new(fiat.P521Element),
			z: new(fiat.P521Element),
		}
	}
	return &p512Point{
	return &p521Point{
		x: bigIntToFiatP521(x),
		y: bigIntToFiatP521(y),
		z: new(fiat.P521Element).One(),
@ -123,7 +123,7 @@ func (curve p521Curve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) {
}

// addJacobian sets q = p1 + p2, and returns q. The points may overlap.
func (q *p512Point) addJacobian(p1, p2 *p512Point) *p512Point {
func (q *p521Point) addJacobian(p1, p2 *p521Point) *p521Point {
	// https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl
	z1IsZero := p1.z.IsZero()
	z2IsZero := p2.z.IsZero()
@ -189,7 +189,7 @@ func (curve p521Curve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) {
}

// doubleJacobian sets q = p + p, and returns q. The points may overlap.
func (q *p512Point) doubleJacobian(p *p512Point) *p512Point {
func (q *p521Point) doubleJacobian(p *p521Point) *p521Point {
	// https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2001-b
	delta := new(fiat.P521Element).Square(p.z)
	gamma := new(fiat.P521Element).Square(p.y)
@ -230,11 +230,11 @@ func (q *p512Point) doubleJacobian(p *p512Point) *p512Point {

func (curve p521Curve) ScalarMult(Bx, By *big.Int, scalar []byte) (*big.Int, *big.Int) {
	B := curve.jacobianFromAffine(Bx, By)
	p, t := &p512Point{
	p, t := &p521Point{
		x: new(fiat.P521Element),
		y: new(fiat.P521Element),
		z: new(fiat.P521Element),
	}, &p512Point{
	}, &p521Point{
		x: new(fiat.P521Element),
		y: new(fiat.P521Element),
		z: new(fiat.P521Element),
|
@ -259,7 +259,7 @@ func (f *FieldList) End() token.Pos {
	return token.NoPos
}

// NumFields returns the number of (type) parameters or struct fields represented by a FieldList.
// NumFields returns the number of parameters or struct fields represented by a FieldList.
func (f *FieldList) NumFields() int {
	n := 0
	if f != nil {
||||||
@ -973,10 +973,6 @@ type (
	}
)

func (f *FuncDecl) IsMethod() bool {
	return f.Recv.NumFields() != 0
}

// Pos and End implementations for declaration nodes.

func (d *BadDecl) Pos() token.Pos { return d.From }
|
@ -101,12 +101,12 @@ type ImporterFrom interface {
// A Config specifies the configuration for type checking.
// The zero value for Config is a ready-to-use default configuration.
type Config struct {
	// GoVersion describes the accepted Go language version. The string
	// goVersion describes the accepted Go language version. The string
	// must follow the format "go%d.%d" (e.g. "go1.12") or it must be
	// empty; an empty string indicates the latest language version.
	// If the format is invalid, invoking the type checker will cause a
	// panic.
	GoVersion string
	goVersion string

	// If IgnoreFuncBodies is set, function bodies are not
	// type-checked.
|
@ -179,9 +179,9 @@ func NewChecker(conf *Config, fset *token.FileSet, pkg *Package, info *Info) *Ch
		info = new(Info)
	}

	version, err := parseGoVersion(conf.GoVersion)
	version, err := parseGoVersion(conf.goVersion)
	if err != nil {
		panic(fmt.Sprintf("invalid Go version %q (%v)", conf.GoVersion, err))
		panic(fmt.Sprintf("invalid Go version %q (%v)", conf.goVersion, err))
	}

	return &Checker{
|
@ -240,7 +240,7 @@ func checkFiles(t *testing.T, sizes Sizes, goVersion string, filenames []string,
	// typecheck and collect typechecker errors
	var conf Config
	conf.Sizes = sizes
	conf.GoVersion = goVersion
	SetGoVersion(&conf, goVersion)

	// special case for importC.src
	if len(filenames) == 1 {
@ -330,6 +330,14 @@ func TestIndexRepresentability(t *testing.T) {
	checkFiles(t, &StdSizes{4, 4}, "", []string{"index.go"}, [][]byte{[]byte(src)}, false)
}

func TestIssue46453(t *testing.T) {
	if typeparams.Enabled {
		t.Skip("type params are enabled")
	}
	const src = "package p\ntype _ comparable // ERROR \"undeclared name: comparable\""
	checkFiles(t, nil, "", []string{"issue46453.go"}, [][]byte{[]byte(src)}, false)
}

func TestCheck(t *testing.T)     { DefPredeclaredTestFuncs(); testDir(t, "check") }
func TestExamples(t *testing.T)  { testDir(t, "examples") }
func TestFixedbugs(t *testing.T) { testDir(t, "fixedbugs") }
|
@ -577,15 +577,37 @@ func (check *Checker) varDecl(obj *Var, lhs []*Var, typ, init ast.Expr) {
// n0.check != nil, the cycle is reported.
func (n0 *Named) under() Type {
	u := n0.underlying
	if u == nil {
		return Typ[Invalid]
	if u == Typ[Invalid] {
		return u
	}

	// If the underlying type of a defined type is not a defined
	// type, then that is the desired underlying type.
	// (incl. instance) type, then that is the desired underlying
	// type.
	switch u.(type) {
	case nil:
		return Typ[Invalid]
	default:
		// common case
		return u
	case *Named, *instance:
		// handled below
	}

	if n0.check == nil {
		panic("internal error: Named.check == nil but type is incomplete")
	}

	// Invariant: after this point n0 as well as any named types in its
	// underlying chain should be set up when this function exits.
	check := n0.check

	// If we can't expand u at this point, it is invalid.
	n := asNamed(u)
	if n == nil {
		return u // common case
		n0.underlying = Typ[Invalid]
		return n0.underlying
	}

	// Otherwise, follow the forward chain.
@ -597,7 +619,16 @@ func (n0 *Named) under() Type {
			u = Typ[Invalid]
			break
		}
		n1 := asNamed(u)
		var n1 *Named
		switch u1 := u.(type) {
		case *Named:
			n1 = u1
		case *instance:
			n1, _ = u1.expand().(*Named)
			if n1 == nil {
				u = Typ[Invalid]
			}
		}
		if n1 == nil {
			break // end of chain
		}
@ -608,11 +639,7 @@ func (n0 *Named) under() Type {

		if i, ok := seen[n]; ok {
			// cycle
			// TODO(rFindley) revert this to a method on Checker. Having a possibly
			check.cycleError(path[i:])
			// nil Checker on Named and TypeParam is too subtle.
			if n0.check != nil {
				n0.check.cycleError(path[i:])
			}
			u = Typ[Invalid]
			break
		}
@ -622,8 +649,8 @@ func (n0 *Named) under() Type {
	// We should never have to update the underlying type of an imported type;
	// those underlying types should have been resolved during the import.
	// Also, doing so would lead to a race condition (was issue #31749).
	// Do this check always, not just in debug more (it's cheap).
	// Do this check always, not just in debug mode (it's cheap).
	if n0.check != nil && n.obj.pkg != n0.check.pkg {
	if n.obj.pkg != check.pkg {
		panic("internal error: imported type with unresolved underlying type")
	}
	n.underlying = u
||||||
@ -665,7 +692,7 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *ast.TypeSpec, def *Named) {
	} else {
		// defined type declaration

		named := &Named{check: check, obj: obj}
		named := check.newNamed(obj, nil, nil)
		def.setUnderlying(named)
		obj.typ = named // make sure recursive type declarations terminate
|
@ -383,7 +383,7 @@ func (check *Checker) collectObjects() {
	info := &declInfo{file: fileScope, fdecl: d.decl}
	name := d.decl.Name.Name
	obj := NewFunc(d.decl.Name.Pos(), pkg, name, nil)
	if !d.decl.IsMethod() {
	if d.decl.Recv.NumFields() == 0 {
		// regular function
		if d.decl.Recv != nil {
			check.error(d.decl.Recv, _BadRecv, "method is missing receiver")
|
@ -135,6 +135,9 @@ func (s sanitizer) typ(typ Type) Type {
		}

	case *Named:
		if debug && t.check != nil {
			panic("internal error: Named.check != nil")
		}
		if orig := s.typ(t.orig); orig != t.orig {
			t.orig = orig
		}
|
@ -134,7 +134,8 @@ func testTestDir(t *testing.T, path string, ignore ...string) {
		// parse and type-check file
		file, err := parser.ParseFile(fset, filename, nil, 0)
		if err == nil {
			conf := Config{GoVersion: goVersion, Importer: stdLibImporter}
			conf := Config{Importer: stdLibImporter}
			SetGoVersion(&conf, goVersion)
			_, err = conf.Check(filename, fset, []*ast.File{file}, nil)
		}
|
@ -644,7 +644,7 @@ func (c *Chan) Elem() Type { return c.elem }

// A Named represents a named (defined) type.
type Named struct {
	check *Checker // for Named.under implementation
	check *Checker // for Named.under implementation; nilled once under has been called
	info  typeInfo // for cycle detection
	obj   *TypeName // corresponding declared object
	orig  Type // type (on RHS of declaration) this *Named type is derived of (for cycle reporting)
@ -661,11 +661,7 @@ func NewNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
|
|||||||
if _, ok := underlying.(*Named); ok {
|
if _, ok := underlying.(*Named); ok {
|
||||||
panic("types.NewNamed: underlying type must not be *Named")
|
panic("types.NewNamed: underlying type must not be *Named")
|
||||||
}
|
}
|
||||||
typ := &Named{obj: obj, orig: underlying, underlying: underlying, methods: methods}
|
return (*Checker)(nil).newNamed(obj, underlying, methods)
|
||||||
if obj.typ == nil {
|
|
||||||
obj.typ = typ
|
|
||||||
}
|
|
||||||
return typ
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (check *Checker) newNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
|
func (check *Checker) newNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
|
||||||
@ -673,6 +669,23 @@ func (check *Checker) newNamed(obj *TypeName, underlying Type, methods []*Func)
|
|||||||
if obj.typ == nil {
|
if obj.typ == nil {
|
||||||
obj.typ = typ
|
obj.typ = typ
|
||||||
}
|
}
|
||||||
|
// Ensure that typ is always expanded, at which point the check field can be
|
||||||
|
// nilled out.
|
||||||
|
//
|
||||||
|
// Note that currently we cannot nil out check inside typ.under(), because
|
||||||
|
// it's possible that typ is expanded multiple times.
|
||||||
|
//
|
||||||
|
// TODO(rFindley): clean this up so that under is the only function mutating
|
||||||
|
// named types.
|
||||||
|
if check != nil {
|
||||||
|
check.later(func() {
|
||||||
|
switch typ.under().(type) {
|
||||||
|
case *Named, *instance:
|
||||||
|
panic("internal error: unexpanded underlying type")
|
||||||
|
}
|
||||||
|
typ.check = nil
|
||||||
|
})
|
||||||
|
}
|
||||||
return typ
|
return typ
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -11,3 +11,9 @@ import "sync/atomic"
// for tests where we may want to have a consistent
// numbering for each individual test case.
func ResetId() { atomic.StoreUint32(&lastId, 0) }

// SetGoVersion sets the unexported goVersion field on config, so that tests
// which assert on behavior for older Go versions can set it.
func SetGoVersion(config *Config, goVersion string) {
	config.goVersion = goVersion
}
|
@ -8,6 +8,7 @@ package types

import (
	"go/constant"
	"go/internal/typeparams"
	"go/token"
	"strings"
)
@ -237,7 +238,9 @@ func init() {
	defPredeclaredConsts()
	defPredeclaredNil()
	defPredeclaredFuncs()
	defPredeclaredComparable()
	if typeparams.Enabled {
		defPredeclaredComparable()
	}

	universeIota = Universe.Lookup("iota").(*Const)
	universeByte = Universe.Lookup("byte").(*TypeName).typ.(*Basic)
|
@ -267,20 +267,6 @@ func divWW(x1, x0, y, m Word) (q, r Word) {
	return Word(qq), Word(r0 >> s)
}

func divWVW(z []Word, xn Word, x []Word, y Word) (r Word) {
	r = xn
	if len(x) == 1 {
		qq, rr := bits.Div(uint(r), uint(x[0]), uint(y))
		z[0] = Word(qq)
		return Word(rr)
	}
	rec := reciprocalWord(y)
	for i := len(z) - 1; i >= 0; i-- {
		z[i], r = divWW(r, x[i], y, rec)
	}
	return r
}

// reciprocalWord return the reciprocal of the divisor. rec = floor(( _B^2 - 1 ) / u - _B). u = d1 << nlz(d1).
func reciprocalWord(d1 Word) Word {
	u := uint(d1 << nlz(d1))
|
@ -631,48 +631,6 @@ func (z nat) mulRange(a, b uint64) nat {
	return z.mul(nat(nil).mulRange(a, m), nat(nil).mulRange(m+1, b))
}

// q = (x-r)/y, with 0 <= r < y
func (z nat) divW(x nat, y Word) (q nat, r Word) {
	m := len(x)
	switch {
	case y == 0:
		panic("division by zero")
	case y == 1:
		q = z.set(x) // result is x
		return
	case m == 0:
		q = z[:0] // result is 0
		return
	}
	// m > 0
	z = z.make(m)
	r = divWVW(z, 0, x, y)
	q = z.norm()
	return
}

func (z nat) div(z2, u, v nat) (q, r nat) {
	if len(v) == 0 {
		panic("division by zero")
	}

	if u.cmp(v) < 0 {
		q = z[:0]
		r = z2.set(u)
		return
	}

	if len(v) == 1 {
		var r2 Word
		q, r2 = z.divW(u, v[0])
		r = z2.setWord(r2)
		return
	}

	q, r = z.divLarge(z2, u, v)
	return
}

// getNat returns a *nat of len n. The contents may not be zero.
// The pool holds *nat to avoid allocation when converting to interface{}.
func getNat(n int) *nat {
@ -693,276 +651,6 @@ func putNat(x *nat) {

var natPool sync.Pool

// q = (uIn-r)/vIn, with 0 <= r < vIn
// Uses z as storage for q, and u as storage for r if possible.
// See Knuth, Volume 2, section 4.3.1, Algorithm D.
// Preconditions:
//	len(vIn) >= 2
//	len(uIn) >= len(vIn)
//	u must not alias z
func (z nat) divLarge(u, uIn, vIn nat) (q, r nat) {
	n := len(vIn)
	m := len(uIn) - n

	// D1.
	shift := nlz(vIn[n-1])
	// do not modify vIn, it may be used by another goroutine simultaneously
	vp := getNat(n)
	v := *vp
	shlVU(v, vIn, shift)

	// u may safely alias uIn or vIn, the value of uIn is used to set u and vIn was already used
	u = u.make(len(uIn) + 1)
	u[len(uIn)] = shlVU(u[0:len(uIn)], uIn, shift)

	// z may safely alias uIn or vIn, both values were used already
	if alias(z, u) {
		z = nil // z is an alias for u - cannot reuse
	}
	q = z.make(m + 1)

	if n < divRecursiveThreshold {
		q.divBasic(u, v)
	} else {
		q.divRecursive(u, v)
	}
	putNat(vp)

	q = q.norm()
	shrVU(u, u, shift)
	r = u.norm()

	return q, r
}

// divBasic performs word-by-word division of u by v.
// The quotient is written in pre-allocated q.
// The remainder overwrites input u.
//
// Precondition:
// - q is large enough to hold the quotient u / v
//   which has a maximum length of len(u)-len(v)+1.
func (q nat) divBasic(u, v nat) {
	n := len(v)
	m := len(u) - n

	qhatvp := getNat(n + 1)
	qhatv := *qhatvp

	// D2.
	vn1 := v[n-1]
	rec := reciprocalWord(vn1)
	for j := m; j >= 0; j-- {
		// D3.
		qhat := Word(_M)
		var ujn Word
		if j+n < len(u) {
			ujn = u[j+n]
		}
		if ujn != vn1 {
			var rhat Word
			qhat, rhat = divWW(ujn, u[j+n-1], vn1, rec)

			// x1 | x2 = q̂v_{n-2}
			vn2 := v[n-2]
			x1, x2 := mulWW(qhat, vn2)
			// test if q̂v_{n-2} > br̂ + u_{j+n-2}
			ujn2 := u[j+n-2]
			for greaterThan(x1, x2, rhat, ujn2) {
				qhat--
				prevRhat := rhat
				rhat += vn1
				// v[n-1] >= 0, so this tests for overflow.
				if rhat < prevRhat {
					break
				}
				x1, x2 = mulWW(qhat, vn2)
			}
		}

		// D4.
		// Compute the remainder u - (q̂*v) << (_W*j).
		// The subtraction may overflow if q̂ estimate was off by one.
		qhatv[n] = mulAddVWW(qhatv[0:n], v, qhat, 0)
		qhl := len(qhatv)
		if j+qhl > len(u) && qhatv[n] == 0 {
			qhl--
		}
		c := subVV(u[j:j+qhl], u[j:], qhatv)
		if c != 0 {
			c := addVV(u[j:j+n], u[j:], v)
			// If n == qhl, the carry from subVV and the carry from addVV
			// cancel out and don't affect u[j+n].
			if n < qhl {
				u[j+n] += c
			}
			qhat--
		}

		if j == m && m == len(q) && qhat == 0 {
			continue
		}
		q[j] = qhat
	}

	putNat(qhatvp)
}

const divRecursiveThreshold = 100

// divRecursive performs word-by-word division of u by v.
// The quotient is written in pre-allocated z.
// The remainder overwrites input u.
//
// Precondition:
// - len(z) >= len(u)-len(v)
//
// See Burnikel, Ziegler, "Fast Recursive Division", Algorithm 1 and 2.
func (z nat) divRecursive(u, v nat) {
	// Recursion depth is less than 2 log2(len(v))
	// Allocate a slice of temporaries to be reused across recursion.
	recDepth := 2 * bits.Len(uint(len(v)))
	// large enough to perform Karatsuba on operands as large as v
	tmp := getNat(3 * len(v))
	temps := make([]*nat, recDepth)
	z.clear()
	z.divRecursiveStep(u, v, 0, tmp, temps)
	for _, n := range temps {
		if n != nil {
			putNat(n)
		}
	}
	putNat(tmp)
}

// divRecursiveStep computes the division of u by v.
// - z must be large enough to hold the quotient
// - the quotient will overwrite z
// - the remainder will overwrite u
func (z nat) divRecursiveStep(u, v nat, depth int, tmp *nat, temps []*nat) {
	u = u.norm()
	v = v.norm()

	if len(u) == 0 {
		z.clear()
		return
	}
	n := len(v)
	if n < divRecursiveThreshold {
		z.divBasic(u, v)
		return
	}
	m := len(u) - n
	if m < 0 {
		return
	}

	// Produce the quotient by blocks of B words.
	// Division by v (length n) is done using a length n/2 division
	// and a length n/2 multiplication for each block. The final
	// complexity is driven by multiplication complexity.
	B := n / 2

	// Allocate a nat for qhat below.
	if temps[depth] == nil {
		temps[depth] = getNat(n)
	} else {
		*temps[depth] = temps[depth].make(B + 1)
	}

	j := m
	for j > B {
		// Divide u[j-B:j+n] by vIn. Keep remainder in u
		// for next block.
		//
		// The following property will be used (Lemma 2):
		// if u = u1 << s + u0
		//    v = v1 << s + v0
		// then floor(u1/v1) >= floor(u/v)
		//
		// Moreover, the difference is at most 2 if len(v1) >= len(u/v)
		// We choose s = B-1 since len(v)-s >= B+1 >= len(u/v)
		s := (B - 1)
		// Except for the first step, the top bits are always
		// a division remainder, so the quotient length is <= n.
		uu := u[j-B:]

		qhat := *temps[depth]
		qhat.clear()
		qhat.divRecursiveStep(uu[s:B+n], v[s:], depth+1, tmp, temps)
		qhat = qhat.norm()
		// Adjust the quotient:
		//    u = u_h << s + u_l
		//    v = v_h << s + v_l
		//    u_h = q̂ v_h + rh
		//    u = q̂ (v - v_l) + rh << s + u_l
		// After the above step, u contains a remainder:
		//    u = rh << s + u_l
		// and we need to subtract q̂ v_l
		//
		// But it may be a bit too large, in which case q̂ needs to be smaller.
		qhatv := tmp.make(3 * n)
		qhatv.clear()
		qhatv = qhatv.mul(qhat, v[:s])
		for i := 0; i < 2; i++ {
			e := qhatv.cmp(uu.norm())
			if e <= 0 {
				break
			}
			subVW(qhat, qhat, 1)
			c := subVV(qhatv[:s], qhatv[:s], v[:s])
			if len(qhatv) > s {
				subVW(qhatv[s:], qhatv[s:], c)
			}
			addAt(uu[s:], v[s:], 0)
		}
		if qhatv.cmp(uu.norm()) > 0 {
			panic("impossible")
		}
		c := subVV(uu[:len(qhatv)], uu[:len(qhatv)], qhatv)
		if c > 0 {
			subVW(uu[len(qhatv):], uu[len(qhatv):], c)
		}
		addAt(z, qhat, j-B)
		j -= B
	}

	// Now u < (v<<B), compute lower bits in the same way.
	// Choose shift = B-1 again.
	s := B - 1
	qhat := *temps[depth]
	qhat.clear()
	qhat.divRecursiveStep(u[s:].norm(), v[s:], depth+1, tmp, temps)
	qhat = qhat.norm()
	qhatv := tmp.make(3 * n)
	qhatv.clear()
	qhatv = qhatv.mul(qhat, v[:s])
	// Set the correct remainder as before.
	for i := 0; i < 2; i++ {
		if e := qhatv.cmp(u.norm()); e > 0 {
			subVW(qhat, qhat, 1)
			c := subVV(qhatv[:s], qhatv[:s], v[:s])
			if len(qhatv) > s {
				subVW(qhatv[s:], qhatv[s:], c)
			}
			addAt(u[s:], v[s:], 0)
		}
	}
	if qhatv.cmp(u.norm()) > 0 {
		panic("impossible")
	}
	c := subVV(u[0:len(qhatv)], u[0:len(qhatv)], qhatv)
	if c > 0 {
		c = subVW(u[len(qhatv):], u[len(qhatv):], c)
	}
	if c > 0 {
		panic("impossible")
	}

	// Done!
	addAt(z, qhat.norm(), 0)
}

// Length of x in bits. x must be normalized.
func (x nat) bitLen() int {
	if i := len(x) - 1; i >= 0 {
@ -1170,19 +858,6 @@ func (z nat) xor(x, y nat) nat {
	return z.norm()
}

// greaterThan reports whether (x1<<_W + x2) > (y1<<_W + y2)
func greaterThan(x1, x2, y1, y2 Word) bool {
	return x1 > y1 || x1 == y1 && x2 > y2
}

// modW returns x % d.
func (x nat) modW(d Word) (r Word) {
	// TODO(agl): we don't actually need to store the q value.
	var q nat
	q = q.make(len(x))
	return divWVW(q, 0, x, d)
}

// random creates a random integer in [0..limit), using the space in z if
// possible. n is the bit length of limit.
func (z nat) random(rand *rand.Rand, limit nat, n int) nat {
|
346 src/math/big/natdiv.go Normal file
@ -0,0 +1,346 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package big

import "math/bits"

func (z nat) div(z2, u, v nat) (q, r nat) {
	if len(v) == 0 {
		panic("division by zero")
	}

	if u.cmp(v) < 0 {
		q = z[:0]
		r = z2.set(u)
		return
	}

	if len(v) == 1 {
		var r2 Word
		q, r2 = z.divW(u, v[0])
		r = z2.setWord(r2)
		return
	}

	q, r = z.divLarge(z2, u, v)
	return
}

// q = (x-r)/y, with 0 <= r < y
func (z nat) divW(x nat, y Word) (q nat, r Word) {
	m := len(x)
	switch {
	case y == 0:
		panic("division by zero")
	case y == 1:
		q = z.set(x) // result is x
		return
	case m == 0:
		q = z[:0] // result is 0
		return
	}
	// m > 0
	z = z.make(m)
	r = divWVW(z, 0, x, y)
	q = z.norm()
	return
}

// modW returns x % d.
func (x nat) modW(d Word) (r Word) {
	// TODO(agl): we don't actually need to store the q value.
	var q nat
	q = q.make(len(x))
	return divWVW(q, 0, x, d)
}

func divWVW(z []Word, xn Word, x []Word, y Word) (r Word) {
	r = xn
	if len(x) == 1 {
		qq, rr := bits.Div(uint(r), uint(x[0]), uint(y))
		z[0] = Word(qq)
		return Word(rr)
	}
	rec := reciprocalWord(y)
	for i := len(z) - 1; i >= 0; i-- {
		z[i], r = divWW(r, x[i], y, rec)
	}
	return r
}

// q = (uIn-r)/vIn, with 0 <= r < vIn
// Uses z as storage for q, and u as storage for r if possible.
// See Knuth, Volume 2, section 4.3.1, Algorithm D.
// Preconditions:
//	len(vIn) >= 2
//	len(uIn) >= len(vIn)
//	u must not alias z
func (z nat) divLarge(u, uIn, vIn nat) (q, r nat) {
	n := len(vIn)
	m := len(uIn) - n

	// D1.
	shift := nlz(vIn[n-1])
	// do not modify vIn, it may be used by another goroutine simultaneously
	vp := getNat(n)
	v := *vp
	shlVU(v, vIn, shift)

	// u may safely alias uIn or vIn, the value of uIn is used to set u and vIn was already used
	u = u.make(len(uIn) + 1)
	u[len(uIn)] = shlVU(u[0:len(uIn)], uIn, shift)

	// z may safely alias uIn or vIn, both values were used already
	if alias(z, u) {
		z = nil // z is an alias for u - cannot reuse
	}
	q = z.make(m + 1)

	if n < divRecursiveThreshold {
		q.divBasic(u, v)
	} else {
		q.divRecursive(u, v)
	}
	putNat(vp)

	q = q.norm()
	shrVU(u, u, shift)
	r = u.norm()

	return q, r
}

// divBasic performs word-by-word division of u by v.
// The quotient is written in pre-allocated q.
// The remainder overwrites input u.
//
// Precondition:
// - q is large enough to hold the quotient u / v
//   which has a maximum length of len(u)-len(v)+1.
func (q nat) divBasic(u, v nat) {
	n := len(v)
	m := len(u) - n

	qhatvp := getNat(n + 1)
	qhatv := *qhatvp

	// D2.
	vn1 := v[n-1]
	rec := reciprocalWord(vn1)
	for j := m; j >= 0; j-- {
		// D3.
		qhat := Word(_M)
		var ujn Word
		if j+n < len(u) {
			ujn = u[j+n]
		}
		if ujn != vn1 {
			var rhat Word
			qhat, rhat = divWW(ujn, u[j+n-1], vn1, rec)

			// x1 | x2 = q̂v_{n-2}
			vn2 := v[n-2]
			x1, x2 := mulWW(qhat, vn2)
			// test if q̂v_{n-2} > br̂ + u_{j+n-2}
			ujn2 := u[j+n-2]
			for greaterThan(x1, x2, rhat, ujn2) {
				qhat--
				prevRhat := rhat
				rhat += vn1
				// v[n-1] >= 0, so this tests for overflow.
				if rhat < prevRhat {
					break
				}
				x1, x2 = mulWW(qhat, vn2)
			}
		}

		// D4.
		// Compute the remainder u - (q̂*v) << (_W*j).
		// The subtraction may overflow if q̂ estimate was off by one.
		qhatv[n] = mulAddVWW(qhatv[0:n], v, qhat, 0)
		qhl := len(qhatv)
		if j+qhl > len(u) && qhatv[n] == 0 {
			qhl--
		}
		c := subVV(u[j:j+qhl], u[j:], qhatv)
		if c != 0 {
			c := addVV(u[j:j+n], u[j:], v)
			// If n == qhl, the carry from subVV and the carry from addVV
			// cancel out and don't affect u[j+n].
			if n < qhl {
				u[j+n] += c
			}
			qhat--
		}

		if j == m && m == len(q) && qhat == 0 {
			continue
		}
		q[j] = qhat
	}

	putNat(qhatvp)
}

// greaterThan reports whether (x1<<_W + x2) > (y1<<_W + y2)
func greaterThan(x1, x2, y1, y2 Word) bool {
	return x1 > y1 || x1 == y1 && x2 > y2
}

const divRecursiveThreshold = 100

// divRecursive performs word-by-word division of u by v.
// The quotient is written in pre-allocated z.
// The remainder overwrites input u.
//
// Precondition:
// - len(z) >= len(u)-len(v)
//
// See Burnikel, Ziegler, "Fast Recursive Division", Algorithm 1 and 2.
func (z nat) divRecursive(u, v nat) {
	// Recursion depth is less than 2 log2(len(v))
	// Allocate a slice of temporaries to be reused across recursion.
	recDepth := 2 * bits.Len(uint(len(v)))
	// large enough to perform Karatsuba on operands as large as v
	tmp := getNat(3 * len(v))
	temps := make([]*nat, recDepth)
	z.clear()
	z.divRecursiveStep(u, v, 0, tmp, temps)
	for _, n := range temps {
		if n != nil {
			putNat(n)
		}
	}
	putNat(tmp)
}

// divRecursiveStep computes the division of u by v.
// - z must be large enough to hold the quotient
// - the quotient will overwrite z
// - the remainder will overwrite u
func (z nat) divRecursiveStep(u, v nat, depth int, tmp *nat, temps []*nat) {
	u = u.norm()
	v = v.norm()

	if len(u) == 0 {
		z.clear()
		return
	}
	n := len(v)
	if n < divRecursiveThreshold {
		z.divBasic(u, v)
		return
	}
	m := len(u) - n
	if m < 0 {
		return
	}

	// Produce the quotient by blocks of B words.
	// Division by v (length n) is done using a length n/2 division
	// and a length n/2 multiplication for each block. The final
	// complexity is driven by multiplication complexity.
	B := n / 2

	// Allocate a nat for qhat below.
	if temps[depth] == nil {
		temps[depth] = getNat(n)
	} else {
		*temps[depth] = temps[depth].make(B + 1)
	}

	j := m
	for j > B {
		// Divide u[j-B:j+n] by vIn. Keep remainder in u
		// for next block.
		//
		// The following property will be used (Lemma 2):
		// if u = u1 << s + u0
		//    v = v1 << s + v0
		// then floor(u1/v1) >= floor(u/v)
		//
		// Moreover, the difference is at most 2 if len(v1) >= len(u/v)
		// We choose s = B-1 since len(v)-s >= B+1 >= len(u/v)
		s := (B - 1)
		// Except for the first step, the top bits are always
		// a division remainder, so the quotient length is <= n.
		uu := u[j-B:]

		qhat := *temps[depth]
		qhat.clear()
		qhat.divRecursiveStep(uu[s:B+n], v[s:], depth+1, tmp, temps)
		qhat = qhat.norm()
		// Adjust the quotient:
		//    u = u_h << s + u_l
		//    v = v_h << s + v_l
		//    u_h = q̂ v_h + rh
		//    u = q̂ (v - v_l) + rh << s + u_l
		// After the above step, u contains a remainder:
		//    u = rh << s + u_l
		// and we need to subtract q̂ v_l
		//
		// But it may be a bit too large, in which case q̂ needs to be smaller.
		qhatv := tmp.make(3 * n)
		qhatv.clear()
		qhatv = qhatv.mul(qhat, v[:s])
		for i := 0; i < 2; i++ {
			e := qhatv.cmp(uu.norm())
			if e <= 0 {
				break
			}
			subVW(qhat, qhat, 1)
			c := subVV(qhatv[:s], qhatv[:s], v[:s])
			if len(qhatv) > s {
				subVW(qhatv[s:], qhatv[s:], c)
			}
			addAt(uu[s:], v[s:], 0)
		}
		if qhatv.cmp(uu.norm()) > 0 {
			panic("impossible")
		}
		c := subVV(uu[:len(qhatv)], uu[:len(qhatv)], qhatv)
		if c > 0 {
			subVW(uu[len(qhatv):], uu[len(qhatv):], c)
		}
		addAt(z, qhat, j-B)
		j -= B
	}

	// Now u < (v<<B), compute lower bits in the same way.
	// Choose shift = B-1 again.
	s := B - 1
	qhat := *temps[depth]
	qhat.clear()
	qhat.divRecursiveStep(u[s:].norm(), v[s:], depth+1, tmp, temps)
	qhat = qhat.norm()
	qhatv := tmp.make(3 * n)
	qhatv.clear()
	qhatv = qhatv.mul(qhat, v[:s])
	// Set the correct remainder as before.
	for i := 0; i < 2; i++ {
		if e := qhatv.cmp(u.norm()); e > 0 {
			subVW(qhat, qhat, 1)
			c := subVV(qhatv[:s], qhatv[:s], v[:s])
			if len(qhatv) > s {
				subVW(qhatv[s:], qhatv[s:], c)
			}
			addAt(u[s:], v[s:], 0)
		}
	}
	if qhatv.cmp(u.norm()) > 0 {
		panic("impossible")
	}
	c := subVV(u[0:len(qhatv)], u[0:len(qhatv)], qhatv)
	if c > 0 {
		c = subVW(u[len(qhatv):], u[len(qhatv):], c)
	}
	if c > 0 {
		panic("impossible")
	}

	// Done!
	addAt(z, qhat.norm(), 0)
}
|
@ -96,9 +96,11 @@ func initMime() {
// Extensions are looked up first case-sensitively, then case-insensitively.
//
// The built-in table is small but on unix it is augmented by the local
// system's mime.types file(s) if available under one or more of these
// system's MIME-info database or mime.types file(s) if available under one or
// names:
// more of these names:
//
//	/usr/local/share/mime/globs2
//	/usr/share/mime/globs2
//	/etc/mime.types
//	/etc/apache2/mime.types
//	/etc/apache/mime.types
|
@@ -1893,30 +1893,66 @@ func TestCVE202133195(t *testing.T) {
 			return r, nil
 		},
 	}

 	r := Resolver{PreferGo: true, Dial: fake.DialContext}
+	// Change the default resolver to match our manipulated resolver
+	originalDefault := DefaultResolver
+	DefaultResolver = &r
+	defer func() {
+		DefaultResolver = originalDefault
+	}()
+
 	_, err := r.LookupCNAME(context.Background(), "golang.org")
-	if expected := "lookup golang.org: CNAME target is invalid"; err.Error() != expected {
+	if expected := "lookup golang.org: CNAME target is invalid"; err == nil || err.Error() != expected {
+		t.Errorf("Resolver.LookupCNAME returned unexpected error, got %q, want %q", err.Error(), expected)
+	}
+	_, err = LookupCNAME("golang.org")
+	if expected := "lookup golang.org: CNAME target is invalid"; err == nil || err.Error() != expected {
 		t.Errorf("LookupCNAME returned unexpected error, got %q, want %q", err.Error(), expected)
 	}

 	_, _, err = r.LookupSRV(context.Background(), "target", "tcp", "golang.org")
-	if expected := "lookup golang.org: SRV target is invalid"; err.Error() != expected {
+	if expected := "lookup golang.org: SRV target is invalid"; err == nil || err.Error() != expected {
+		t.Errorf("Resolver.LookupSRV returned unexpected error, got %q, want %q", err.Error(), expected)
+	}
+	_, _, err = LookupSRV("target", "tcp", "golang.org")
+	if expected := "lookup golang.org: SRV target is invalid"; err == nil || err.Error() != expected {
 		t.Errorf("LookupSRV returned unexpected error, got %q, want %q", err.Error(), expected)
 	}

 	_, _, err = r.LookupSRV(context.Background(), "hdr", "tcp", "golang.org")
-	if expected := "lookup golang.org: SRV header name is invalid"; err.Error() != expected {
+	if expected := "lookup golang.org: SRV header name is invalid"; err == nil || err.Error() != expected {
+		t.Errorf("Resolver.LookupSRV returned unexpected error, got %q, want %q", err.Error(), expected)
+	}
+	_, _, err = LookupSRV("hdr", "tcp", "golang.org")
+	if expected := "lookup golang.org: SRV header name is invalid"; err == nil || err.Error() != expected {
 		t.Errorf("LookupSRV returned unexpected error, got %q, want %q", err.Error(), expected)
 	}

 	_, err = r.LookupMX(context.Background(), "golang.org")
-	if expected := "lookup golang.org: MX target is invalid"; err.Error() != expected {
+	if expected := "lookup golang.org: MX target is invalid"; err == nil || err.Error() != expected {
+		t.Errorf("Resolver.LookupMX returned unexpected error, got %q, want %q", err.Error(), expected)
+	}
+	_, err = LookupMX("golang.org")
+	if expected := "lookup golang.org: MX target is invalid"; err == nil || err.Error() != expected {
 		t.Errorf("LookupMX returned unexpected error, got %q, want %q", err.Error(), expected)
 	}

 	_, err = r.LookupNS(context.Background(), "golang.org")
-	if expected := "lookup golang.org: NS target is invalid"; err.Error() != expected {
+	if expected := "lookup golang.org: NS target is invalid"; err == nil || err.Error() != expected {
+		t.Errorf("Resolver.LookupNS returned unexpected error, got %q, want %q", err.Error(), expected)
+	}
+	_, err = LookupNS("golang.org")
+	if expected := "lookup golang.org: NS target is invalid"; err == nil || err.Error() != expected {
 		t.Errorf("LookupNS returned unexpected error, got %q, want %q", err.Error(), expected)
 	}

 	_, err = r.LookupAddr(context.Background(), "1.2.3.4")
-	if expected := "lookup 1.2.3.4: PTR target is invalid"; err.Error() != expected {
+	if expected := "lookup 1.2.3.4: PTR target is invalid"; err == nil || err.Error() != expected {
+		t.Errorf("Resolver.LookupAddr returned unexpected error, got %q, want %q", err.Error(), expected)
+	}
+	_, err = LookupAddr("1.2.3.4")
+	if expected := "lookup 1.2.3.4: PTR target is invalid"; err == nil || err.Error() != expected {
 		t.Errorf("LookupAddr returned unexpected error, got %q, want %q", err.Error(), expected)
 	}
 }
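Outside the test, callers see this validation only as an ordinary lookup error. A hedged sketch using the public net API; the hostname and timeout are arbitrary, and the quoted message is the form exercised by the test above.

	package main

	import (
		"context"
		"fmt"
		"net"
		"time"
	)

	func main() {
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()

		cname, err := net.DefaultResolver.LookupCNAME(ctx, "golang.org")
		if err != nil {
			// A server returning a malformed CNAME target now surfaces as an
			// error such as "lookup golang.org: CNAME target is invalid".
			fmt.Println("lookup failed:", err)
			return
		}
		fmt.Println("CNAME:", cname)
	}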
@@ -433,8 +433,7 @@ func basicAuth(username, password string) string {
 // An error is returned if there were too many redirects or if there
 // was an HTTP protocol error. A non-2xx response doesn't cause an
 // error. Any returned error will be of type *url.Error. The url.Error
-// value's Timeout method will report true if request timed out or was
-// canceled.
+// value's Timeout method will report true if the request timed out.
 //
 // When err is nil, resp always contains a non-nil resp.Body.
 // Caller should close resp.Body when done reading from it.
@@ -589,8 +588,7 @@ func urlErrorOp(method string) string {
 // standard library body types.
 //
 // Any returned error will be of type *url.Error. The url.Error
-// value's Timeout method will report true if request timed out or was
-// canceled.
+// value's Timeout method will report true if the request timed out.
 func (c *Client) Do(req *Request) (*Response, error) {
 	return c.do(req)
 }
@@ -729,7 +727,6 @@ func (c *Client) do(req *Request) (retres *Response, reterr error) {
 		reqBodyClosed = true
 		if !deadline.IsZero() && didTimeout() {
 			err = &httpError{
-				// TODO: early in cycle: s/Client.Timeout exceeded/timeout or context cancellation/
 				err:     err.Error() + " (Client.Timeout exceeded while awaiting headers)",
 				timeout: true,
 			}
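A small illustrative sketch of the documented error contract, checking for a timeout through *url.Error with errors.As; the URL and timeout value are placeholders.

	package main

	import (
		"errors"
		"fmt"
		"net/http"
		"net/url"
		"time"
	)

	func main() {
		client := &http.Client{Timeout: 2 * time.Second}
		resp, err := client.Get("https://example.com/")
		if err != nil {
			var uerr *url.Error
			if errors.As(err, &uerr) && uerr.Timeout() {
				fmt.Println("request timed out:", uerr)
				return
			}
			fmt.Println("request failed:", err)
			return
		}
		defer resp.Body.Close()
		fmt.Println("status:", resp.Status)
	}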
@@ -253,22 +253,18 @@ func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
 	// important is "Connection" because we want a persistent
 	// connection, regardless of what the client sent to us.
 	for _, h := range hopHeaders {
-		hv := outreq.Header.Get(h)
-		if hv == "" {
-			continue
-		}
-		if h == "Te" && hv == "trailers" {
-			// Issue 21096: tell backend applications that
-			// care about trailer support that we support
-			// trailers. (We do, but we don't go out of
-			// our way to advertise that unless the
-			// incoming client request thought it was
-			// worth mentioning)
-			continue
-		}
 		outreq.Header.Del(h)
 	}

+	// Issue 21096: tell backend applications that care about trailer support
+	// that we support trailers. (We do, but we don't go out of our way to
+	// advertise that unless the incoming client request thought it was worth
+	// mentioning.) Note that we look at req.Header, not outreq.Header, since
+	// the latter has passed through removeConnectionHeaders.
+	if httpguts.HeaderValuesContainsToken(req.Header["Te"], "trailers") {
+		outreq.Header.Set("Te", "trailers")
+	}
+
 	// After stripping all the hop-by-hop connection headers above, add back any
 	// necessary for protocol upgrades, such as for websockets.
 	if reqUpType != "" {
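For context, a minimal sketch of a reverse proxy that goes through this path; the backend address and listen port are placeholders, and the header handling shown in the comment is done internally by ServeHTTP as patched above.

	package main

	import (
		"log"
		"net/http"
		"net/http/httputil"
		"net/url"
	)

	func main() {
		// Placeholder upstream; point this at a real backend.
		target, err := url.Parse("http://127.0.0.1:9090")
		if err != nil {
			log.Fatal(err)
		}
		// The proxy strips hop-by-hop headers (including "Te") before forwarding
		// and re-adds "Te: trailers" only when the client asked for trailers.
		proxy := httputil.NewSingleHostReverseProxy(target)
		log.Fatal(http.ListenAndServe(":8080", proxy))
	}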
@@ -91,8 +91,9 @@ func TestReverseProxy(t *testing.T) {

 	getReq, _ := http.NewRequest("GET", frontend.URL, nil)
 	getReq.Host = "some-name"
-	getReq.Header.Set("Connection", "close")
-	getReq.Header.Set("Te", "trailers")
+	getReq.Header.Set("Connection", "close, TE")
+	getReq.Header.Add("Te", "foo")
+	getReq.Header.Add("Te", "bar, trailers")
 	getReq.Header.Set("Proxy-Connection", "should be deleted")
 	getReq.Header.Set("Upgrade", "foo")
 	getReq.Close = true
@@ -236,6 +237,64 @@ func TestReverseProxyStripHeadersPresentInConnection(t *testing.T) {
 	}
 }

+func TestReverseProxyStripEmptyConnection(t *testing.T) {
+	// See Issue 46313.
+	const backendResponse = "I am the backend"
+
+	// someConnHeader is some arbitrary header to be declared as a hop-by-hop header
+	// in the Request's Connection header.
+	const someConnHeader = "X-Some-Conn-Header"
+
+	backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if c := r.Header.Values("Connection"); len(c) != 0 {
+			t.Errorf("handler got header %q = %v; want empty", "Connection", c)
+		}
+		if c := r.Header.Get(someConnHeader); c != "" {
+			t.Errorf("handler got header %q = %q; want empty", someConnHeader, c)
+		}
+		w.Header().Add("Connection", "")
+		w.Header().Add("Connection", someConnHeader)
+		w.Header().Set(someConnHeader, "should be deleted")
+		io.WriteString(w, backendResponse)
+	}))
+	defer backend.Close()
+	backendURL, err := url.Parse(backend.URL)
+	if err != nil {
+		t.Fatal(err)
+	}
+	proxyHandler := NewSingleHostReverseProxy(backendURL)
+	frontend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		proxyHandler.ServeHTTP(w, r)
+		if c := r.Header.Get(someConnHeader); c != "should be deleted" {
+			t.Errorf("handler modified header %q = %q; want %q", someConnHeader, c, "should be deleted")
+		}
+	}))
+	defer frontend.Close()
+
+	getReq, _ := http.NewRequest("GET", frontend.URL, nil)
+	getReq.Header.Add("Connection", "")
+	getReq.Header.Add("Connection", someConnHeader)
+	getReq.Header.Set(someConnHeader, "should be deleted")
+	res, err := frontend.Client().Do(getReq)
+	if err != nil {
+		t.Fatalf("Get: %v", err)
+	}
+	defer res.Body.Close()
+	bodyBytes, err := io.ReadAll(res.Body)
+	if err != nil {
+		t.Fatalf("reading body: %v", err)
+	}
+	if got, want := string(bodyBytes), backendResponse; got != want {
+		t.Errorf("got body %q; want %q", got, want)
+	}
+	if c := res.Header.Get("Connection"); c != "" {
+		t.Errorf("handler got header %q = %q; want empty", "Connection", c)
+	}
+	if c := res.Header.Get(someConnHeader); c != "" {
+		t.Errorf("handler got header %q = %q; want empty", someConnHeader, c)
+	}
+}
+
 func TestXForwardedFor(t *testing.T) {
 	const prevForwardedFor = "client ip"
 	const backendResponse = "I am the backend"
@@ -402,7 +402,7 @@ func (r *Resolver) LookupPort(ctx context.Context, network, service string) (por
 // LookupCNAME uses context.Background internally; to specify the context, use
 // Resolver.LookupCNAME.
 func LookupCNAME(host string) (cname string, err error) {
-	return DefaultResolver.lookupCNAME(context.Background(), host)
+	return DefaultResolver.LookupCNAME(context.Background(), host)
 }

 // LookupCNAME returns the canonical name for the given host.
@@ -442,7 +442,7 @@ func (r *Resolver) LookupCNAME(ctx context.Context, host string) (string, error)
 // The returned service names are validated to be properly
 // formatted presentation-format domain names.
 func LookupSRV(service, proto, name string) (cname string, addrs []*SRV, err error) {
-	return DefaultResolver.lookupSRV(context.Background(), service, proto, name)
+	return DefaultResolver.LookupSRV(context.Background(), service, proto, name)
 }

 // LookupSRV tries to resolve an SRV query of the given service,
@@ -484,7 +484,7 @@ func (r *Resolver) LookupSRV(ctx context.Context, service, proto, name string) (
 // LookupMX uses context.Background internally; to specify the context, use
 // Resolver.LookupMX.
 func LookupMX(name string) ([]*MX, error) {
-	return DefaultResolver.lookupMX(context.Background(), name)
+	return DefaultResolver.LookupMX(context.Background(), name)
 }

 // LookupMX returns the DNS MX records for the given domain name sorted by preference.
@@ -515,7 +515,7 @@ func (r *Resolver) LookupMX(ctx context.Context, name string) ([]*MX, error) {
 // LookupNS uses context.Background internally; to specify the context, use
 // Resolver.LookupNS.
 func LookupNS(name string) ([]*NS, error) {
-	return DefaultResolver.lookupNS(context.Background(), name)
+	return DefaultResolver.LookupNS(context.Background(), name)
 }

 // LookupNS returns the DNS NS records for the given domain name.
@@ -554,20 +554,23 @@ func (r *Resolver) LookupTXT(ctx context.Context, name string) ([]string, error)
 // LookupAddr performs a reverse lookup for the given address, returning a list
 // of names mapping to that address.
 //
+// The returned names are validated to be properly formatted presentation-format
+// domain names.
+//
 // When using the host C library resolver, at most one result will be
 // returned. To bypass the host resolver, use a custom Resolver.
 //
 // LookupAddr uses context.Background internally; to specify the context, use
 // Resolver.LookupAddr.
 func LookupAddr(addr string) (names []string, err error) {
-	return DefaultResolver.lookupAddr(context.Background(), addr)
+	return DefaultResolver.LookupAddr(context.Background(), addr)
 }

 // LookupAddr performs a reverse lookup for the given address, returning a list
 // of names mapping to that address.
 //
-// The returned names are validated to be properly
-// formatted presentation-format domain names.
+// The returned names are validated to be properly formatted presentation-format
+// domain names.
 func (r *Resolver) LookupAddr(ctx context.Context, addr string) ([]string, error) {
 	names, err := r.lookupAddr(ctx, addr)
 	if err != nil {
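A brief hedged example of the package-level reverse lookup whose documentation is updated above; the address is arbitrary, and with this change the returned names are guaranteed to be well-formed domain names or an error is returned.

	package main

	import (
		"fmt"
		"net"
	)

	func main() {
		names, err := net.LookupAddr("8.8.8.8")
		if err != nil {
			fmt.Println("reverse lookup failed:", err)
			return
		}
		for _, name := range names {
			fmt.Println(name)
		}
	}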
@@ -442,12 +442,14 @@ func TestFdReadRace(t *testing.T) {
 	defer r.Close()
 	defer w.Close()

-	c := make(chan bool)
+	const count = 10
+
+	c := make(chan bool, 1)
 	var wg sync.WaitGroup
 	wg.Add(1)
 	go func() {
 		defer wg.Done()
-		var buf [10]byte
+		var buf [count]byte
 		r.SetReadDeadline(time.Now().Add(time.Minute))
 		c <- true
 		if _, err := r.Read(buf[:]); os.IsTimeout(err) {
@@ -466,8 +468,9 @@ func TestFdReadRace(t *testing.T) {
 		r.Fd()

 		// The bug was that Fd would hang until Read timed out.
-		// If the bug is fixed, then closing r here will cause
-		// the Read to exit before the timeout expires.
+		// If the bug is fixed, then writing to w and closing r here
+		// will cause the Read to exit before the timeout expires.
+		w.Write(make([]byte, count))
 		r.Close()
 	}()
@@ -296,7 +296,7 @@ const (
 	// high addresses if viewed as unsigned).
 	//
 	// On aix/ppc64, this offset allows to keep the heapAddrBits to
-	// 48. Otherwize, it would be 60 in order to handle mmap addresses
+	// 48. Otherwise, it would be 60 in order to handle mmap addresses
 	// (in range 0x0a00000000000000 - 0x0afffffffffffff). But in this
 	// case, the memory reserved in (s *pageAlloc).init for chunks
 	// is causing important slowdowns.