[dev.regabi] all: merge master (1d78139) into dev.regabi
Merge List:

+ 2020-12-26 1d78139128 runtime/cgo: fix Android build with NDK 22
+ 2020-12-25 2018b68a65 net/mail: don't use MDT in test
+ 2020-12-23 b116404444 runtime: shift timeHistogram buckets and allow negative durations
+ 2020-12-23 8db7e2fecd runtime: fix allocs-by-size and frees-by-size buckets
+ 2020-12-23 fb96f07e1a runtime: fix nStackRoots comment about stack roots
+ 2020-12-23 d1502b3c72 lib/time, time/tzdata: update tzdata to 2020e
+ 2020-12-23 30c99cbb7a cmd/go: add the Retract field to 'go help mod edit' definition of the GoMod struct
+ 2020-12-23 49d0b239cb doc: fix a typo in contribute.html
+ 2020-12-23 98a73030b0 cmd/go: in 'go get', promote named implicit dependencies to explicit
+ 2020-12-23 fd6ba1c8a2 os/signal: fix a deadlock with syscall.AllThreadsSyscall() use
+ 2020-12-23 b0b0d98283 runtime: linux iscgo support for not blocking nptl signals
+ 2020-12-22 223331fc0c cmd/go/internal/modload: add hint for missing implicit dependency

Change-Id: I76d79f17c546cab03fab1facc36cc3f834d9d126
commit 07569dac4e
@@ -1129,7 +1129,7 @@ sometimes required because the standard library code you're modifying
 might require a newer version than the stable one you have installed).

 <pre>
-$ cd $GODIR/src/hash/sha1
+$ cd $GODIR/src/crypto/sha1
 $ [make changes...]
 $ $GODIR/bin/go test .
 </pre>
@@ -8,8 +8,8 @@
 # Consult https://www.iana.org/time-zones for the latest versions.

 # Versions to use.
-CODE=2020d
-DATA=2020d
+CODE=2020e
+DATA=2020e

 set -e
 rm -rf work
Binary file not shown.
@@ -1192,6 +1192,7 @@
 //	Require []Require
 //	Exclude []Module
 //	Replace []Replace
+//	Retract []Retract
 // }
 //
 // type Require struct {
@@ -95,6 +95,7 @@ writing it back to go.mod. The JSON output corresponds to these Go types:
 	Require []Require
 	Exclude []Module
 	Replace []Replace
+	Retract []Retract
 }

 type Require struct {
@@ -28,6 +28,11 @@ import (
 //
 var buildList []module.Version

+// additionalExplicitRequirements is a list of module paths for which
+// WriteGoMod should record explicit requirements, even if they would be
+// selected without those requirements. Each path must also appear in buildList.
+var additionalExplicitRequirements []string
+
 // capVersionSlice returns s with its cap reduced to its length.
 func capVersionSlice(s []module.Version) []module.Version {
 	return s[:len(s):len(s)]
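capVersionSlice relies on Go's full slice expression: s[:len(s):len(s)] keeps the same elements but caps the capacity at the length, so any later append is forced to allocate a fresh backing array rather than write into memory shared with the original. A minimal standalone sketch of the difference:

package main

import "fmt"

func main() {
	base := []string{"a", "b", "c"}

	// Without capping, an append through a sub-slice can write
	// into base's backing array.
	alias := base[:2]
	_ = append(alias, "X") // overwrites base[2]

	// With a full slice expression, cap == len, so append must
	// reallocate and base stays untouched.
	safe := base[:2:2]
	_ = append(safe, "Y")

	fmt.Println(base) // [a b X]
}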
@@ -121,6 +126,12 @@ func EditBuildList(ctx context.Context, add, mustSelect []module.Version) error

 	if !inconsistent {
 		buildList = final
+		additionalExplicitRequirements = make([]string, 0, len(mustSelect))
+		for _, m := range mustSelect {
+			if m.Version != "none" {
+				additionalExplicitRequirements = append(additionalExplicitRequirements, m.Path)
+			}
+		}
 		return nil
 	}
@@ -15,6 +15,7 @@ import (
 	"os"
 	"path"
 	"path/filepath"
+	"sort"
 	"strconv"
 	"strings"
 	"sync"
@@ -27,6 +28,7 @@ import (
 	"cmd/go/internal/modfetch"
 	"cmd/go/internal/mvs"
 	"cmd/go/internal/search"
+	"cmd/go/internal/str"

 	"golang.org/x/mod/modfile"
 	"golang.org/x/mod/module"
@@ -845,13 +847,15 @@ func AllowWriteGoMod() {

 // MinReqs returns a Reqs with minimal additional dependencies of Target,
 // as will be written to go.mod.
 func MinReqs() mvs.Reqs {
-	var retain []string
+	retain := append([]string{}, additionalExplicitRequirements...)
 	for _, m := range buildList[1:] {
 		_, explicit := index.require[m]
 		if explicit || loaded.direct[m.Path] {
 			retain = append(retain, m.Path)
 		}
 	}
 	sort.Strings(retain)
+	str.Uniq(&retain)
 	min, err := mvs.Req(Target, retain, &mvsReqs{buildList: buildList})
 	if err != nil {
 		base.Fatalf("go: %v", err)
@@ -863,12 +863,21 @@ func loadFromRoots(params loaderParams) *loader {
 	for _, pkg := range ld.pkgs {
 		if pkg.mod == Target {
 			for _, dep := range pkg.imports {
-				if dep.mod.Path != "" {
+				if dep.mod.Path != "" && dep.mod.Path != Target.Path && index != nil {
+					_, explicit := index.require[dep.mod]
+					if allowWriteGoMod && cfg.BuildMod == "readonly" && !explicit {
+						// TODO(#40775): attach error to package instead of using
+						// base.Errorf. Ideally, 'go list' should not fail because of this,
+						// but today, LoadPackages calls WriteGoMod unconditionally, which
+						// would fail with a less clear message.
+						base.Errorf("go: %[1]s: package %[2]s imported from implicitly required module; try 'go get -d %[1]s' to add missing requirements", pkg.path, dep.path)
+					}
 					ld.direct[dep.mod.Path] = true
 				}
 			}
 		}
 	}
+	base.ExitIfErrors()

 	// If we didn't scan all of the imports from the main module, or didn't use
 	// imports.AnyTags, then we didn't necessarily load every package that
@@ -21,6 +21,7 @@ import (
 	"cmd/go/internal/imports"
 	"cmd/go/internal/modfetch"
 	"cmd/go/internal/search"
+	"cmd/go/internal/str"
 	"cmd/go/internal/trace"

 	"golang.org/x/mod/module"
@@ -1005,13 +1006,8 @@ func (rr *replacementRepo) Versions(prefix string) ([]string, error) {
 	sort.Slice(versions, func(i, j int) bool {
 		return semver.Compare(versions[i], versions[j]) < 0
 	})
-	uniq := versions[:1]
-	for _, v := range versions {
-		if v != uniq[len(uniq)-1] {
-			uniq = append(uniq, v)
-		}
-	}
-	return uniq, nil
+	str.Uniq(&versions)
+	return versions, nil
 }

 func (rr *replacementRepo) Stat(rev string) (*modfetch.RevInfo, error) {
@@ -96,6 +96,20 @@ func Contains(x []string, s string) bool {
 	return false
 }

+// Uniq removes consecutive duplicate strings from ss.
+func Uniq(ss *[]string) {
+	if len(*ss) <= 1 {
+		return
+	}
+	uniq := (*ss)[:1]
+	for _, s := range *ss {
+		if s != uniq[len(uniq)-1] {
+			uniq = append(uniq, s)
+		}
+	}
+	*ss = uniq
+}
+
 func isSpaceByte(c byte) bool {
 	return c == ' ' || c == '\t' || c == '\n' || c == '\r'
 }
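Note that Uniq removes only consecutive duplicates, which is why both call sites in this commit sort before calling it. A standalone sketch of the sort-then-Uniq pattern (the helper is copied here because cmd/go internals can't be imported):

package main

import (
	"fmt"
	"sort"
)

// uniq mirrors cmd/go/internal/str.Uniq: it drops *consecutive*
// duplicates in place, so a sorted input yields a full dedup.
func uniq(ss *[]string) {
	if len(*ss) <= 1 {
		return
	}
	out := (*ss)[:1]
	for _, s := range *ss {
		if s != out[len(out)-1] {
			out = append(out, s)
		}
	}
	*ss = out
}

func main() {
	paths := []string{"b", "a", "b", "a"}
	sort.Strings(paths)
	uniq(&paths)
	fmt.Println(paths) // [a b]
}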
src/cmd/go/testdata/script/mod_get_promote_implicit.txt (new file, 88 lines)
@@ -0,0 +1,88 @@
+cp go.mod.orig go.mod
+
+# If we list a package in an implicit dependency imported from the main module,
+# we should get an error because the dependency should have an explicit
+# requirement.
+go list -m indirect-with-pkg
+stdout '^indirect-with-pkg v1.0.0 => ./indirect-with-pkg$'
+! go list ./use-indirect
+stderr '^go: m/use-indirect: package indirect-with-pkg imported from implicitly required module; try ''go get -d m/use-indirect'' to add missing requirements$'
+
+# We can promote the implicit requirement by getting the importing package,
+# as hinted.
+go get -d m/use-indirect
+cmp go.mod go.mod.use
+cp go.mod.orig go.mod
+
+# We can also promote implicit requirements using 'go get' on them, or their
+# packages. This gives us "// indirect" requirements, since 'go get' doesn't
+# know they're needed by the main module. See #43131 for the rationale.
+go get -d indirect-with-pkg indirect-without-pkg
+cmp go.mod go.mod.indirect
+
+-- go.mod.orig --
+module m
+
+go 1.16
+
+require direct v1.0.0
+
+replace (
+	direct v1.0.0 => ./direct
+	indirect-with-pkg v1.0.0 => ./indirect-with-pkg
+	indirect-without-pkg v1.0.0 => ./indirect-without-pkg
+)
+-- go.mod.use --
+module m
+
+go 1.16
+
+require (
+	direct v1.0.0
+	indirect-with-pkg v1.0.0
+)
+
+replace (
+	direct v1.0.0 => ./direct
+	indirect-with-pkg v1.0.0 => ./indirect-with-pkg
+	indirect-without-pkg v1.0.0 => ./indirect-without-pkg
+)
+-- go.mod.indirect --
+module m
+
+go 1.16
+
+require (
+	direct v1.0.0
+	indirect-with-pkg v1.0.0 // indirect
+	indirect-without-pkg v1.0.0 // indirect
+)
+
+replace (
+	direct v1.0.0 => ./direct
+	indirect-with-pkg v1.0.0 => ./indirect-with-pkg
+	indirect-without-pkg v1.0.0 => ./indirect-without-pkg
+)
+-- use-indirect/use-indirect.go --
+package use
+
+import _ "indirect-with-pkg"
+-- direct/go.mod --
+module direct
+
+go 1.16
+
+require (
+	indirect-with-pkg v1.0.0
+	indirect-without-pkg v1.0.0
+)
+-- indirect-with-pkg/go.mod --
+module indirect-with-pkg
+
+go 1.16
+-- indirect-with-pkg/p.go --
+package p
+-- indirect-without-pkg/go.mod --
+module indirect-without-pkg
+
+go 1.16
@@ -107,8 +107,8 @@ func TestDateParsing(t *testing.T) {
 			time.Date(1997, 11, 20, 9, 55, 6, 0, time.FixedZone("", -6*60*60)),
 		},
 		{
-			"Thu, 20 Nov 1997 09:55:06 MDT (MDT)",
-			time.Date(1997, 11, 20, 9, 55, 6, 0, time.FixedZone("MDT", 0)),
+			"Thu, 20 Nov 1997 09:55:06 GMT (GMT)",
+			time.Date(1997, 11, 20, 9, 55, 6, 0, time.UTC),
 		},
 		{
 			"Fri, 21 Nov 1997 09:55:06 +1300 (TOT)",
@@ -278,8 +278,8 @@ func TestDateParsingCFWS(t *testing.T) {
 			true,
 		},
 		{
-			"Fri, 21 Nov 1997 09:55:06 MDT (MDT)",
-			time.Date(1997, 11, 21, 9, 55, 6, 0, time.FixedZone("MDT", 0)),
+			"Fri, 21 Nov 1997 09:55:06 GMT (GMT)",
+			time.Date(1997, 11, 21, 9, 55, 6, 0, time.UTC),
 			true,
 		},
 	}
src/os/signal/signal_linux_test.go (new file, 42 lines)
@@ -0,0 +1,42 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package signal
+
+import (
+	"os"
+	"syscall"
+	"testing"
+	"time"
+)
+
+const prSetKeepCaps = 8
+
+// This test validates that syscall.AllThreadsSyscall() can reliably
+// reach all 'm' (threads) of the nocgo runtime even when one thread
+// is blocked waiting to receive signals from the kernel. This monitors
+// for a regression vs. the fix for #43149.
+func TestAllThreadsSyscallSignals(t *testing.T) {
+	if _, _, err := syscall.AllThreadsSyscall(syscall.SYS_PRCTL, prSetKeepCaps, 0, 0); err == syscall.ENOTSUP {
+		t.Skip("AllThreadsSyscall disabled with cgo")
+	}
+
+	sig := make(chan os.Signal, 1)
+	Notify(sig, os.Interrupt)
+
+	for i := 0; i <= 100; i++ {
+		if _, _, errno := syscall.AllThreadsSyscall(syscall.SYS_PRCTL, prSetKeepCaps, uintptr(i&1), 0); errno != 0 {
+			t.Fatalf("[%d] failed to set KEEP_CAPS=%d: %v", i, i&1, errno)
+		}
+	}
+
+	select {
+	case <-time.After(10 * time.Millisecond):
+	case <-sig:
+		t.Fatal("unexpected signal")
+	}
+	Stop(sig)
+}
@@ -12,7 +12,7 @@ static void *threadentry(void*);
 static void (*setg_gcc)(void*);

 // This will be set in gcc_android.c for android-specific customization.
-void (*x_cgo_inittls)(void **tlsg, void **tlsbase);
+void (*x_cgo_inittls)(void **tlsg, void **tlsbase) __attribute__((common));

 void
 x_cgo_init(G *g, void (*setg)(void*), void **tlsg, void **tlsbase)
@@ -14,7 +14,7 @@ static void* threadentry(void*);
 static void (*setg_gcc)(void*);

 // This will be set in gcc_android.c for android-specific customization.
-void (*x_cgo_inittls)(void **tlsg, void **tlsbase);
+void (*x_cgo_inittls)(void **tlsg, void **tlsbase) __attribute__((common));

 void
 x_cgo_init(G *g, void (*setg)(void*), void **tlsg, void **tlsbase)
@@ -10,7 +10,7 @@

 static void *threadentry(void*);

-void (*x_cgo_inittls)(void **tlsg, void **tlsbase);
+void (*x_cgo_inittls)(void **tlsg, void **tlsbase) __attribute__((common));
 static void (*setg_gcc)(void*);

 void
@@ -12,7 +12,7 @@

 static void *threadentry(void*);

-void (*x_cgo_inittls)(void **tlsg, void **tlsbase);
+void (*x_cgo_inittls)(void **tlsg, void **tlsbase) __attribute__((common));
 static void (*setg_gcc)(void*);

 void
@@ -1201,12 +1201,12 @@ type TimeHistogram timeHistogram

 // Counts returns the counts for the given bucket, subBucket indices.
 // Returns true if the bucket was valid, otherwise returns the counts
-// for the overflow bucket and false.
+// for the underflow bucket and false.
 func (th *TimeHistogram) Count(bucket, subBucket uint) (uint64, bool) {
 	t := (*timeHistogram)(th)
 	i := bucket*TimeHistNumSubBuckets + subBucket
 	if i >= uint(len(t.counts)) {
-		return t.overflow, false
+		return t.underflow, false
 	}
 	return t.counts[i], true
 }
@@ -69,17 +69,15 @@ const (
 // for concurrent use. It is also safe to read all the values
 // atomically.
 type timeHistogram struct {
-	counts   [timeHistNumSuperBuckets * timeHistNumSubBuckets]uint64
-	overflow uint64
+	counts    [timeHistNumSuperBuckets * timeHistNumSubBuckets]uint64
+	underflow uint64
 }

 // record adds the given duration to the distribution.
-//
-// Although the duration is an int64 to facilitate ease-of-use
-// with e.g. nanotime, the duration must be non-negative.
 func (h *timeHistogram) record(duration int64) {
 	if duration < 0 {
-		throw("timeHistogram encountered negative duration")
+		atomic.Xadd64(&h.underflow, 1)
+		return
 	}
 	// The index of the exponential bucket is just the index
 	// of the highest set bit adjusted for how many bits we
@@ -92,15 +90,17 @@ func (h *timeHistogram) record(duration int64) {
 		superBucket = uint(sys.Len64(uint64(duration))) - timeHistSubBucketBits
 		if superBucket*timeHistNumSubBuckets >= uint(len(h.counts)) {
 			// The bucket index we got is larger than what we support, so
-			// add into the special overflow bucket.
-			atomic.Xadd64(&h.overflow, 1)
-			return
+			// include this count in the highest bucket, which extends to
+			// infinity.
+			superBucket = timeHistNumSuperBuckets - 1
+			subBucket = timeHistNumSubBuckets - 1
+		} else {
+			// The linear subbucket index is just the timeHistSubBucketsBits
+			// bits after the top bit. To extract that value, shift down
+			// the duration such that we leave the top bit and the next bits
+			// intact, then extract the index.
+			subBucket = uint((duration >> (superBucket - 1)) % timeHistNumSubBuckets)
 		}
-		// The linear subbucket index is just the timeHistSubBucketsBits
-		// bits after the top bit. To extract that value, shift down
-		// the duration such that we leave the top bit and the next bits
-		// intact, then extract the index.
-		subBucket = uint((duration >> (superBucket - 1)) % timeHistNumSubBuckets)
 	} else {
 		subBucket = uint(duration)
 	}
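The index math is easy to check outside the runtime. A hedged sketch with stand-in constants (the names and values below are illustrative, not necessarily the runtime's actual bucket counts):

package main

import (
	"fmt"
	"math/bits"
)

// Illustrative stand-ins for the runtime's histogram constants; the
// real values live in runtime/histogram.go and may differ.
const (
	numSubBuckets   = 16
	subBucketBits   = 4 // log2(numSubBuckets)
	numSuperBuckets = 45
)

// bucketFor mimics timeHistogram.record's index computation after this
// CL: negative durations go to the underflow counter, small durations
// index super-bucket 0 linearly, and huge ones saturate in the highest
// bucket instead of overflowing.
func bucketFor(duration int64) (super, sub int, underflow bool) {
	if duration < 0 {
		return 0, 0, true
	}
	if duration < numSubBuckets {
		return 0, int(duration), false
	}
	super = bits.Len64(uint64(duration)) - subBucketBits
	if super >= numSuperBuckets {
		return numSuperBuckets - 1, numSubBuckets - 1, false
	}
	sub = int((duration >> uint(super-1)) % numSubBuckets)
	return super, sub, false
}

func main() {
	for _, d := range []int64{-1, 7, 100, 1 << 62} {
		s, b, u := bucketFor(d)
		fmt.Printf("d=%d -> super=%d sub=%d underflow=%v\n", d, s, b, u)
	}
}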
@@ -128,7 +128,7 @@ func timeHistogramMetricsBuckets() []float64 {
 		// index to combine it with the bucketMin.
 		subBucketShift := uint(0)
 		if i > 1 {
-			// The first two buckets are exact with respect to integers,
+			// The first two super buckets are exact with respect to integers,
 			// so we'll never have to shift the sub-bucket index. Thereafter,
 			// we shift up by 1 with each subsequent bucket.
 			subBucketShift = uint(i - 2)
@@ -5,6 +5,7 @@
 package runtime_test

 import (
+	"math"
 	. "runtime"
 	"testing"
 )
@@ -32,8 +33,8 @@ func TestTimeHistogram(t *testing.T) {
 			h.Record(base + v)
 		}
 	}
-	// Hit the overflow bucket.
-	h.Record(int64(^uint64(0) >> 1))
+	// Hit the underflow bucket.
+	h.Record(int64(-1))

 	// Check to make sure there's exactly one count in each
 	// bucket.
@@ -41,7 +42,7 @@ func TestTimeHistogram(t *testing.T) {
 		for j := uint(0); j < TimeHistNumSubBuckets; j++ {
 			c, ok := h.Count(i, j)
 			if !ok {
-				t.Errorf("hit overflow bucket unexpectedly: (%d, %d)", i, j)
+				t.Errorf("hit underflow bucket unexpectedly: (%d, %d)", i, j)
 			} else if c != 1 {
 				t.Errorf("bucket (%d, %d) has count that is not 1: %d", i, j, c)
 			}
@@ -49,10 +50,21 @@ func TestTimeHistogram(t *testing.T) {
 	}
 	c, ok := h.Count(TimeHistNumSuperBuckets, 0)
 	if ok {
-		t.Errorf("expected to hit overflow bucket: (%d, %d)", TimeHistNumSuperBuckets, 0)
+		t.Errorf("expected to hit underflow bucket: (%d, %d)", TimeHistNumSuperBuckets, 0)
 	}
 	if c != 1 {
-		t.Errorf("overflow bucket has count that is not 1: %d", c)
+		t.Errorf("underflow bucket has count that is not 1: %d", c)
 	}

+	// Check overflow behavior.
+	// By hitting a high value, we should just be adding into the highest bucket.
+	h.Record(math.MaxInt64)
+	c, ok = h.Count(TimeHistNumSuperBuckets-1, TimeHistNumSubBuckets-1)
+	if !ok {
+		t.Error("hit underflow bucket in highest bucket unexpectedly")
+	} else if c != 2 {
+		t.Errorf("highest has count that is not 2: %d", c)
+	}
+
 	dummyTimeHistogram = TimeHistogram{}
 }
@@ -43,7 +43,18 @@ func initMetrics() {
 	}
 	sizeClassBuckets = make([]float64, _NumSizeClasses)
 	for i := range sizeClassBuckets {
-		sizeClassBuckets[i] = float64(class_to_size[i])
+		// Size classes have an inclusive upper-bound
+		// and exclusive lower bound (e.g. 48-byte size class is
+		// (32, 48]) whereas we want an inclusive lower-bound
+		// and exclusive upper-bound (e.g. 48-byte size class is
+		// [33, 49). We can achieve this by shifting all bucket
+		// boundaries up by 1.
+		//
+		// Also, a float64 can precisely represent integers with
+		// value up to 2^53 and size classes are relatively small
+		// (nowhere near 2^48 even) so this will give us exact
+		// boundaries.
+		sizeClassBuckets[i] = float64(class_to_size[i] + 1)
 	}
 	timeHistBuckets = timeHistogramMetricsBuckets()
 	metrics = map[string]metricData{
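In other words, the +1 turns the allocator's (lower, upper] size classes into the [lower+1, upper+1) intervals the metrics histogram reports, so an object of exactly 48 bytes lands in [33, 49). A toy check with a made-up size-class table:

package main

import "fmt"

func main() {
	// Hypothetical stand-in for the runtime's class_to_size table.
	classToSize := []uint32{0, 8, 16, 32, 48}

	buckets := make([]float64, len(classToSize))
	for i := range buckets {
		// Shift each boundary up by 1 so a bucket covers
		// [size_{i-1}+1, size_i+1), matching the inclusive
		// lower / exclusive upper convention of the metrics API.
		buckets[i] = float64(classToSize[i] + 1)
	}
	fmt.Println(buckets) // [1 9 17 33 49]
}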
@@ -105,9 +116,9 @@ func initMetrics() {
 		"/gc/pauses:seconds": {
 			compute: func(_ *statAggregate, out *metricValue) {
 				hist := out.float64HistOrInit(timeHistBuckets)
-				hist.counts[len(hist.counts)-1] = atomic.Load64(&memstats.gcPauseDist.overflow)
+				hist.counts[0] = atomic.Load64(&memstats.gcPauseDist.underflow)
 				for i := range hist.buckets {
-					hist.counts[i] = atomic.Load64(&memstats.gcPauseDist.counts[i])
+					hist.counts[i+1] = atomic.Load64(&memstats.gcPauseDist.counts[i])
 				}
 			},
 		},
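On the consumer side, counts[0] of the exported histogram now carries the underflow bucket and each real bucket shifts up by one. A sketch of reading this metric with the runtime/metrics API introduced in Go 1.16:

package main

import (
	"fmt"
	"runtime"
	"runtime/metrics"
)

func main() {
	runtime.GC() // force at least one GC pause

	sample := []metrics.Sample{{Name: "/gc/pauses:seconds"}}
	metrics.Read(sample)

	hist := sample[0].Value.Float64Histogram()
	// Counts[i] weights the interval [Buckets[i], Buckets[i+1]);
	// after this CL, Counts[0] carries the underflow counter.
	for i, c := range hist.Counts {
		if c > 0 {
			fmt.Printf("[%g, %g): %d\n", hist.Buckets[i], hist.Buckets[i+1], c)
		}
	}
}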
@@ -154,6 +154,12 @@ func TestReadMetricsConsistency(t *testing.T) {
 	if totalVirtual.got != totalVirtual.want {
 		t.Errorf(`"/memory/classes/total:bytes" does not match sum of /memory/classes/**: got %d, want %d`, totalVirtual.got, totalVirtual.want)
 	}
+	if objects.alloc.Counts[0] > 0 {
+		t.Error("found counts for objects of non-positive size in allocs-by-size")
+	}
+	if objects.free.Counts[0] > 0 {
+		t.Error("found counts for objects of non-positive size in frees-by-size")
+	}
 	if len(objects.alloc.Buckets) != len(objects.free.Buckets) {
 		t.Error("allocs-by-size and frees-by-size buckets don't match in length")
 	} else if len(objects.alloc.Counts) != len(objects.free.Counts) {
@@ -101,8 +101,7 @@ func gcMarkRootPrepare() {
 	// Gs may be created after this point, but it's okay that we
 	// ignore them because they begin life without any roots, so
 	// there's nothing to scan, and any roots they create during
-	// the concurrent phase will be scanned during mark
-	// termination.
+	// the concurrent phase will be caught by the write barrier.
 	work.nStackRoots = int(atomic.Loaduintptr(&allglen))

 	work.markrootNext = 0
@@ -72,7 +72,7 @@ func clearSignalHandlers() {
 }

 //go:nosplit
-func sigblock() {
+func sigblock(exiting bool) {
 }

 // Called to initialize a new m (including the bootstrap m).
@@ -301,6 +301,24 @@ func getHugePageSize() uintptr {
 func osinit() {
 	ncpu = getproccount()
 	physHugePageSize = getHugePageSize()
+	if iscgo {
+		// #42494 glibc and musl reserve some signals for
+		// internal use and require they not be blocked by
+		// the rest of a normal C runtime. When the go runtime
+		// blocks...unblocks signals, temporarily, the blocked
+		// interval of time is generally very short. As such,
+		// these expectations of *libc code are mostly met by
+		// the combined go+cgo system of threads. However,
+		// when go causes a thread to exit, via a return from
+		// mstart(), the combined runtime can deadlock if
+		// these signals are blocked. Thus, don't block these
+		// signals when exiting threads.
+		// - glibc: SIGCANCEL (32), SIGSETXID (33)
+		// - musl: SIGTIMER (32), SIGCANCEL (33), SIGSYNCCALL (34)
+		sigdelset(&sigsetAllExiting, 32)
+		sigdelset(&sigsetAllExiting, 33)
+		sigdelset(&sigsetAllExiting, 34)
+	}
 	osArchInit()
 }
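sigdelset simply clears one signal's bit in a mask. A hedged sketch of the idea, assuming a flat 64-bit mask (the runtime's actual sigset layout is OS-specific):

package main

import "fmt"

type sigset uint64

// sigdelset clears the bit for signal i (1-based, as on Linux).
func sigdelset(mask *sigset, i int) {
	*mask &^= 1 << uint(i-1)
}

func main() {
	all := sigset(^uint64(0))
	exiting := all
	// Mirror osinit: leave the NPTL-reserved signals 32-34 unblocked
	// when a thread exits.
	for _, sig := range []int{32, 33, 34} {
		sigdelset(&exiting, sig)
	}
	fmt.Printf("%064b\n", uint64(exiting))
}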
@@ -195,7 +195,7 @@ func msigrestore(sigmask sigset) {
 func clearSignalHandlers() {
 }

-func sigblock() {
+func sigblock(exiting bool) {
 }

 // Called to initialize a new m (including the bootstrap m).
@@ -886,7 +886,7 @@ func clearSignalHandlers() {
 }

 //go:nosplit
-func sigblock() {
+func sigblock(exiting bool) {
 }

 // Called to initialize a new m (including the bootstrap m).
@@ -1313,7 +1313,7 @@ func mexit(osStack bool) {
 		throw("locked m0 woke up")
 	}

-	sigblock()
+	sigblock(true)
 	unminit()

 	// Free the gsignal stack.
@@ -1515,6 +1515,7 @@ func syscall_runtime_doAllThreadsSyscall(fn func(bool) bool) {
 	if netpollinited() {
 		netpollBreak()
 	}
+	sigRecvPrepareForFixup()
 	_g_ := getg()
 	if raceenabled {
 		// For m's running without racectx, we loan out the
@@ -1754,7 +1755,7 @@ func needm() {
 	// starting a new m to run Go code via newosproc.
 	var sigmask sigset
 	sigsave(&sigmask)
-	sigblock()
+	sigblock(false)

 	// Lock extra list, take head, unlock popped list.
 	// nilokay=false is safe here because of the invariant above,
@@ -1903,7 +1904,7 @@ func dropm() {
 	// Setg(nil) clears g, which is the signal handler's cue not to run Go handlers.
 	// It's important not to try to handle a signal between those two steps.
 	sigmask := mp.sigmask
-	sigblock()
+	sigblock(false)
 	unminit()

 	mnext := lockextra(true)
@@ -3776,7 +3777,7 @@ func beforefork() {
 	// group. See issue #18600.
 	gp.m.locks++
 	sigsave(&gp.m.sigmask)
-	sigblock()
+	sigblock(false)

 // This function is called before fork in syscall package.
 // Code between fork and exec must not allocate memory nor even try to grow stack.
@@ -1042,15 +1042,26 @@ func msigrestore(sigmask sigset) {
 	sigprocmask(_SIG_SETMASK, &sigmask, nil)
 }

-// sigblock blocks all signals in the current thread's signal mask.
+// sigsetAllExiting is used by sigblock(true) when a thread is
+// exiting. sigset_all is defined in OS specific code, and per GOOS
+// behavior may override this default for sigsetAllExiting: see
+// osinit().
+var sigsetAllExiting = sigset_all
+
+// sigblock blocks signals in the current thread's signal mask.
 // This is used to block signals while setting up and tearing down g
-// when a non-Go thread calls a Go function.
-// The OS-specific code is expected to define sigset_all.
+// when a non-Go thread calls a Go function. When a thread is exiting
+// we use the sigsetAllExiting value, otherwise the OS specific
+// definition of sigset_all is used.
 // This is nosplit and nowritebarrierrec because it is called by needm
 // which may be called on a non-Go thread with no g available.
 //go:nosplit
 //go:nowritebarrierrec
-func sigblock() {
+func sigblock(exiting bool) {
+	if exiting {
+		sigprocmask(_SIG_SETMASK, &sigsetAllExiting, nil)
+		return
+	}
 	sigprocmask(_SIG_SETMASK, &sigset_all, nil)
 }
@@ -12,12 +12,16 @@
 // sigsend is called by the signal handler to queue a new signal.
 // signal_recv is called by the Go program to receive a newly queued signal.
 // Synchronization between sigsend and signal_recv is based on the sig.state
-// variable. It can be in 3 states: sigIdle, sigReceiving and sigSending.
+// variable. It can be in 4 states: sigIdle, sigReceiving, sigSending and sigFixup.
 // sigReceiving means that signal_recv is blocked on sig.Note and there are no
 // new pending signals.
 // sigSending means that sig.mask *may* contain new pending signals,
 // signal_recv can't be blocked in this state.
 // sigIdle means that there are no new pending signals and signal_recv is not blocked.
+// sigFixup is a transient state that can only exist as a short
+// transition from sigReceiving and then on to sigIdle: it is
+// used to ensure the AllThreadsSyscall()'s mDoFixup() operation
+// occurs on the sleeping m, waiting to receive a signal.
 // Transitions between states are done atomically with CAS.
 // When signal_recv is unblocked, it resets sig.Note and rechecks sig.mask.
 // If several sigsends and signal_recv execute concurrently, it can lead to
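The four-state protocol is easier to follow in isolation. A toy model of just the fixup transitions described above (a sketch, not the runtime's code; the runtime parks and wakes via sig.note rather than returning strings):

package main

import (
	"fmt"
	"sync/atomic"
)

// Toy mirror of sig.state's four states.
const (
	sigIdle uint32 = iota
	sigReceiving
	sigSending
	sigFixup
)

var state uint32 = sigReceiving // pretend signal_recv is parked

// prepareForFixup mimics sigRecvPrepareForFixup: it only fires when
// the receiver is parked, nudging it through sigFixup back to sigIdle.
func prepareForFixup() bool {
	return atomic.CompareAndSwapUint32(&state, sigReceiving, sigFixup)
}

// wakeReceiver mimics the receiver's side: on waking it checks whether
// this was a fixup wakeup (sigFixup -> sigIdle, then park again) or a
// real signal delivery.
func wakeReceiver() string {
	if atomic.CompareAndSwapUint32(&state, sigFixup, sigIdle) {
		return "fixup wakeup: run the fixup, then park again"
	}
	return "real delivery: return the signal"
}

func main() {
	fmt.Println(prepareForFixup()) // true: sigReceiving -> sigFixup
	fmt.Println(wakeReceiver())    // fixup wakeup path
	fmt.Println(prepareForFixup()) // false: receiver no longer parked
}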
@@ -59,6 +63,7 @@ const (
 	sigIdle = iota
 	sigReceiving
 	sigSending
+	sigFixup
 )

 // sigsend delivers a signal from sighandler to the internal signal delivery queue.
@@ -112,6 +117,9 @@ Send:
 				notewakeup(&sig.note)
 				break Send
 			}
+		case sigFixup:
+			// nothing to do - we need to wait for sigIdle.
+			osyield()
 		}
 	}
@@ -119,6 +127,19 @@ Send:
 	return true
 }

+// sigRecvPrepareForFixup is used to temporarily wake up the
+// signal_recv() running thread while it is blocked waiting for the
+// arrival of a signal. If it causes the thread to wake up, the
+// sig.state travels through this sequence: sigReceiving -> sigFixup
+// -> sigIdle -> sigReceiving and resumes. (This is only called while
+// GC is disabled.)
+//go:nosplit
+func sigRecvPrepareForFixup() {
+	if atomic.Cas(&sig.state, sigReceiving, sigFixup) {
+		notewakeup(&sig.note)
+	}
+}
+
 // Called to receive the next queued signal.
 // Must only be called from a single goroutine at a time.
 //go:linkname signal_recv os/signal.signal_recv
@@ -146,7 +167,16 @@ func signal_recv() uint32 {
 			}
 			notetsleepg(&sig.note, -1)
 			noteclear(&sig.note)
-			break Receive
+			if !atomic.Cas(&sig.state, sigFixup, sigIdle) {
+				break Receive
+			}
+			// Getting here, the code will
+			// loop around again to sleep
+			// in state sigReceiving. This
+			// path is taken when
+			// sigRecvPrepareForFixup()
+			// has been called by another
+			// thread.
 		}
 	case sigSending:
 		if atomic.Cas(&sig.state, sigSending, sigIdle) {
@@ -92,6 +92,13 @@ func sendNote(s *byte) bool {
 	return true
 }

+// sigRecvPrepareForFixup is a no-op on plan9. (This would only be
+// called while GC is disabled.)
+//
+//go:nosplit
+func sigRecvPrepareForFixup() {
+}
+
 // Called to receive the next queued signal.
 // Must only be called from a single goroutine at a time.
 //go:linkname signal_recv os/signal.signal_recv
@@ -597,6 +597,14 @@ func compareStatus(filter, expect string) error {
 	return nil
 }

+// killAThread locks the goroutine to an OS thread and exits; this
+// causes an OS thread to terminate.
+func killAThread(c <-chan struct{}) {
+	runtime.LockOSThread()
+	<-c
+	return
+}
+
 // TestSetuidEtc performs tests on all of the wrapped system calls
 // that mirror to the 9 glibc syscalls with POSIX semantics. The test
 // here is considered authoritative and should compile and run
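The trick here is documented behavior of runtime.LockOSThread: if a goroutine exits while locked to its thread without calling UnlockOSThread, the thread is terminated. A minimal standalone version:

package main

import (
	"fmt"
	"runtime"
)

// killAThread, as in the test: lock the goroutine to its OS thread
// and return without unlocking, which makes the runtime discard the
// thread once the goroutine exits.
func killAThread(c <-chan struct{}) {
	runtime.LockOSThread()
	<-c
}

func main() {
	c := make(chan struct{})
	go killAThread(c)
	close(c)
	fmt.Println("spawned and retired one OS thread")
}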
@@ -647,6 +655,11 @@ func TestSetuidEtc(t *testing.T) {
 	}

 	for i, v := range vs {
+		// Generate some thread churn as we execute the tests.
+		c := make(chan struct{})
+		go killAThread(c)
+		close(c)
+
 		if err := v.fn(); err != nil {
 			t.Errorf("[%d] %q failed: %v", i, v.call, err)
 			continue
File diff suppressed because it is too large.