sync: remove TestWaitGroupMisuse2 and TestWaitGroupMisuse3

These tests are inherently nondeterministic: They exercise a racy
code path for up to one million iterations, and require that an
error occur at least once.
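
For a sense of scale (illustrative numbers, not from this commit): if a
single iteration trips the detector with probability 1e-5, then all 1e6
iterations miss it with probability (1-1e-5)^1e6 ≈ e^-10, about 0.005%,
and the test is effectively reliable. But if builder load drops the
per-iteration probability to 1e-7, the miss probability becomes
(1-1e-7)^1e6 ≈ e^-0.1 ≈ 0.90, and the test fails roughly nine runs in
ten.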

TestWaitGroupMisuse2 in particular is an ongoing source of trybot
flakiness.
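
For context, a minimal sketch of the racy pattern the deleted
TestWaitGroupMisuse2 exercised (this example is not part of the
original commit message; the deleted test itself appears in the diff
below):

	package main

	import "sync"

	// Sketch only: Add races with Wait. Most runs exit cleanly; a rare
	// interleaving panics with one of the misuse messages the deleted
	// test expected, e.g. "sync: WaitGroup misuse: Add called
	// concurrently with Wait". The test recovered such panics and
	// counted them as success; here a panic simply crashes the program.
	func main() {
		var wg sync.WaitGroup
		var workers sync.WaitGroup
		workers.Add(2)
		wg.Add(1)
		go func() {
			defer workers.Done()
			wg.Wait() // races with the Add below
		}()
		go func() {
			defer workers.Done()
			wg.Add(1) // "the bad guy" in the deleted test
			wg.Done()
		}()
		wg.Done()
		workers.Wait()
	}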

Fixes #38163.

Change-Id: Ibbbda2c998c915333487ad262d3df6829de01c2b
Reviewed-on: https://go-review.googlesource.com/c/go/+/340249
Trust: Damien Neil <dneil@google.com>
Trust: Dmitri Shuralyov <dmitshur@golang.org>
Run-TryBot: Damien Neil <dneil@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
Author: Damien Neil <dneil@google.com>
Date:   2021-08-05 11:32:43 -07:00
Parent: fbdf83072c
Commit: 011fd00245

--- a/src/sync/waitgroup_test.go
+++ b/src/sync/waitgroup_test.go
@@ -5,8 +5,6 @@
 package sync_test
 
 import (
-	"internal/race"
-	"runtime"
 	. "sync"
 	"sync/atomic"
 	"testing"
@@ -48,12 +46,6 @@ func TestWaitGroup(t *testing.T) {
 	}
 }
 
-func knownRacy(t *testing.T) {
-	if race.Enabled {
-		t.Skip("skipping known-racy test under the race detector")
-	}
-}
-
 func TestWaitGroupMisuse(t *testing.T) {
 	defer func() {
 		err := recover()
@@ -68,124 +60,6 @@ func TestWaitGroupMisuse(t *testing.T) {
 	t.Fatal("Should panic")
 }
 
-// pollUntilEqual blocks until v, loaded atomically, is
-// equal to the target.
-func pollUntilEqual(v *uint32, target uint32) {
-	for {
-		for i := 0; i < 1e3; i++ {
-			if atomic.LoadUint32(v) == target {
-				return
-			}
-		}
-		// yield to avoid deadlock with the garbage collector
-		// see issue #20072
-		runtime.Gosched()
-	}
-}
-
-func TestWaitGroupMisuse2(t *testing.T) {
-	knownRacy(t)
-	if runtime.NumCPU() <= 4 {
-		t.Skip("NumCPU<=4, skipping: this test requires parallelism")
-	}
-	defer func() {
-		err := recover()
-		if err != "sync: negative WaitGroup counter" &&
-			err != "sync: WaitGroup misuse: Add called concurrently with Wait" &&
-			err != "sync: WaitGroup is reused before previous Wait has returned" {
-			t.Fatalf("Unexpected panic: %#v", err)
-		}
-	}()
-	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
-	done := make(chan interface{}, 2)
-	// The detection is opportunistic, so we want it to panic
-	// at least in one run out of a million.
-	for i := 0; i < 1e6; i++ {
-		var wg WaitGroup
-		var here uint32
-		wg.Add(1)
-		go func() {
-			defer func() {
-				done <- recover()
-			}()
-			atomic.AddUint32(&here, 1)
-			pollUntilEqual(&here, 3)
-			wg.Wait()
-		}()
-		go func() {
-			defer func() {
-				done <- recover()
-			}()
-			atomic.AddUint32(&here, 1)
-			pollUntilEqual(&here, 3)
-			wg.Add(1) // This is the bad guy.
-			wg.Done()
-		}()
-		atomic.AddUint32(&here, 1)
-		pollUntilEqual(&here, 3)
-		wg.Done()
-		for j := 0; j < 2; j++ {
-			if err := <-done; err != nil {
-				panic(err)
-			}
-		}
-	}
-	t.Fatal("Should panic")
-}
-
-func TestWaitGroupMisuse3(t *testing.T) {
-	knownRacy(t)
-	if runtime.NumCPU() <= 1 {
-		t.Skip("NumCPU==1, skipping: this test requires parallelism")
-	}
-	defer func() {
-		err := recover()
-		if err != "sync: negative WaitGroup counter" &&
-			err != "sync: WaitGroup misuse: Add called concurrently with Wait" &&
-			err != "sync: WaitGroup is reused before previous Wait has returned" {
-			t.Fatalf("Unexpected panic: %#v", err)
-		}
-	}()
-	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
-	done := make(chan interface{}, 3)
-	// The detection is opportunistic, so we want it to panic
-	// at least in one run out of a million.
-	for i := 0; i < 1e6; i++ {
-		var wg WaitGroup
-		wg.Add(1)
-		go func() {
-			defer func() {
-				done <- recover()
-			}()
-			wg.Done()
-		}()
-		go func() {
-			defer func() {
-				done <- recover()
-			}()
-			wg.Wait()
-			// Start reusing the wg before waiting for the Wait below to return.
-			wg.Add(1)
-			go func() {
-				wg.Done()
-			}()
-			wg.Wait()
-		}()
-		go func() {
-			defer func() {
-				done <- recover()
-			}()
-			wg.Wait()
-		}()
-		for j := 0; j < 3; j++ {
-			if err := <-done; err != nil {
-				panic(err)
-			}
-		}
-	}
-	t.Fatal("Should panic")
-}
-
 func TestWaitGroupRace(t *testing.T) {
 	// Run this test for about 1ms.
 	for i := 0; i < 1000; i++ {