Mirror of https://github.com/golang/go
net/http: remove more arbitrary timeouts from server tests
This change eliminates the easy, arbitrary timeouts that should never
happen. It leaves in place a couple of more complicated ones that will
probably need retry loops for robustness.

For #49336.
For #36179.

Change-Id: I657ef223a66461413a915da5ce9150f49acec04a
Reviewed-on: https://go-review.googlesource.com/c/go/+/476035
Run-TryBot: Bryan Mills <bcmills@google.com>
Auto-Submit: Bryan Mills <bcmills@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
Reviewed-by: Damien Neil <dneil@google.com>
This commit is contained in:
parent 82c713feb0
commit e8543a6fa6
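All of the hunks below are in net/http's serve_test.go. Most apply the same transformation: a select that races the interesting channel against time.After (or a per-request context deadline) is collapsed into a plain channel receive or a direct error check, so a wedged test is killed by the go test binary's own -timeout, which prints every goroutine's stack, rather than by a short arbitrary timer. A minimal before/after sketch of that shape, assuming a hypothetical done channel signalled by the code under test:

package example

import (
	"testing"
	"time"
)

// waitDoneOld is the shape being removed: race the event against an
// arbitrary timer and fail with a bare "timeout" message.
func waitDoneOld(t *testing.T, done <-chan struct{}) {
	select {
	case <-done:
	case <-time.After(5 * time.Second):
		t.Fatal("timeout")
	}
}

// waitDoneNew is the replacement shape: simply block. If the event never
// arrives, the test binary's own deadline (go test -timeout, 10 minutes by
// default) fails the run and dumps all goroutine stacks, which is far more
// useful when debugging a hang than a bare t.Fatal("timeout").
func waitDoneNew(done <-chan struct{}) {
	<-done
}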
@@ -35,7 +35,6 @@ import (
 	"reflect"
 	"regexp"
 	"runtime"
-	"runtime/debug"
 	"strconv"
 	"strings"
 	"sync"
@@ -827,15 +826,7 @@ func testWriteDeadlineExtendedOnNewRequest(t *testing.T, mode testMode) {
 			t.Fatal(err)
 		}
 
-		// fail test if no response after 1 second
-		ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
-		defer cancel()
-		req = req.WithContext(ctx)
-
 		r, err := c.Do(req)
-		if ctx.Err() == context.DeadlineExceeded {
-			t.Fatalf("http2 Get #%d response timed out", i)
-		}
 		if err != nil {
 			t.Fatalf("http2 Get #%d: %v", i, err)
 		}
@@ -988,25 +979,19 @@ func testOnlyWriteTimeout(t *testing.T, mode testMode) {
 
 	c := ts.Client()
 
-	errc := make(chan error, 1)
-	go func() {
+	err := func() error {
 		res, err := c.Get(ts.URL)
 		if err != nil {
-			errc <- err
-			return
+			return err
 		}
 		_, err = io.Copy(io.Discard, res.Body)
 		res.Body.Close()
-		errc <- err
+		return err
 	}()
-	select {
-	case err := <-errc:
-		if err == nil {
-			t.Errorf("expected an error from Get request")
-		}
-	case <-time.After(10 * time.Second):
-		t.Fatal("timeout waiting for Get error")
+	if err == nil {
+		t.Errorf("expected an error copying body from Get request")
 	}
+
 	if err := <-afterTimeoutErrc; err == nil {
 		t.Error("expected write error after timeout")
 	}
@@ -1133,21 +1118,10 @@ func testTCPConnectionCloses(t *testing.T, req string, h Handler) {
 		t.Fatal("ReadResponse error:", err)
 	}
 
-	didReadAll := make(chan bool, 1)
-	go func() {
-		select {
-		case <-time.After(5 * time.Second):
-			t.Error("body not closed after 5s")
-			return
-		case <-didReadAll:
-		}
-	}()
-
 	_, err = io.ReadAll(r)
 	if err != nil {
 		t.Fatal("read error:", err)
 	}
-	didReadAll <- true
 
 	if !res.Close {
 		t.Errorf("Response.Close = false; want true")
@@ -1323,7 +1297,6 @@ func testServerAllowsBlockingRemoteAddr(t *testing.T, mode testMode) {
 	}).ts
 
 	c := ts.Client()
-	c.Timeout = time.Second
 	// Force separate connection for each:
 	c.Transport.(*Transport).DisableKeepAlives = true
 
@@ -1526,8 +1499,6 @@ func TestServeTLS(t *testing.T) {
 	case err := <-errc:
 		t.Fatalf("ServeTLS: %v", err)
 	case <-serving:
-	case <-time.After(5 * time.Second):
-		t.Fatal("timeout")
 	}
 
 	c, err := tls.Dial("tcp", ln.Addr().String(), &tls.Config{
@@ -2820,18 +2791,7 @@ func testHandlerPanic(t *testing.T, withHijack bool, mode testMode, wrapper func
 		return
 	}
 
-	var delay time.Duration
-	if deadline, ok := t.Deadline(); ok {
-		delay = time.Until(deadline)
-	} else {
-		delay = 5 * time.Second
-	}
-	select {
-	case <-done:
-		return
-	case <-time.After(delay):
-		t.Fatal("expected server handler to log an error")
-	}
+	<-done
 }
 
 type terrorWriter struct{ t *testing.T }
@@ -2871,11 +2831,7 @@ func testServerWriteHijackZeroBytes(t *testing.T, mode testMode) {
 		t.Fatal(err)
 	}
 	res.Body.Close()
-	select {
-	case <-done:
-	case <-time.After(5 * time.Second):
-		t.Fatal("timeout")
-	}
+	<-done
 }
 
 func TestServerNoDate(t *testing.T) {
@@ -3238,8 +3194,6 @@ For:
 			diec <- true
 		case <-sawClose:
 			break For
-		case <-time.After(5 * time.Second):
-			t.Fatal("timeout")
 		}
 	}
 	ts.Close()
@@ -3295,9 +3249,6 @@ func testCloseNotifierPipelined(t *testing.T, mode testMode) {
 			if closes > 1 {
 				return
 			}
-		case <-time.After(5 * time.Second):
-			ts.CloseClientConnections()
-			t.Fatal("timeout")
 		}
 	}
 }
@@ -3408,12 +3359,8 @@ func testHijackBeforeRequestBodyRead(t *testing.T, mode testMode) {
 			return
 		}
 		bodyOkay <- true
-		select {
-		case <-gone:
-			gotCloseNotify <- true
-		case <-time.After(5 * time.Second):
-			gotCloseNotify <- false
-		}
+		<-gone
+		gotCloseNotify <- true
 	})).ts
 
 	conn, err := net.Dial("tcp", ts.Listener.Addr().String())
@@ -3429,9 +3376,7 @@ func testHijackBeforeRequestBodyRead(t *testing.T, mode testMode) {
 		return
 	}
 	conn.Close()
-	if !<-gotCloseNotify {
-		t.Error("timeout waiting for CloseNotify")
-	}
+	<-gotCloseNotify
 }
 
 func TestOptions(t *testing.T) { run(t, testOptions, []testMode{http1Mode}) }
@@ -3507,14 +3452,9 @@ func testOptionsHandler(t *testing.T, mode testMode) {
 		t.Fatal(err)
 	}
 
-	select {
-	case got := <-rc:
-		if got.Method != "OPTIONS" || got.RequestURI != "*" {
-			t.Errorf("Expected OPTIONS * request, got %v", got)
-		}
-	case <-time.After(5 * time.Second):
-		t.Error("timeout")
+	if got := <-rc; got.Method != "OPTIONS" || got.RequestURI != "*" {
+		t.Errorf("Expected OPTIONS * request, got %v", got)
 	}
 }
 
 // Tests regarding the ordering of Write, WriteHeader, Header, and
@@ -3985,8 +3925,6 @@ func testTransportAndServerSharedBodyRace(t *testing.T, mode testMode) {
 		}
 		<-unblockBackend
 	}))
-	var quitTimer *time.Timer
-	defer func() { quitTimer.Stop() }()
 	defer backend.close()
 
 	backendRespc := make(chan *Response, 1)
@@ -4019,20 +3957,6 @@ func testTransportAndServerSharedBodyRace(t *testing.T, mode testMode) {
 		rw.Write([]byte("OK"))
 	}))
 	defer proxy.close()
-	defer func() {
-		// Before we shut down our two httptest.Servers, start a timer.
-		// We choose 7 seconds because httptest.Server starts logging
-		// warnings to stderr at 5 seconds. If we don't disarm this bomb
-		// in 7 seconds (after the two httptest.Server.Close calls above),
-		// then we explode with stacks.
-		quitTimer = time.AfterFunc(7*time.Second, func() {
-			debug.SetTraceback("ALL")
-			stacks := make([]byte, 1<<20)
-			stacks = stacks[:runtime.Stack(stacks, true)]
-			fmt.Fprintf(os.Stderr, "%s", stacks)
-			log.Fatalf("Timeout.")
-		})
-	}()
 
 	defer close(unblockBackend)
 	req, _ := NewRequest("POST", proxy.ts.URL, io.LimitReader(neverEnding('a'), bodySize))
@@ -4098,8 +4022,6 @@ func testRequestBodyCloseDoesntBlock(t *testing.T, mode testMode) {
 		}
 	case err := <-errCh:
 		t.Error(err)
-	case <-time.After(5 * time.Second):
-		t.Error("timeout")
 	}
 }
 
@@ -4176,22 +4098,7 @@ func testServerConnState(t *testing.T, mode testMode) {
 
 	doRequests()
 
-	stateDelay := 5 * time.Second
-	if deadline, ok := t.Deadline(); ok {
-		// Allow an arbitrarily long delay.
-		// This test was observed to be flaky on the darwin-arm64-corellium builder,
-		// so we're increasing the deadline to see if it starts passing.
-		// See https://golang.org/issue/37322.
-		const arbitraryCleanupMargin = 1 * time.Second
-		stateDelay = time.Until(deadline) - arbitraryCleanupMargin
-	}
-	timer := time.NewTimer(stateDelay)
-	select {
-	case <-timer.C:
-		t.Errorf("Timed out after %v waiting for connection to change state.", stateDelay)
-	case <-complete:
-		timer.Stop()
-	}
+	<-complete
 	sl := <-activeLog
 	if !reflect.DeepEqual(sl.got, sl.want) {
 		t.Errorf("Request(s) produced unexpected state sequence.\nGot: %v\nWant: %v", sl.got, sl.want)
@@ -4480,8 +4387,6 @@ func testServerKeepAliveAfterWriteError(t *testing.T, mode testMode) {
 		}
 	}()
 
-	timeout := time.NewTimer(numReq * 2 * time.Second) // 4x overkill
-	defer timeout.Stop()
 	addrSeen := map[string]bool{}
 	numOkay := 0
 	for {
@@ -4501,8 +4406,6 @@ func testServerKeepAliveAfterWriteError(t *testing.T, mode testMode) {
 			if err == nil {
 				numOkay++
 			}
-		case <-timeout.C:
-			t.Fatal("timeout waiting for requests to complete")
 		}
 	}
 }
@@ -4936,16 +4839,12 @@ func testServerContext_LocalAddrContextKey(t *testing.T, mode testMode) {
 	}
 
 	host := cst.ts.Listener.Addr().String()
-	select {
-	case got := <-ch:
-		if addr, ok := got.(net.Addr); !ok {
-			t.Errorf("local addr value = %T; want net.Addr", got)
-		} else if fmt.Sprint(addr) != host {
-			t.Errorf("local addr = %v; want %v", addr, host)
-		}
-	case <-time.After(5 * time.Second):
-		t.Error("timed out")
+	got := <-ch
+	if addr, ok := got.(net.Addr); !ok {
+		t.Errorf("local addr value = %T; want net.Addr", got)
+	} else if fmt.Sprint(addr) != host {
+		t.Errorf("local addr = %v; want %v", addr, host)
 	}
 }
 
 // https://golang.org/issue/15960
@@ -5151,8 +5050,9 @@ func BenchmarkClient(b *testing.B) {
 	}
 
 	// Start server process.
-	cmd := exec.Command(os.Args[0], "-test.run=XXXX", "-test.bench=BenchmarkClient$")
-	cmd.Env = append(os.Environ(), "TEST_BENCH_SERVER=yes")
+	ctx, cancel := context.WithCancel(context.Background())
+	cmd := testenv.CommandContext(b, ctx, os.Args[0], "-test.run=XXXX", "-test.bench=BenchmarkClient$")
+	cmd.Env = append(cmd.Environ(), "TEST_BENCH_SERVER=yes")
 	cmd.Stderr = os.Stderr
 	stdout, err := cmd.StdoutPipe()
 	if err != nil {
@@ -5161,35 +5061,28 @@ func BenchmarkClient(b *testing.B) {
 	if err := cmd.Start(); err != nil {
 		b.Fatalf("subprocess failed to start: %v", err)
 	}
-	defer cmd.Process.Kill()
+
+	done := make(chan error, 1)
+	go func() {
+		done <- cmd.Wait()
+		close(done)
+	}()
+	defer func() {
+		cancel()
+		<-done
+	}()
 
 	// Wait for the server in the child process to respond and tell us
 	// its listening address, once it's started listening:
-	timer := time.AfterFunc(10*time.Second, func() {
-		cmd.Process.Kill()
-	})
-	defer timer.Stop()
 	bs := bufio.NewScanner(stdout)
 	if !bs.Scan() {
 		b.Fatalf("failed to read listening URL from child: %v", bs.Err())
 	}
 	url := "http://" + strings.TrimSpace(bs.Text()) + "/"
-	timer.Stop()
 	if _, err := getNoBody(url); err != nil {
 		b.Fatalf("initial probe of child process failed: %v", err)
 	}
 
-	done := make(chan error)
-	stop := make(chan struct{})
-	defer close(stop)
-	go func() {
-		select {
-		case <-stop:
-			return
-		case done <- cmd.Wait():
-		}
-	}()
-
 	// Do b.N requests to the server.
 	b.StartTimer()
 	for i := 0; i < b.N; i++ {
@@ -5210,14 +5103,9 @@ func BenchmarkClient(b *testing.B) {
 
 	// Instruct server process to stop.
 	getNoBody(url + "?stop=yes")
-	select {
-	case err := <-done:
-		if err != nil {
-			b.Fatalf("subprocess failed: %v", err)
-		}
-	case <-time.After(5 * time.Second):
-		b.Fatalf("subprocess did not stop")
+	if err := <-done; err != nil {
+		b.Fatalf("subprocess failed: %v", err)
 	}
 }
 
 func BenchmarkServerFakeConnNoKeepAlive(b *testing.B) {
@@ -5426,8 +5314,6 @@ func benchmarkCloseNotifier(b *testing.B, mode testMode) {
 		<-rw.(CloseNotifier).CloseNotify()
 		sawClose <- true
 	})).ts
-	tot := time.NewTimer(5 * time.Second)
-	defer tot.Stop()
 	b.StartTimer()
 	for i := 0; i < b.N; i++ {
 		conn, err := net.Dial("tcp", ts.Listener.Addr().String())
@@ -5439,12 +5325,7 @@ func benchmarkCloseNotifier(b *testing.B, mode testMode) {
 			b.Fatal(err)
 		}
 		conn.Close()
-		tot.Reset(5 * time.Second)
-		select {
-		case <-sawClose:
-		case <-tot.C:
-			b.Fatal("timeout")
-		}
+		<-sawClose
 	}
 	b.StopTimer()
 }
@@ -5603,11 +5484,7 @@ func testServerShutdown(t *testing.T, mode testMode) {
 	if err := <-shutdownRes; err != nil {
 		t.Fatalf("Shutdown: %v", err)
 	}
-	select {
-	case <-gotOnShutdown:
-	case <-time.After(5 * time.Second):
-		t.Errorf("onShutdown callback not called, RegisterOnShutdown broken?")
-	}
+	<-gotOnShutdown // Will hang if RegisterOnShutdown is broken.
 
 	if states := <-statesRes; states[StateActive] != 1 {
 		t.Errorf("connection in wrong state, %v", states)
@@ -5678,14 +5555,9 @@ func testServerShutdownStateNew(t *testing.T, mode testMode) {
 
 	// Wait for c.Read to unblock; should be already done at this point,
 	// or within a few milliseconds.
-	select {
-	case err := <-readRes:
-		if err == nil {
-			t.Error("expected error from Read")
-		}
-	case <-time.After(2 * time.Second):
-		t.Errorf("timeout waiting for Read to unblock")
+	if err := <-readRes; err == nil {
+		t.Error("expected error from Read")
 	}
 }
 
 // Issue 17878: tests that we can call Close twice.
@@ -5954,11 +5826,7 @@ func testServerHijackGetsBackgroundByte(t *testing.T, mode testMode) {
 	if err := cn.(*net.TCPConn).CloseWrite(); err != nil {
 		t.Fatal(err)
 	}
-	select {
-	case <-done:
-	case <-time.After(2 * time.Second):
-		t.Error("timeout")
-	}
+	<-done
 }
 
 // Like TestServerHijackGetsBackgroundByte above but sending a
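The two BenchmarkClient hunks above replace the manual cleanup (defer cmd.Process.Kill() plus a 10-second kill timer and a stop channel) with a child process tied to a cancellable context via the internal testenv.CommandContext helper, plus a buffered done channel so the deferred cleanup also reaps the process. A rough sketch of the same shape using only the public os/exec API; the startChild helper name and its packaging as a standalone function are illustrative, not part of the CL:

package example

import (
	"context"
	"os"
	"os/exec"
	"testing"
)

// startChild launches a helper process whose lifetime is tied to a context,
// mirroring the shape BenchmarkClient switches to, and returns a cleanup
// function that kills (via cancel) and then reaps the child.
func startChild(b *testing.B) (cleanup func()) {
	ctx, cancel := context.WithCancel(context.Background())
	cmd := exec.CommandContext(ctx, os.Args[0], "-test.run=XXXX", "-test.bench=BenchmarkClient$")
	cmd.Env = append(cmd.Environ(), "TEST_BENCH_SERVER=yes")
	cmd.Stderr = os.Stderr
	if err := cmd.Start(); err != nil {
		b.Fatalf("subprocess failed to start: %v", err)
	}

	done := make(chan error, 1)
	go func() {
		done <- cmd.Wait()
	}()
	return func() {
		cancel() // kills the child if it is still running
		<-done   // wait for cmd.Wait so nothing leaks past the benchmark
	}
}

In the benchmark itself this cleanup runs in a defer, so the child is torn down and reaped even when a later b.Fatalf ends the run early.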
|
Loading…
Reference in New Issue
Block a user