mirror of
https://github.com/golang/go
synced 2024-11-23 23:10:09 -07:00
return after block
This commit is contained in:
parent
9d33956503
commit
69ff43bcd0
@ -271,82 +271,3 @@ func TestTruncatedStreams(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Verify that flate.Reader.Read returns (n, io.EOF) instead
// of (n, nil) + (0, io.EOF) when possible.
//
// This helps net/http.Transport reuse HTTP/1 connections more
// aggressively.
//
// See https://github.com/google/go-github/pull/317 for background.
func TestReaderEarlyEOF(t *testing.T) {
	t.Parallel()
	testSizes := []int{
		1, 2, 3, 4, 5, 6, 7, 8,
		100, 1000, 10000, 100000,
		128, 1024, 16384, 131072,

		// Testing multiples of windowSize triggers the case
		// where Read will fail to return an early io.EOF.
		windowSize * 1, windowSize * 2, windowSize * 3,
	}

	// maxSize is the largest entry in testSizes; data is sized once so
	// every sub-case can slice its input out of the same buffer.
	var maxSize int
	for _, n := range testSizes {
		if maxSize < n {
			maxSize = n
		}
	}

	readBuf := make([]byte, 40)
	data := make([]byte, maxSize)
	for i := range data {
		data[i] = byte(i)
	}

	for _, sz := range testSizes {
		if testing.Short() && sz > windowSize {
			continue
		}
		for _, flush := range []bool{true, false} {
			earlyEOF := true // Do we expect early io.EOF?

			var buf bytes.Buffer
			// Write/Close errors are ignored: bytes.Buffer cannot fail,
			// and any corruption would surface in the Read loop below.
			w, _ := NewWriter(&buf, 5)
			w.Write(data[:sz])
			if flush {
				// If a Flush occurs after all the actual data, the flushing
				// semantics dictate that we will observe a (0, io.EOF) since
				// Read must return data before it knows that the stream ended.
				w.Flush()
				earlyEOF = false
			}
			w.Close()

			r := NewReader(&buf)
			for {
				n, err := r.Read(readBuf)
				if err == io.EOF {
					// If the availWrite == windowSize, then that means that the
					// previous Read returned because the write buffer was full
					// and it just so happened that the stream had no more data.
					// This situation is rare, but unavoidable.
					if r.(*decompressor).dict.availWrite() == windowSize {
						earlyEOF = false
					}

					if n == 0 && earlyEOF {
						t.Errorf("On size:%d flush:%v, Read() = (0, io.EOF), want (n, io.EOF)", sz, flush)
					}
					if n != 0 && !earlyEOF {
						t.Errorf("On size:%d flush:%v, Read() = (%d, io.EOF), want (0, io.EOF)", sz, flush, n)
					}
					break
				}
				if err != nil {
					t.Fatal(err)
				}
			}
		}
	}
}
|
||||
|
@ -676,12 +676,14 @@ func (f *decompressor) copyData() {
|
||||
}
|
||||
|
||||
func (f *decompressor) finishBlock() {
|
||||
if f.dict.availRead() > 0 {
|
||||
f.toRead = f.dict.readFlush()
|
||||
}
|
||||
|
||||
if f.final {
|
||||
if f.dict.availRead() > 0 {
|
||||
f.toRead = f.dict.readFlush()
|
||||
}
|
||||
f.err = io.EOF
|
||||
}
|
||||
|
||||
f.step = (*decompressor).nextBlock
|
||||
}
|
||||
|
||||
|
@ -8,6 +8,7 @@ import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
@ -135,3 +136,23 @@ func TestReaderReusesReaderBuffer(t *testing.T) {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestReaderPartialBlock(t *testing.T) {
|
||||
data, err := os.ReadFile("testdata/partial-block")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
r := NewReader(bytes.NewReader(data))
|
||||
rb := make([]byte, 32)
|
||||
n, err := r.Read(rb)
|
||||
if err != nil {
|
||||
t.Fatalf("Read: %v", err)
|
||||
}
|
||||
|
||||
expected := "hello, world"
|
||||
actual := string(rb[:n])
|
||||
if expected != actual {
|
||||
t.Fatalf("expected: %v, got: %v", expected, actual)
|
||||
}
|
||||
}
|
||||
|
1
src/compress/flate/testdata/partial-block
vendored
Normal file
1
src/compress/flate/testdata/partial-block
vendored
Normal file
@ -0,0 +1 @@
|
||||
ΚHΝΙΙΧQ(Ο/ΚI
|
Loading…
Reference in New Issue
Block a user