// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bytes

// Simple byte buffer for marshaling data.

import (
	"errors"
	"io"
	"unicode/utf8"
)

// A Buffer is a variable-sized buffer of bytes with Read and Write methods.
// The zero value for Buffer is an empty buffer ready to use.
type Buffer struct {
	buf      []byte // contents are the bytes buf[off : len(buf)]
	off      int    // read at &buf[off], write at &buf[len(buf)]
	lastRead readOp // last read operation, so that Unread* can work correctly.

	// FIXME: lastRead can fit in a single byte

	// memory to hold first slice; helps small buffers avoid allocation.
	// FIXME: it would be advisable to align Buffer to cachelines to avoid false
	// sharing.
	bootstrap [64]byte
}
// The readOp constants describe the last action performed on
// the buffer, so that UnreadRune and UnreadByte can check for
// invalid usage. opReadRuneX constants are chosen such that
// converted to int they correspond to the rune size that was read.
type readOp int
const (
	opRead      readOp = -1 // Any other read operation.
	opInvalid          = 0  // Non-read operation.
	opReadRune1        = 1  // Read rune of size 1.
	opReadRune2        = 2  // Read rune of size 2.
	opReadRune3        = 3  // Read rune of size 3.
	opReadRune4        = 4  // Read rune of size 4.
)
// ErrTooLarge is passed to panic if memory cannot be allocated to store data in a buffer.
var ErrTooLarge = errors.New("bytes.Buffer: too large")
// Bytes returns a slice of length b.Len() holding the unread portion of the buffer.
// The slice is valid for use only until the next buffer modification (that is,
// only until the next call to a method like Read, Write, Reset, or Truncate).
// The slice aliases the buffer content at least until the next buffer modification,
// so immediate changes to the slice will affect the result of future reads.
func (b *Buffer) Bytes() []byte { return b.buf[b.off:] }
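
// Illustrative sketch, not part of the original file; the helper below is
// hypothetical. It shows that Bytes returns a view into the buffer's storage:
// writing through the returned slice changes what later reads observe.
func exampleBytesAliasing() byte {
	var b Buffer
	b.WriteString("abc")
	view := b.Bytes() // aliases b's unread bytes; valid until b is modified
	view[0] = 'x'     // visible to subsequent reads of b
	c, _ := b.ReadByte()
	return c // 'x', not 'a'
}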
// String returns the contents of the unread portion of the buffer
// as a string. If the Buffer is a nil pointer, it returns "<nil>".
func (b *Buffer) String() string {
	if b == nil {
		// Special case, useful in debugging.
		return "<nil>"
	}
	return string(b.buf[b.off:])
}
// Len returns the number of bytes of the unread portion of the buffer;
// b.Len() == len(b.Bytes()).
func (b *Buffer) Len() int { return len(b.buf) - b.off }
// Cap returns the capacity of the buffer's underlying byte slice, that is, the
// total space allocated for the buffer's data.
func (b *Buffer) Cap() int { return cap(b.buf) }
// Truncate discards all but the first n unread bytes from the buffer
// but continues to use the same allocated storage.
// It panics if n is negative or greater than the length of the buffer.
func (b *Buffer) Truncate(n int) {
	if n == 0 {
		b.Reset()
		return
	}
	b.lastRead = opInvalid
	if n < 0 || n > b.Len() {
		panic("bytes.Buffer: truncation out of range")
	}
	b.buf = b.buf[0 : b.off+n]
}
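
// Illustrative sketch, not part of the original file; the helper below is
// hypothetical. Truncate(n) keeps only the first n unread bytes, counted from
// the current read position, while retaining the allocated storage.
func exampleTruncate() string {
	b := NewBufferString("hello world")
	b.Next(6)     // consume "hello ", leaving "world" unread
	b.Truncate(3) // keep only "wor" of the unread portion
	return b.String()
}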
// Reset resets the buffer to be empty,
// but it retains the underlying storage for use by future writes.
// Reset is the same as Truncate(0).
func (b *Buffer) Reset() {
	b.buf = b.buf[:0]
	b.off = 0
	b.lastRead = opInvalid
}
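
// Illustrative sketch, not part of the original file; the helper below is
// hypothetical. Reset discards the contents but keeps the allocation, so one
// scratch Buffer can be reused across iterations without reallocating.
func exampleResetReuse(records [][]byte) int {
	var scratch Buffer
	total := 0
	for _, rec := range records {
		scratch.Reset() // empty the buffer, keep its capacity
		scratch.Write(rec)
		scratch.WriteByte('\n')
		total += scratch.Len()
	}
	return total
}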
// grow grows the buffer to guarantee space for n more bytes.
// It returns the index where bytes should be written.
// If the buffer can't grow it will panic with ErrTooLarge.
func (b *Buffer) grow(n int) int {
	m := b.Len()
	// If buffer is empty, reset to recover space.
	if m == 0 && b.off != 0 {
		b.Reset()
	}
	if len(b.buf)+n > cap(b.buf) {
		var buf []byte
		if b.buf == nil && n <= len(b.bootstrap) {
			buf = b.bootstrap[0:]
		} else if m+n <= cap(b.buf)/2 {
			// We can slide things down instead of allocating a new
			// slice. We only need m+n <= cap(b.buf) to slide, but
			// we instead let capacity get twice as large so we
			// don't spend all our time copying.
			copy(b.buf[:], b.buf[b.off:])
			buf = b.buf[:m]
		} else {
			// not enough space anywhere
			buf = makeSlice(2*cap(b.buf) + n)
			copy(buf, b.buf[b.off:])
		}
		b.buf = buf
		b.off = 0
	}
	b.buf = b.buf[0 : b.off+m+n]
	return b.off + m
}
// Grow grows the buffer's capacity, if necessary, to guarantee space for
// another n bytes. After Grow(n), at least n bytes can be written to the
// buffer without another allocation.
// If n is negative, Grow will panic.
// If the buffer can't grow it will panic with ErrTooLarge.
func (b *Buffer) Grow(n int) {
	if n < 0 {
		panic("bytes.Buffer.Grow: negative count")
	}
	m := b.grow(n)
	b.buf = b.buf[0:m]
}
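
// Illustrative sketch, not part of the original file; the helper below is
// hypothetical. When the final size is known in advance, Grow reserves
// capacity up front so the subsequent writes do not reallocate.
func exampleGrowPreallocate(chunks [][]byte) []byte {
	size := 0
	for _, c := range chunks {
		size += len(c)
	}
	var b Buffer
	b.Grow(size) // at most one allocation for all the writes below
	for _, c := range chunks {
		b.Write(c)
	}
	return b.Bytes()
}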
// Write appends the contents of p to the buffer, growing the buffer as
// needed. The return value n is the length of p; err is always nil. If the
// buffer becomes too large, Write will panic with ErrTooLarge.
func (b *Buffer) Write(p []byte) (n int, err error) {
	b.lastRead = opInvalid
	m := b.grow(len(p))
	return copy(b.buf[m:], p), nil
}
// WriteString appends the contents of s to the buffer, growing the buffer as
// needed. The return value n is the length of s; err is always nil. If the
// buffer becomes too large, WriteString will panic with ErrTooLarge.
func (b *Buffer) WriteString(s string) (n int, err error) {
	b.lastRead = opInvalid
	m := b.grow(len(s))
	return copy(b.buf[m:], s), nil
}
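
// Illustrative sketch, not part of the original file; the helper below is
// hypothetical. Write and WriteString append to the same storage; their
// results can be ignored because err is always nil and n is the full length.
func exampleWriteRoundTrip() []byte {
	var b Buffer
	b.WriteString("key=")
	b.Write([]byte("value"))
	return b.Bytes() // "key=value"
}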
// MinRead is the minimum slice size passed to a Read call by
// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond
// what is required to hold the contents of r, ReadFrom will not grow the
// underlying buffer.
const MinRead = 512
// ReadFrom reads data from r until EOF and appends it to the buffer, growing
// the buffer as needed. The return value n is the number of bytes read. Any
// error except io.EOF encountered during the read is also returned. If the
// buffer becomes too large, ReadFrom will panic with ErrTooLarge.
func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) {
	b.lastRead = opInvalid
	// If buffer is empty, reset to recover space.
	if b.off >= len(b.buf) {
		b.Reset()
	}
	for {
		if free := cap(b.buf) - len(b.buf); free < MinRead {
			// not enough space at end
			newBuf := b.buf
			if b.off+free < MinRead {
				// not enough space using beginning of buffer;
				// double buffer capacity
				newBuf = makeSlice(2*cap(b.buf) + MinRead)
			}
			copy(newBuf, b.buf[b.off:])
			b.buf = newBuf[:len(b.buf)-b.off]
			b.off = 0
		}
		m, e := r.Read(b.buf[len(b.buf):cap(b.buf)])
		b.buf = b.buf[0 : len(b.buf)+m]
		n += int64(m)
		if e == io.EOF {
			break
		}
		if e != nil {
			return n, e
		}
	}
	return n, nil // err is EOF, so return nil explicitly
}
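
// Illustrative sketch, not part of the original file; the helper below is
// hypothetical. ReadFrom accumulates everything an io.Reader produces, which
// gives a simple "read all" built on Buffer.
func exampleReadAll(src io.Reader) ([]byte, error) {
	var b Buffer
	_, err := b.ReadFrom(src) // reads until EOF, growing b as needed
	return b.Bytes(), err
}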
// makeSlice allocates a slice of size n. If the allocation fails, it panics
// with ErrTooLarge.
func makeSlice(n int) []byte {
	// If the make fails, give a known error.
	defer func() {
		if recover() != nil {
			panic(ErrTooLarge)
		}
	}()
	return make([]byte, n)
}
// WriteTo writes data to w until the buffer is drained or an error occurs.
// The return value n is the number of bytes written; it always fits into an
// int, but it is int64 to match the io.WriterTo interface. Any error
// encountered during the write is also returned.
func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) {
	b.lastRead = opInvalid
	if b.off < len(b.buf) {
		nBytes := b.Len()
		m, e := w.Write(b.buf[b.off:])
		if m > nBytes {
			panic("bytes.Buffer.WriteTo: invalid Write count")
		}
		b.off += m
		n = int64(m)
		if e != nil {
			return n, e
		}
		// all bytes should have been written, by definition of
		// Write method in io.Writer
		if m != nBytes {
			return n, io.ErrShortWrite
		}
	}
	// Buffer is now empty; reset.
	b.Reset()
	return
}
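
// Illustrative sketch, not part of the original file; the helper below is
// hypothetical. WriteTo drains the buffer into any io.Writer and reports how
// many bytes were written.
func exampleWriteTo(dst io.Writer) (int64, error) {
	b := NewBufferString("payload")
	return b.WriteTo(dst) // b.Len() == 0 afterwards unless dst failed
}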
// WriteByte appends the byte c to the buffer, growing the buffer as needed.
// The returned error is always nil, but is included to match bufio.Writer's
// WriteByte. If the buffer becomes too large, WriteByte will panic with
// ErrTooLarge.
func (b *Buffer) WriteByte(c byte) error {
	b.lastRead = opInvalid
	m := b.grow(1)
	b.buf[m] = c
	return nil
}
// WriteRune appends the UTF-8 encoding of Unicode code point r to the
// buffer, returning its length and an error, which is always nil but is
// included to match bufio.Writer's WriteRune. The buffer is grown as needed;
// if it becomes too large, WriteRune will panic with ErrTooLarge.
func (b *Buffer) WriteRune(r rune) (n int, err error) {
	if r < utf8.RuneSelf {
		b.WriteByte(byte(r))
		return 1, nil
	}
	b.lastRead = opInvalid
	m := b.grow(utf8.UTFMax)
	n = utf8.EncodeRune(b.buf[m:m+utf8.UTFMax], r)
	b.buf = b.buf[:m+n]
	return n, nil
}
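
// Illustrative sketch, not part of the original file; the helper below is
// hypothetical. WriteRune appends the UTF-8 encoding of a code point, so a
// multi-byte rune grows the buffer by more than one byte.
func exampleWriteRune() int {
	var b Buffer
	b.WriteRune('a') // 1 byte
	b.WriteRune('é') // 2 bytes
	b.WriteRune('世') // 3 bytes
	return b.Len()   // 6
}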
// Read reads the next len(p) bytes from the buffer or until the buffer
// is drained. The return value n is the number of bytes read. If the
// buffer has no data to return, err is io.EOF (unless len(p) is zero);
// otherwise it is nil.
func (b *Buffer) Read(p []byte) (n int, err error) {
	b.lastRead = opInvalid
	if b.off >= len(b.buf) {
		// Buffer is empty, reset to recover space.
		b.Reset()
		if len(p) == 0 {
			return
		}
		return 0, io.EOF
	}
	n = copy(p, b.buf[b.off:])
	b.off += n
	if n > 0 {
		b.lastRead = opRead
	}
	return
}
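
// Illustrative sketch, not part of the original file; the helper below is
// hypothetical. Read follows the io.Reader contract and returns io.EOF once
// the unread portion is exhausted, so a standard read loop works.
func exampleReadLoop(b *Buffer) (int, error) {
	chunk := make([]byte, 8)
	total := 0
	for {
		n, err := b.Read(chunk)
		total += n
		if err == io.EOF {
			return total, nil // buffer drained
		}
		if err != nil {
			return total, err
		}
	}
}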
// Next returns a slice containing the next n bytes from the buffer,
// advancing the buffer as if the bytes had been returned by Read.
// If there are fewer than n bytes in the buffer, Next returns the entire buffer.
// The slice is only valid until the next call to a read or write method.
func (b *Buffer) Next(n int) []byte {
	b.lastRead = opInvalid
	m := b.Len()
	if n > m {
		n = m
	}
	data := b.buf[b.off : b.off+n]
	b.off += n
	if n > 0 {
		b.lastRead = opRead
	}
	return data
}
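
// Illustrative sketch, not part of the original file; the helper below is
// hypothetical. Next consumes up to n bytes without copying; like Bytes, the
// returned slice is only valid until the next read or write, so copy it if it
// must outlive the buffer.
func exampleNextHeader(b *Buffer) []byte {
	header := b.Next(4)                   // first 4 unread bytes (or fewer)
	return append([]byte(nil), header...) // copy before b is modified again
}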
// ReadByte reads and returns the next byte from the buffer.
// If no byte is available, it returns error io.EOF.
func (b *Buffer) ReadByte() (byte, error) {
	b.lastRead = opInvalid
	if b.off >= len(b.buf) {
		// Buffer is empty, reset to recover space.
		b.Reset()
		return 0, io.EOF
	}
	c := b.buf[b.off]
	b.off++
	b.lastRead = opRead
	return c, nil
}
// ReadRune reads and returns the next UTF-8-encoded
// Unicode code point from the buffer.
// If no bytes are available, the error returned is io.EOF.
// If the bytes are an erroneous UTF-8 encoding, it
// consumes one byte and returns U+FFFD, 1.
func (b *Buffer) ReadRune() (r rune, size int, err error) {
	b.lastRead = opInvalid
	if b.off >= len(b.buf) {
		// Buffer is empty, reset to recover space.
		b.Reset()
		return 0, 0, io.EOF
	}
	c := b.buf[b.off]
	if c < utf8.RuneSelf {
		b.off++
		b.lastRead = opReadRune1
		return rune(c), 1, nil
	}
	r, n := utf8.DecodeRune(b.buf[b.off:])
	b.off += n
	b.lastRead = readOp(n)
	return r, n, nil
}
// UnreadRune unreads the last rune returned by ReadRune.
// If the most recent read or write operation on the buffer was
// not a ReadRune, UnreadRune returns an error. (In this regard
// it is stricter than UnreadByte, which will unread the last byte
// from any read operation.)
func (b *Buffer) UnreadRune() error {
	if b.lastRead <= opInvalid {
		return errors.New("bytes.Buffer: UnreadRune: previous operation was not ReadRune")
	}
	if b.off >= int(b.lastRead) {
		b.off -= int(b.lastRead)
	}
	b.lastRead = opInvalid
	return nil
}
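
// Illustrative sketch, not part of the original file; the helper below is
// hypothetical. ReadRune followed by UnreadRune gives one-rune lookahead:
// peek at the next code point without consuming it.
func examplePeekRune(b *Buffer) (rune, error) {
	r, _, err := b.ReadRune()
	if err != nil {
		return 0, err
	}
	if err := b.UnreadRune(); err != nil { // put the rune back
		return 0, err
	}
	return r, nil
}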
// UnreadByte unreads the last byte returned by the most recent
// read operation. If write has happened since the last read, UnreadByte
// returns an error.
func (b *Buffer) UnreadByte() error {
	if b.lastRead == opInvalid {
		return errors.New("bytes.Buffer: UnreadByte: previous operation was not a read")
	}
	b.lastRead = opInvalid
	if b.off > 0 {
		b.off--
	}
	return nil
}
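
// Illustrative sketch, not part of the original file; the helper below is
// hypothetical. The same lookahead pattern with single bytes; UnreadByte
// works after any successful read, not only ReadByte.
func examplePeekByte(b *Buffer) (byte, error) {
	c, err := b.ReadByte()
	if err != nil {
		return 0, err
	}
	return c, b.UnreadByte() // put the byte back
}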
// ReadBytes reads until the first occurrence of delim in the input,
// returning a slice containing the data up to and including the delimiter.
// If ReadBytes encounters an error before finding a delimiter,
// it returns the data read before the error and the error itself (often io.EOF).
// ReadBytes returns err != nil if and only if the returned data does not end in
// delim.
func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) {
	slice, err := b.readSlice(delim)
	// return a copy of slice. The buffer's backing array may
	// be overwritten by later calls.
	line = append(line, slice...)
	return
}
// readSlice is like ReadBytes but returns a reference to internal buffer data.
func (b *Buffer) readSlice(delim byte) (line []byte, err error) {
	i := IndexByte(b.buf[b.off:], delim)
	end := b.off + i + 1
	if i < 0 {
		end = len(b.buf)
		err = io.EOF
	}
	line = b.buf[b.off:end]
	b.off = end
	b.lastRead = opRead
	return line, err
}
// ReadString reads until the first occurrence of delim in the input,
// returning a string containing the data up to and including the delimiter.
// If ReadString encounters an error before finding a delimiter,
// it returns the data read before the error and the error itself (often io.EOF).
// ReadString returns err != nil if and only if the returned data does not end
// in delim.
func (b *Buffer) ReadString(delim byte) (line string, err error) {
	slice, err := b.readSlice(delim)
	return string(slice), err
}
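
// Illustrative sketch, not part of the original file; the helper below is
// hypothetical. ReadBytes and ReadString split the buffer on a delimiter;
// the delimiter is included in each result, and the final piece arrives
// together with io.EOF when it does not end in the delimiter.
func exampleSplitLines(b *Buffer) []string {
	var lines []string
	for {
		line, err := b.ReadString('\n') // includes the '\n' when present
		if len(line) > 0 {
			lines = append(lines, line)
		}
		if err != nil { // io.EOF once the buffer is drained
			return lines
		}
	}
}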
// NewBuffer creates and initializes a new Buffer using buf as its initial
// contents. It is intended to prepare a Buffer to read existing data. It
// can also be used to size the internal buffer for writing. To do that,
// buf should have the desired capacity but a length of zero.
//
// In most cases, new(Buffer) (or just declaring a Buffer variable) is
// sufficient to initialize a Buffer.
func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} }
// NewBufferString creates and initializes a new Buffer using string s as its
// initial contents. It is intended to prepare a buffer to read an existing
// string.
//
// In most cases, new(Buffer) (or just declaring a Buffer variable) is
// sufficient to initialize a Buffer.
func NewBufferString(s string) *Buffer {
	return &Buffer{buf: []byte(s)}
}
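
// Illustrative sketches, not part of the original file; the helpers below are
// hypothetical. NewBuffer wraps existing data for reading, or, given a
// zero-length slice with spare capacity, sizes the buffer for writing;
// NewBufferString is the string counterpart for reading.
func exampleNewBufferForWriting() *Buffer {
	return NewBuffer(make([]byte, 0, 1024)) // empty, with 1 KiB reserved
}

func exampleNewBufferStringForReading() (byte, error) {
	return NewBufferString("data").ReadByte() // 'd', nil
}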