
archive/zip: make benchmarks more representative

Currently the zip benchmarks spend about 60% of their time in the rleBuffer code,
which is used only to test archive/zip itself:
    17.48s 37.02% 37.02%     18.12s 38.37%  archive/zip.(*rleBuffer).ReadAt
     9.51s 20.14% 57.16%     10.43s 22.09%  archive/zip.(*rleBuffer).Write
     9.15s 19.38% 76.54%     10.85s 22.98%  compress/flate.(*compressor).deflate

This means that the benchmarks are largely measuring the performance of a test helper.
Making the helper's ReadAt/Write methods faster brings the benchmarks closer to real-world behavior.
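
For context: rleBuffer is a run-length-encoded buffer the tests use to fake huge
(zip64-sized) archives without allocating their contents. A minimal sketch of its
shape, with field names as they appear in zip_test.go:

type repeatedByte struct {
    off int64 // logical offset of this run in the stream
    b   byte  // the byte that repeats
    n   int64 // length of the run
}

// rleBuffer is a run-length-encoded byte buffer.
type rleBuffer struct {
    buf []repeatedByte
}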

name                       old time/op    new time/op    delta
CompressedZipGarbage-8       2.34ms ± 0%    2.34ms ± 1%     ~     (p=0.684 n=10+10)
Zip64Test-8                  58.1ms ± 2%    10.7ms ± 1%  -81.54%  (p=0.000 n=10+10)
Zip64TestSizes/4096-8        4.05µs ± 2%    3.65µs ± 5%   -9.96%  (p=0.000 n=9+10)
Zip64TestSizes/1048576-8      238µs ± 0%      43µs ± 0%  -82.06%  (p=0.000 n=10+10)
Zip64TestSizes/67108864-8    15.3ms ± 1%     2.6ms ± 0%  -83.12%  (p=0.000 n=10+9)

name                       old alloc/op   new alloc/op   delta
CompressedZipGarbage-8       17.9kB ±14%    16.0kB ±24%  -10.48%  (p=0.026 n=9+10)

name                       old allocs/op  new allocs/op  delta
CompressedZipGarbage-8         44.0 ± 0%      44.0 ± 0%     ~     (all equal)

Change-Id: Idfd920d0e4bed4aec2f5be84dc7e3919d9f1dd2d
Reviewed-on: https://go-review.googlesource.com/83857
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
Ilya Tocar 2017-12-13 15:25:50 -06:00 committed by Brad Fitzpatrick
parent 8999b1d6c9
commit 2629703a5c


@@ -140,14 +140,7 @@ func (r *rleBuffer) Write(p []byte) (n int, err error) {
         rp = &r.buf[len(r.buf)-1]
         // Fast path, if p is entirely the same byte repeated.
         if lastByte := rp.b; len(p) > 0 && p[0] == lastByte {
-            all := true
-            for _, b := range p {
-                if b != lastByte {
-                    all = false
-                    break
-                }
-            }
-            if all {
+            if bytes.Count(p, []byte{lastByte}) == len(p) {
                 rp.n += int64(len(p))
                 return len(p), nil
             }
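
The rewritten fast path leans on bytes.Count, whose single-byte case is backed by
optimized (often vectorized) runtime code, instead of a hand-written byte loop.
A standalone sketch of the same check:

package main

import (
    "bytes"
    "fmt"
)

// allSame reports whether every byte of p equals b.
func allSame(p []byte, b byte) bool {
    return bytes.Count(p, []byte{b}) == len(p)
}

func main() {
    fmt.Println(allSame([]byte("aaaa"), 'a')) // true
    fmt.Println(allSame([]byte("aaba"), 'a')) // false
}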
@@ -165,6 +158,25 @@ func (r *rleBuffer) Write(p []byte) (n int, err error) {
     return len(p), nil
 }

+func min(x, y int) int {
+    if x < y {
+        return x
+    }
+    return y
+}
+
+func memset(a []byte, b byte) {
+    if len(a) == 0 {
+        return
+    }
+    // Double, until we reach power of 2 >= len(a), same as bytes.Repeat,
+    // but without allocation.
+    a[0] = b
+    for i, l := 1, len(a); i < l; i *= 2 {
+        copy(a[i:], a[:i])
+    }
+}
+
 func (r *rleBuffer) ReadAt(p []byte, off int64) (n int, err error) {
     if len(p) == 0 {
         return
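
memset fills the slice in O(log n) copy calls: after a[0] is set, each pass doubles
the initialized prefix (1, 2, 4, ... bytes), the same doubling bytes.Repeat performs,
but writing into the caller's slice instead of allocating a new one. A quick
standalone demonstration:

package main

import "fmt"

// memset as in the patch above.
func memset(a []byte, b byte) {
    if len(a) == 0 {
        return
    }
    a[0] = b
    for i, l := 1, len(a); i < l; i *= 2 {
        copy(a[i:], a[:i]) // copies min(i, l-i) bytes per pass
    }
}

func main() {
    buf := make([]byte, 10)
    memset(buf, 'x')
    fmt.Printf("%q\n", buf) // "xxxxxxxxxx"
}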
@@ -176,16 +188,13 @@ func (r *rleBuffer) ReadAt(p []byte, off int64) (n int, err error) {
     parts := r.buf[skipParts:]
     if len(parts) > 0 {
         skipBytes := off - parts[0].off
-        for len(parts) > 0 {
-            part := parts[0]
-            for i := skipBytes; i < part.n; i++ {
-                if n == len(p) {
-                    return
-                }
-                p[n] = part.b
-                n++
+        for _, part := range parts {
+            repeat := min(int(part.n-skipBytes), len(p)-n)
+            memset(p[n:n+repeat], part.b)
+            n += repeat
+            if n == len(p) {
+                return
             }
-            parts = parts[1:]
             skipBytes = 0
         }
     }
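
With min and memset in place, ReadAt fills each run with bulk copy calls instead of
writing one byte per loop iteration, which is where the large Zip64Test improvements
in the tables above come from.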