testing: add B.ReportMetric for custom benchmark metrics
This adds a ReportMetric method to testing.B that lets the user report
custom benchmark metrics and override built-in metrics.

Fixes #26037.

Change-Id: I8236fbde3683fc27bbe45cbbedfd377b435edf64
Reviewed-on: https://go-review.googlesource.com/c/go/+/166717
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Ian Lance Taylor <iant@golang.org>
Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
parent 349e7df2c3
commit 834d229eb6
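For orientation, a minimal sketch of how a benchmark could use the new API once this change is in place. The benchmark name, the "words/op" unit, and the measured work are illustrative only and are not part of this commit:

    package mypkg_test

    import (
        "strings"
        "testing"
    )

    func BenchmarkFields(b *testing.B) {
        var words int64
        for i := 0; i < b.N; i++ {
            words += int64(len(strings.Fields("the quick brown fox")))
        }
        // Per-iteration metric: divide by b.N and use a "/op" unit,
        // as the ReportMetric doc comment in the diff below recommends.
        b.ReportMetric(float64(words)/float64(b.N), "words/op")
    }

With this change, the benchmark's result line gains an extra column for the custom metric (for this sketch, "4.00 words/op") alongside the built-in ns/op value.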
src/testing/benchmark.go
@@ -8,13 +8,17 @@ import (
 	"flag"
 	"fmt"
 	"internal/race"
+	"io"
+	"math"
 	"os"
 	"runtime"
+	"sort"
 	"strconv"
 	"strings"
 	"sync"
 	"sync/atomic"
 	"time"
+	"unicode"
 )
 
 var matchBenchmarks = flag.String("test.bench", "", "run only benchmarks matching `regexp`")
@@ -101,6 +105,8 @@ type B struct {
 	// The net total of this test after being run.
 	netAllocs uint64
 	netBytes  uint64
+	// Extra metrics collected by ReportMetric.
+	extra map[string]float64
 }
 
 // StartTimer starts timing a test. This function is called automatically
@@ -129,9 +135,19 @@ func (b *B) StopTimer() {
 	}
 }
 
-// ResetTimer zeros the elapsed benchmark time and memory allocation counters.
+// ResetTimer zeros the elapsed benchmark time and memory allocation counters
+// and deletes user-reported metrics.
 // It does not affect whether the timer is running.
 func (b *B) ResetTimer() {
+	if b.extra == nil {
+		// Allocate the extra map before reading memory stats.
+		// Pre-size it to make more allocation unlikely.
+		b.extra = make(map[string]float64, 16)
+	} else {
+		for k := range b.extra {
+			delete(b.extra, k)
+		}
+	}
 	if b.timerOn {
 		runtime.ReadMemStats(&memStats)
 		b.startAllocs = memStats.Mallocs
@@ -328,7 +344,26 @@ func (b *B) launch() {
 			b.runN(n)
 		}
 	}
-	b.result = BenchmarkResult{b.N, b.duration, b.bytes, b.netAllocs, b.netBytes}
+	b.result = BenchmarkResult{b.N, b.duration, b.bytes, b.netAllocs, b.netBytes, b.extra}
+}
+
+// ReportMetric adds "n unit" to the reported benchmark results.
+// If the metric is per-iteration, the caller should divide by b.N,
+// and by convention units should end in "/op".
+// ReportMetric overrides any previously reported value for the same unit.
+// ReportMetric panics if unit is the empty string or if unit contains
+// any whitespace.
+// If unit is a unit normally reported by the benchmark framework itself
+// (such as "allocs/op"), ReportMetric will override that metric.
+// Setting "ns/op" to 0 will suppress that built-in metric.
+func (b *B) ReportMetric(n float64, unit string) {
+	if unit == "" {
+		panic("metric unit must not be empty")
+	}
+	if strings.IndexFunc(unit, unicode.IsSpace) >= 0 {
+		panic("metric unit must not contain whitespace")
+	}
+	b.extra[unit] = n
 }
 
 // The results of a benchmark run.
@@ -338,56 +373,117 @@ type BenchmarkResult struct {
 	Bytes     int64  // Bytes processed in one iteration.
 	MemAllocs uint64 // The total number of memory allocations.
 	MemBytes  uint64 // The total number of bytes allocated.
+
+	// Extra records additional metrics reported by ReportMetric.
+	Extra map[string]float64
 }
 
+// NsPerOp returns the "ns/op" metric.
 func (r BenchmarkResult) NsPerOp() int64 {
+	if v, ok := r.Extra["ns/op"]; ok {
+		return int64(v)
+	}
 	if r.N <= 0 {
 		return 0
 	}
 	return r.T.Nanoseconds() / int64(r.N)
 }
 
+// mbPerSec returns the "MB/s" metric.
 func (r BenchmarkResult) mbPerSec() float64 {
+	if v, ok := r.Extra["MB/s"]; ok {
+		return v
+	}
 	if r.Bytes <= 0 || r.T <= 0 || r.N <= 0 {
 		return 0
 	}
 	return (float64(r.Bytes) * float64(r.N) / 1e6) / r.T.Seconds()
 }
 
-// AllocsPerOp returns r.MemAllocs / r.N.
+// AllocsPerOp returns the "allocs/op" metric,
+// which is calculated as r.MemAllocs / r.N.
 func (r BenchmarkResult) AllocsPerOp() int64 {
+	if v, ok := r.Extra["allocs/op"]; ok {
+		return int64(v)
+	}
 	if r.N <= 0 {
 		return 0
 	}
 	return int64(r.MemAllocs) / int64(r.N)
 }
 
-// AllocedBytesPerOp returns r.MemBytes / r.N.
+// AllocedBytesPerOp returns the "B/op" metric,
+// which is calculated as r.MemBytes / r.N.
 func (r BenchmarkResult) AllocedBytesPerOp() int64 {
+	if v, ok := r.Extra["B/op"]; ok {
+		return int64(v)
+	}
 	if r.N <= 0 {
 		return 0
 	}
	return int64(r.MemBytes) / int64(r.N)
 }
 
+// String returns a summary of the benchmark results.
+// It follows the benchmark result line format from
+// https://golang.org/design/14313-benchmark-format, not including the
+// benchmark name.
+// Extra metrics override built-in metrics of the same name.
+// String does not include allocs/op or B/op, since those are reported
+// by MemString.
 func (r BenchmarkResult) String() string {
-	mbs := r.mbPerSec()
-	mb := ""
-	if mbs != 0 {
-		mb = fmt.Sprintf("\t%7.2f MB/s", mbs)
-	}
-	nsop := r.NsPerOp()
-	ns := fmt.Sprintf("%10d ns/op", nsop)
-	if r.N > 0 && nsop < 100 {
-		// The format specifiers here make sure that
-		// the ones digits line up for all three possible formats.
-		if nsop < 10 {
-			ns = fmt.Sprintf("%13.2f ns/op", float64(r.T.Nanoseconds())/float64(r.N))
-		} else {
-			ns = fmt.Sprintf("%12.1f ns/op", float64(r.T.Nanoseconds())/float64(r.N))
-		}
-	}
-	return fmt.Sprintf("%8d\t%s%s", r.N, ns, mb)
+	buf := new(strings.Builder)
+	fmt.Fprintf(buf, "%8d", r.N)
+
+	if ns := r.NsPerOp(); ns != 0 {
+		buf.WriteByte('\t')
+		prettyPrint(buf, float64(ns), "ns/op")
+	}
+
+	if mbs := r.mbPerSec(); mbs != 0 {
+		fmt.Fprintf(buf, "\t%7.2f MB/s", mbs)
+	}
+
+	// Print extra metrics that aren't represented in the standard
+	// metrics.
+	var extraKeys []string
+	for k := range r.Extra {
+		switch k {
+		case "ns/op", "MB/s", "B/op", "allocs/op":
+			// Built-in metrics reported elsewhere.
+			continue
+		}
+		extraKeys = append(extraKeys, k)
+	}
+	sort.Strings(extraKeys)
+	for _, k := range extraKeys {
+		buf.WriteByte('\t')
+		prettyPrint(buf, r.Extra[k], k)
+	}
+	return buf.String()
+}
+
+func prettyPrint(w io.Writer, x float64, unit string) {
+	// Print all numbers with 10 places before the decimal point
+	// and small numbers with three sig figs.
+	var format string
+	switch y := math.Abs(x); {
+	case y == 0 || y >= 99.95:
+		format = "%10.0f %s"
+	case y >= 9.995:
+		format = "%12.1f %s"
+	case y >= 0.9995:
+		format = "%13.2f %s"
+	case y >= 0.09995:
+		format = "%14.3f %s"
+	case y >= 0.009995:
+		format = "%15.4f %s"
+	case y >= 0.0009995:
+		format = "%16.5f %s"
+	default:
+		format = "%17.6f %s"
+	}
+	fmt.Fprintf(w, format, x, unit)
 }
 
 // MemString returns r.AllocedBytesPerOp and r.AllocsPerOp in the same format as 'go test'.
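The exported surface added above (the Extra field, the Extra-aware accessors, and the rewritten String method) can be exercised from any caller of testing.Benchmark. A small sketch, assuming only the API shown in this diff; the "frobs/op" unit mirrors the test below and the empty loop stands in for real work:

    package main

    import (
        "fmt"
        "testing"
    )

    func main() {
        res := testing.Benchmark(func(b *testing.B) {
            for i := 0; i < b.N; i++ {
                // work under measurement would go here
            }
            b.ReportMetric(0.2, "frobs/op")
        })
        fmt.Println(res.Extra["frobs/op"]) // 0.2, read back from the new field
        fmt.Println(res)                   // custom metrics are appended, sorted by unit
    }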
src/testing/benchmark_test.go
@@ -7,6 +7,8 @@ package testing_test
 import (
 	"bytes"
 	"runtime"
+	"sort"
+	"strings"
 	"sync/atomic"
 	"testing"
 	"text/template"
@@ -63,6 +65,32 @@ func TestRoundUp(t *testing.T) {
 	}
 }
 
+var prettyPrintTests = []struct {
+	v        float64
+	expected string
+}{
+	{0, "         0 x"},
+	{1234.1, "      1234 x"},
+	{-1234.1, "     -1234 x"},
+	{99.950001, "       100 x"},
+	{99.949999, "        99.9 x"},
+	{9.9950001, "        10.0 x"},
+	{9.9949999, "         9.99 x"},
+	{-9.9949999, "        -9.99 x"},
+	{0.0099950001, "         0.0100 x"},
+	{0.0099949999, "         0.00999 x"},
+}
+
+func TestPrettyPrint(t *testing.T) {
+	for _, tt := range prettyPrintTests {
+		buf := new(strings.Builder)
+		testing.PrettyPrint(buf, tt.v, "x")
+		if tt.expected != buf.String() {
+			t.Errorf("prettyPrint(%v): expected %q, actual %q", tt.v, tt.expected, buf.String())
+		}
+	}
+}
+
 func TestRunParallel(t *testing.T) {
 	testing.Benchmark(func(b *testing.B) {
 		procs := uint32(0)
@@ -111,3 +139,38 @@ func ExampleB_RunParallel() {
 		})
 	})
 }
+
+func TestReportMetric(t *testing.T) {
+	res := testing.Benchmark(func(b *testing.B) {
+		b.ReportMetric(12345, "ns/op")
+		b.ReportMetric(0.2, "frobs/op")
+	})
+	// Test built-in overriding.
+	if res.NsPerOp() != 12345 {
+		t.Errorf("NsPerOp: expected %v, actual %v", 12345, res.NsPerOp())
+	}
+	// Test stringing.
+	res.N = 1 // Make the output stable
+	want := "       1\t     12345 ns/op\t         0.200 frobs/op"
+	if want != res.String() {
+		t.Errorf("expected %q, actual %q", want, res.String())
+	}
+}
+
+func ExampleB_ReportMetric() {
+	// This reports a custom benchmark metric relevant to a
+	// specific algorithm (in this case, sorting).
+	testing.Benchmark(func(b *testing.B) {
+		var compares int64
+		for i := 0; i < b.N; i++ {
+			s := []int{5, 4, 3, 2, 1}
+			sort.Slice(s, func(i, j int) bool {
+				compares++
+				return s[i] < s[j]
+			})
+		}
+		// This metric is per-operation, so divide by b.N and
+		// report it as a "/op" unit.
+		b.ReportMetric(float64(compares)/float64(b.N), "compares/op")
+	})
+}
src/testing/export_test.go
@@ -7,4 +7,5 @@ package testing
 var (
 	RoundDown10 = roundDown10
 	RoundUp     = roundUp
+	PrettyPrint = prettyPrint
 )
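One consequence of the override rule documented on ReportMetric is that a benchmark can suppress the built-in ns/op column by reporting 0 for it. A hedged sketch of that usage; the benchmark name and the "events/op" unit are made up for illustration and are not part of this commit:

    package mypkg_test

    import "testing"

    func BenchmarkCustomOnly(b *testing.B) {
        events := 0
        for i := 0; i < b.N; i++ {
            events++ // stand-in for counting some event of interest
        }
        b.ReportMetric(0, "ns/op") // per the doc comment, 0 suppresses the built-in ns/op
        b.ReportMetric(float64(events)/float64(b.N), "events/op")
    }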