testing: support b.SetBytes(); implement sub-benchmarks.

parent 29f7ebc63e
commit f80efa5b8b
4 changed files with 156 additions and 20 deletions
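For context, here is a minimal user-level sketch of what this commit enables (illustrative only, not part of the diff; the benchmarked function and sizes are arbitrary stand-ins): b.SetBytes adds an MB/s column to the result line, and b.Run defines named sub-benchmarks.

// Illustrative example only — not part of this commit.
package example_test

import (
	"bytes"
	"fmt"
	"testing"
)

// SetBytes makes the reporter add an MB/s column next to ns/op.
func BenchmarkUpper(b *testing.B) {
	data := bytes.Repeat([]byte("tinygo"), 128)
	b.SetBytes(int64(len(data)))
	for i := 0; i < b.N; i++ {
		_ = bytes.ToUpper(data)
	}
}

// Run creates named sub-benchmarks; each one is measured separately.
func BenchmarkUpperSizes(b *testing.B) {
	for _, n := range []int{64, 4096} {
		b.Run(fmt.Sprintf("%dB", n), func(b *testing.B) {
			data := bytes.Repeat([]byte("x"), n)
			b.SetBytes(int64(n))
			for i := 0; i < b.N; i++ {
				_ = bytes.ToUpper(data)
			}
		})
	}
}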
Makefile
@@ -254,8 +254,12 @@ TEST_PACKAGES_WASI = \
 .PHONY: tinygo-test
 tinygo-test:
 	$(TINYGO) test $(TEST_PACKAGES)
+tinygo-bench:
+	$(TINYGO) test -bench . $(TEST_PACKAGES)
 tinygo-test-wasi:
 	$(TINYGO) test -target wasi $(TEST_PACKAGES_WASI)
+tinygo-bench-wasi:
+	$(TINYGO) test -target wasi -bench . $(TEST_PACKAGES_WASI)
 
 .PHONY: smoketest
 smoketest:
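With these additions, `make tinygo-bench` runs the benchmarks of the native test packages and `make tinygo-bench-wasi` does the same for the WASI set; both simply wrap the existing `$(TINYGO) test` invocation with `-bench .` appended.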
@@ -8,6 +8,9 @@ package testing
 
 import (
 	"fmt"
+	"io"
+	"math"
+	"strings"
 	"time"
 )
 
@@ -44,8 +47,11 @@ type B struct {
 	hasSub   bool          // TODO: should be in common, and atomic
 	start    time.Time     // TODO: should be in common
 	duration time.Duration // TODO: should be in common
+	context  *benchContext
 	N         int
 	benchFunc func(b *B)
+	bytes        int64
+	missingBytes bool // one of the subbenchmarks does not have bytes set.
 	benchTime benchTimeFlag
 	timerOn   bool
 	result    BenchmarkResult
@@ -82,15 +88,13 @@ func (b *B) ResetTimer() {
 
 // SetBytes records the number of bytes processed in a single operation.
 // If this is called, the benchmark will report ns/op and MB/s.
-func (b *B) SetBytes(n int64) {
-	panic("testing: unimplemented: B.SetBytes")
-}
+func (b *B) SetBytes(n int64) { b.bytes = n }
 
 // ReportAllocs enables malloc statistics for this benchmark.
 // It is equivalent to setting -test.benchmem, but it only affects the
 // benchmark function that calls ReportAllocs.
 func (b *B) ReportAllocs() {
-	panic("testing: unimplemented: B.ReportAllocs")
+	return // TODO: implement
 }
 
 // runN runs a single benchmark for the specified number of iterations.
@@ -119,13 +123,31 @@ func max(x, y int64) int64 {
 // run1 runs the first iteration of benchFunc. It reports whether more
 // iterations of this benchmarks should be run.
 func (b *B) run1() bool {
+	if ctx := b.context; ctx != nil {
+		// Extend maxLen, if needed.
+		if n := len(b.name); n > ctx.maxLen {
+			ctx.maxLen = n + 8 // Add additional slack to avoid too many jumps in size.
+		}
+	}
 	b.runN(1)
 	return !b.hasSub
 }
 
 // run executes the benchmark.
 func (b *B) run() {
+	if b.context != nil {
+		// Running go test --test.bench
+		b.processBench(b.context) // calls doBench and prints results
+	} else {
+		// Running func Benchmark.
+		b.doBench()
+	}
+}
+
+func (b *B) doBench() BenchmarkResult {
+	// in upstream, this uses a goroutine
 	b.launch()
+	return b.result
 }
 
 // launch launches the benchmark function. It gradually increases the number
@@ -159,13 +181,14 @@ func (b *B) launch() {
 		n = min(n, 1e9)
 		b.runN(int(n))
 	}
-	b.result = BenchmarkResult{b.N, b.duration}
+	b.result = BenchmarkResult{b.N, b.duration, b.bytes}
 }
 
 // BenchmarkResult contains the results of a benchmark run.
 type BenchmarkResult struct {
 	N int           // The number of iterations.
 	T time.Duration // The total time taken.
+	Bytes int64     // Bytes processed in one iteration.
 }
 
 // NsPerOp returns the "ns/op" metric.
@@ -176,6 +199,14 @@ func (r BenchmarkResult) NsPerOp() int64 {
 	return r.T.Nanoseconds() / int64(r.N)
 }
 
+// mbPerSec returns the "MB/s" metric.
+func (r BenchmarkResult) mbPerSec() float64 {
+	if r.Bytes <= 0 || r.T <= 0 || r.N <= 0 {
+		return 0
+	}
+	return (float64(r.Bytes) * float64(r.N) / 1e6) / r.T.Seconds()
+}
+
 // AllocsPerOp returns the "allocs/op" metric,
 // which is calculated as r.MemAllocs / r.N.
 func (r BenchmarkResult) AllocsPerOp() int64 {
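A worked example of the new metric (numbers are illustrative): a benchmark that called b.SetBytes(4096) and finished with r.N = 500000 iterations in r.T = 1s reports (4096 × 500000 / 1e6) / 1 = 2048.00 MB/s. If SetBytes was never called, r.Bytes stays 0, mbPerSec returns 0, and the MB/s column is omitted.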
@@ -188,10 +219,71 @@ func (r BenchmarkResult) AllocedBytesPerOp() int64 {
 	return 0 // Dummy version to allow running e.g. golang.org/test/fibo.go
 }
 
+// String returns a summary of the benchmark results.
+// It follows the benchmark result line format from
+// https://golang.org/design/14313-benchmark-format, not including the
+// benchmark name.
+// Extra metrics override built-in metrics of the same name.
+// String does not include allocs/op or B/op, since those are reported
+// by MemString.
+func (r BenchmarkResult) String() string {
+	buf := new(strings.Builder)
+	fmt.Fprintf(buf, "%8d", r.N)
+
+	// Get ns/op as a float.
+	ns := float64(r.T.Nanoseconds()) / float64(r.N)
+	if ns != 0 {
+		buf.WriteByte('\t')
+		prettyPrint(buf, ns, "ns/op")
+	}
+
+	if mbs := r.mbPerSec(); mbs != 0 {
+		fmt.Fprintf(buf, "\t%7.2f MB/s", mbs)
+	}
+	return buf.String()
+}
+
+func prettyPrint(w io.Writer, x float64, unit string) {
+	// Print all numbers with 10 places before the decimal point
+	// and small numbers with four sig figs. Field widths are
+	// chosen to fit the whole part in 10 places while aligning
+	// the decimal point of all fractional formats.
+	var format string
+	switch y := math.Abs(x); {
+	case y == 0 || y >= 999.95:
+		format = "%10.0f %s"
+	case y >= 99.995:
+		format = "%12.1f %s"
+	case y >= 9.9995:
+		format = "%13.2f %s"
+	case y >= 0.99995:
+		format = "%14.3f %s"
+	case y >= 0.099995:
+		format = "%15.4f %s"
+	case y >= 0.0099995:
+		format = "%16.5f %s"
+	case y >= 0.00099995:
+		format = "%17.6f %s"
+	default:
+		format = "%18.7f %s"
+	}
+	fmt.Fprintf(w, format, x, unit)
+}
+
+type benchContext struct {
+	maxLen int // The largest recorded benchmark name.
+}
+
 func runBenchmarks(benchmarks []InternalBenchmark) bool {
 	if len(benchmarks) == 0 {
 		return true
 	}
+	ctx := &benchContext{}
+	for _, Benchmark := range benchmarks {
+		if l := len(Benchmark.Name); l > ctx.maxLen {
+			ctx.maxLen = l
+		}
+	}
 	main := &B{
 		common: common{
 			name: "Main",
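Together with processBench below, String produces the conventional benchmark result line. An illustrative example (values are made up): a run with N = 1000000, 1234 ns/op, and SetBytes(1024) would print something like

BenchmarkUpper     	 1000000	      1234 ns/op	 829.82 MB/s

where the name is padded to ctx.maxLen by processBench and the MB/s column follows from mbPerSec.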
@@ -199,30 +291,55 @@ func runBenchmarks(benchmarks []InternalBenchmark) bool {
 		benchTime: benchTime,
 		benchFunc: func(b *B) {
 			for _, Benchmark := range benchmarks {
-				if flagVerbose {
-					fmt.Printf("=== RUN   %s\n", Benchmark.Name)
-				}
 				b.Run(Benchmark.Name, Benchmark.F)
-				fmt.Printf("--- Result: %d ns/op\n", b.result.NsPerOp())
 			}
 		},
+		context: ctx,
 	}
 
 	main.runN(1)
 	return true
 }
 
+// processBench runs bench b and prints the results.
+func (b *B) processBench(ctx *benchContext) {
+	benchName := b.name
+
+	if ctx != nil {
+		fmt.Printf("%-*s\t", ctx.maxLen, benchName)
+	}
+	r := b.doBench()
+	if b.failed {
+		// The output could be very long here, but probably isn't.
+		// We print it all, regardless, because we don't want to trim the reason
+		// the benchmark failed.
+		fmt.Printf("--- FAIL: %s\n%s", benchName, "") // b.output)
+		return
+	}
+	if ctx != nil {
+		results := r.String()
+		fmt.Println(results)
+	}
+}
+
 // Run benchmarks f as a subbenchmark with the given name. It reports
 // true if the subbenchmark succeeded.
 //
 // A subbenchmark is like any other benchmark. A benchmark that calls Run at
 // least once will not be measured itself and will be called once with N=1.
 func (b *B) Run(name string, f func(b *B)) bool {
+	if b.level > 0 {
+		name = b.name + "/" + name
+	}
 	b.hasSub = true
 	sub := &B{
-		common: common{name: name},
+		common: common{
+			name:  name,
+			level: b.level + 1,
+		},
 		benchFunc: f,
 		benchTime: b.benchTime,
+		context: b.context,
 	}
 	if sub.run1() {
 		sub.run()
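Because Run prefixes the sub-benchmark name with the parent name only when level > 0, a top-level benchmark keeps its own name while nested ones are reported as Parent/Child — for example, a hypothetical BenchmarkEncode calling b.Run("Small", ...) would show up as BenchmarkEncode/Small.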
@@ -240,6 +357,15 @@ func (b *B) add(other BenchmarkResult) {
 	// in sequence in a single benchmark.
 	r.N = 1
 	r.T += time.Duration(other.NsPerOp())
+	if other.Bytes == 0 {
+		// Summing Bytes is meaningless in aggregate if not all subbenchmarks
+		// set it.
+		b.missingBytes = true
+		r.Bytes = 0
+	}
+	if !b.missingBytes {
+		r.Bytes += other.Bytes
+	}
 }
 
 // A PB is used by RunParallel for running parallel benchmarks.
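The effect of missingBytes: when sub-benchmarks run in sequence under one parent, their byte counts are summed into the aggregate result, but as soon as one of them never called SetBytes (other.Bytes == 0) the aggregate Bytes is reset to 0 and stays there, so no misleading MB/s figure is reported for the parent.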
@@ -48,3 +48,8 @@ func TestBenchmark(t *testing.T) {
 		t.Errorf("Expected speedup >= 0.3, got %f", speedup)
 	}
 }
+
+func BenchmarkSub(b *testing.B) {
+	b.Run("Fast", func(b *testing.B) { BenchmarkFastNonASCII(b) })
+	b.Run("Slow", func(b *testing.B) { BenchmarkSlowNonASCII(b) })
+}
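Given the Run naming rule above, these two sub-benchmarks should be reported as BenchmarkSub/Fast and BenchmarkSub/Slow.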
@@ -48,6 +48,7 @@ type common struct {
 	failed   bool // Test or benchmark has failed.
 	skipped  bool // Test of benchmark has been skipped.
 	finished bool // Test function has completed.
+	level    int  // Nesting depth of test or benchmark.
 	name string // Name of test or benchmark.
 }