From f4cd2a71208fa7d50392bd5a00f15981d0a5d6df Mon Sep 17 00:00:00 2001
From: greatroar <@>
Date: Tue, 26 May 2020 13:22:38 +0200
Subject: [PATCH 1/2] Make backend benchmarks fairer by removing checks
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Checking whether the right data is returned takes up half the time in
some benchmarks. Results for local backend benchmarks on linux/amd64:

name                                      old time/op    new time/op    delta
Backend/BenchmarkLoadFile-8                 4.89ms ± 0%    2.72ms ± 1%   -44.26%  (p=0.008 n=5+5)
Backend/BenchmarkLoadPartialFile-8           936µs ± 6%     439µs ±15%   -53.07%  (p=0.008 n=5+5)
Backend/BenchmarkLoadPartialFileOffset-8     940µs ± 1%     456µs ±10%   -51.50%  (p=0.008 n=5+5)
Backend/BenchmarkSave-8                     23.9ms ±14%    24.8ms ±41%      ~     (p=0.690 n=5+5)

name                                      old speed      new speed      delta
Backend/BenchmarkLoadFile-8               3.43GB/s ± 0%  6.16GB/s ± 1%   +79.40%  (p=0.008 n=5+5)
Backend/BenchmarkLoadPartialFile-8        4.48GB/s ± 6%  9.63GB/s ±14%  +114.78%  (p=0.008 n=5+5)
Backend/BenchmarkLoadPartialFileOffset-8  4.46GB/s ± 1%  9.22GB/s ±10%  +106.74%  (p=0.008 n=5+5)
Backend/BenchmarkSave-8                    706MB/s ±13%   698MB/s ±31%      ~     (p=0.690 n=5+5)
---
 internal/backend/test/benchmarks.go | 43 ++++++++++++++---------------
 1 file changed, 21 insertions(+), 22 deletions(-)

diff --git a/internal/backend/test/benchmarks.go b/internal/backend/test/benchmarks.go
index 302768f2e..94c3b6c9e 100644
--- a/internal/backend/test/benchmarks.go
+++ b/internal/backend/test/benchmarks.go
@@ -48,17 +48,17 @@ func (s *Suite) BenchmarkLoadFile(t *testing.B) {
             n, ierr = io.ReadFull(rd, buf)
             return ierr
         })
-        if err != nil {
+
+        t.StopTimer()
+        switch {
+        case err != nil:
             t.Fatal(err)
-        }
-
-        if n != length {
+        case n != length:
             t.Fatalf("wrong number of bytes read: want %v, got %v", length, n)
-        }
-
-        if !bytes.Equal(data, buf) {
+        case !bytes.Equal(data, buf):
             t.Fatalf("wrong bytes returned")
         }
+        t.StartTimer()
     }
 }
 
@@ -85,18 +85,17 @@ func (s *Suite) BenchmarkLoadPartialFile(t *testing.B) {
             n, ierr = io.ReadFull(rd, buf)
             return ierr
         })
-        if err != nil {
+
+        t.StopTimer()
+        switch {
+        case err != nil:
             t.Fatal(err)
-        }
-
-        if n != testLength {
+        case n != testLength:
             t.Fatalf("wrong number of bytes read: want %v, got %v", testLength, n)
-        }
-
-        if !bytes.Equal(data[:testLength], buf) {
+        case !bytes.Equal(data[:testLength], buf):
             t.Fatalf("wrong bytes returned")
         }
-
+        t.StartTimer()
     }
 }
 
@@ -124,17 +123,17 @@ func (s *Suite) BenchmarkLoadPartialFileOffset(t *testing.B) {
             n, ierr = io.ReadFull(rd, buf)
             return ierr
         })
-        if err != nil {
+
+        t.StopTimer()
+        switch {
+        case err != nil:
             t.Fatal(err)
-        }
-
-        if n != testLength {
+        case n != testLength:
             t.Fatalf("wrong number of bytes read: want %v, got %v", testLength, n)
-        }
-
-        if !bytes.Equal(data[testOffset:testOffset+testLength], buf) {
+        case !bytes.Equal(data[testOffset:testOffset+testLength], buf):
             t.Fatalf("wrong bytes returned")
         }
+        t.StartTimer()
     }
 }
 
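Not part of either patch: a minimal, self-contained sketch of the timing
pattern patch 1 applies, with the correctness checks moved outside the
measured region via testing.B's StopTimer/StartTimer. The package name, the
processBuffer helper and the buffer sizes below are illustrative stand-ins,
not restic code. StopTimer and StartTimer have some per-call cost of their
own, so the pattern pays off mainly when each iteration is expensive, as the
multi-millisecond backend loads above are.

package example

import (
    "bytes"
    "testing"
)

// processBuffer stands in for the operation being benchmarked; it is an
// illustrative placeholder, not a restic function.
func processBuffer(dst, src []byte) int {
    return copy(dst, src)
}

func BenchmarkProcessBuffer(b *testing.B) {
    src := bytes.Repeat([]byte{0xab}, 1<<20)
    dst := make([]byte, len(src))

    b.SetBytes(int64(len(src)))
    b.ResetTimer()

    for i := 0; i < b.N; i++ {
        n := processBuffer(dst, src)

        // Pause the timer so that the correctness checks do not count
        // towards the reported time/op and throughput.
        b.StopTimer()
        switch {
        case n != len(src):
            b.Fatalf("wrong number of bytes: want %v, got %v", len(src), n)
        case !bytes.Equal(dst, src):
            b.Fatal("wrong bytes returned")
        }
        b.StartTimer()
    }
}

Placed in a _test.go file, this runs with "go test -bench=BenchmarkProcessBuffer";
only the processBuffer call contributes to the reported time/op.
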
From 190d8e2f510c251cf93b582de1c0cbd842fa4d09 Mon Sep 17 00:00:00 2001
From: greatroar <@>
Date: Tue, 26 May 2020 13:35:07 +0200
Subject: [PATCH 2/2] Flatten backend.LimitedReadCloser structure
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This inlines the io.LimitedReader into the LimitedReadCloser body to
achieve fewer allocations. Results on linux/amd64:

name                                      old time/op    new time/op    delta
Backend/BenchmarkLoadPartialFile-8           412µs ± 4%     413µs ± 4%     ~     (p=0.634 n=17+17)
Backend/BenchmarkLoadPartialFileOffset-8     455µs ±13%     441µs ±10%     ~     (p=0.072 n=20+18)

name                                      old speed      new speed      delta
Backend/BenchmarkLoadPartialFile-8        10.2GB/s ± 3%  10.2GB/s ± 4%     ~     (p=0.817 n=16+17)
Backend/BenchmarkLoadPartialFileOffset-8  9.25GB/s ±12%  9.54GB/s ± 9%     ~     (p=0.072 n=20+18)

name                                      old alloc/op   new alloc/op   delta
Backend/BenchmarkLoadPartialFile-8            888B ± 0%      872B ± 0%  -1.80%  (p=0.000 n=15+15)
Backend/BenchmarkLoadPartialFileOffset-8      888B ± 0%      872B ± 0%  -1.80%  (p=0.000 n=15+15)

name                                      old allocs/op  new allocs/op  delta
Backend/BenchmarkLoadPartialFile-8            18.0 ± 0%      17.0 ± 0%  -5.56%  (p=0.000 n=15+15)
Backend/BenchmarkLoadPartialFileOffset-8      18.0 ± 0%      17.0 ± 0%  -5.56%  (p=0.000 n=15+15)
---
 internal/backend/utils.go | 13 ++++---------
 1 file changed, 4 insertions(+), 9 deletions(-)

diff --git a/internal/backend/utils.go b/internal/backend/utils.go
index 1665aedc6..e6dc7a549 100644
--- a/internal/backend/utils.go
+++ b/internal/backend/utils.go
@@ -32,19 +32,14 @@ func LoadAll(ctx context.Context, buf []byte, be restic.Backend, h restic.Handle
 
 // LimitedReadCloser wraps io.LimitedReader and exposes the Close() method.
 type LimitedReadCloser struct {
-    io.ReadCloser
-    io.Reader
+    io.Closer
+    io.LimitedReader
 }
 
-// Read reads data from the limited reader.
-func (l *LimitedReadCloser) Read(p []byte) (int, error) {
-    return l.Reader.Read(p)
-}
-
-// LimitReadCloser returns a new reader wraps r in an io.LimitReader, but also
+// LimitReadCloser returns a new reader wraps r in an io.LimitedReader, but also
 // exposes the Close() method.
 func LimitReadCloser(r io.ReadCloser, n int64) *LimitedReadCloser {
-    return &LimitedReadCloser{ReadCloser: r, Reader: io.LimitReader(r, n)}
+    return &LimitedReadCloser{Closer: r, LimitedReader: io.LimitedReader{R: r, N: n}}
 }
 
 // DefaultLoad implements Backend.Load using lower-level openReader func
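
Not part of either patch: a sketch of why the flattened struct needs one
allocation fewer, as reflected in the allocs/op numbers above. The old
LimitReadCloser allocated twice per call: once for the value behind the
io.Reader returned by io.LimitReader, and once for the wrapper struct.
Embedding io.LimitedReader by value folds the limit state into the wrapper's
single allocation. The lower-case names and the /etc/hostname path below are
illustrative, not restic code.

package main

import (
    "io"
    "os"
)

// limitedReadCloser mirrors the shape of the patched backend.LimitedReadCloser:
// the io.LimitedReader is embedded by value, so one allocation holds both the
// read limit and the Closer.
type limitedReadCloser struct {
    io.Closer
    io.LimitedReader
}

// limitReadCloser mirrors backend.LimitReadCloser: a single composite literal,
// no separate io.LimitReader allocation.
func limitReadCloser(r io.ReadCloser, n int64) *limitedReadCloser {
    return &limitedReadCloser{Closer: r, LimitedReader: io.LimitedReader{R: r, N: n}}
}

func main() {
    // Illustrative input; any io.ReadCloser works.
    f, err := os.Open("/etc/hostname")
    if err != nil {
        panic(err)
    }

    // Copy at most 16 bytes to stdout; Read is promoted from the embedded
    // io.LimitedReader, Close from the embedded io.Closer (the file).
    lrc := limitReadCloser(f, 16)
    if _, err := io.Copy(os.Stdout, lrc); err != nil {
        panic(err)
    }
    if err := lrc.Close(); err != nil {
        panic(err)
    }
}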