Skip to content

Commit

Permalink
Clean up test decoders (#190)
Browse files Browse the repository at this point in the history
* Clean up test decoders
* Update changelog.

Adds goroutine leak detection. Only tests themselves were leaking.

Fixes #189  (as much as possible)
  • Loading branch information
klauspost authored Dec 4, 2019
1 parent b87d2c4 commit 438db64
Show file tree
Hide file tree
Showing 6 changed files with 96 additions and 15 deletions.
8 changes: 6 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -13,8 +13,12 @@ This package provides various compression algorithms.
[![fuzzit](https://app.fuzzit.dev/badge?org_id=klauspost)](https://fuzzit.dev)

# changelog
* Nov 28, 2018 (v1.9.3) Less allocations in stateless deflate.
* Nov 28, 2018: 5-20% Faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184)

* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188)
* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187)
* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines.
* Nov 28, 2019 (v1.9.3) Less allocations in stateless deflate.
* Nov 28, 2019: 5-20% Faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184)
* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate.
* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180)
* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB.
Expand Down
4 changes: 4 additions & 0 deletions zstd/blockdec.go
Original file line number Diff line number Diff line change
Expand Up @@ -89,6 +89,7 @@ type blockDec struct {
sequenceBuf []seq
tmp [4]byte
err error
decWG sync.WaitGroup
}

func (b *blockDec) String() string {
Expand All @@ -105,6 +106,7 @@ func newBlockDec(lowMem bool) *blockDec {
input: make(chan struct{}, 1),
history: make(chan *history, 1),
}
b.decWG.Add(1)
go b.startDecoder()
return &b
}
Expand Down Expand Up @@ -183,11 +185,13 @@ func (b *blockDec) Close() {
close(b.input)
close(b.history)
close(b.result)
b.decWG.Wait()
}

// startDecoder will prepare decoding the block when it receives input.
// This will separate output and history.
func (b *blockDec) startDecoder() {
defer b.decWG.Done()
for range b.input {
//println("blockDec: Got block input")
switch b.Type {
Expand Down
19 changes: 18 additions & 1 deletion zstd/decoder_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ import (
"reflect"
"runtime"
"strings"
"sync"
"testing"
"time"

Expand Down Expand Up @@ -86,6 +87,7 @@ func TestNewReaderMismatch(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer dec.Close()
var tmp [8]byte
xx := xxhash.New()
var cHash int
Expand Down Expand Up @@ -180,6 +182,7 @@ func TestNewReaderRead(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer dec.Close()
_, err = dec.Read([]byte{0})
if err == nil {
t.Fatal("Wanted error on uninitialized read, got nil")
Expand Down Expand Up @@ -501,6 +504,7 @@ func TestDecoder_Reset(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer dec.Close()
decoded, err := dec.DecodeAll(dst, nil)
if err != nil {
t.Error(err, len(decoded))
Expand Down Expand Up @@ -1064,13 +1068,15 @@ func testDecoderDecodeAll(t *testing.T, fn string, dec *Decoder) {
}
want[tt.Name+".zst"], _ = ioutil.ReadAll(r)
}

var wg sync.WaitGroup
for i, tt := range zr.File {
tt := tt
if !strings.HasSuffix(tt.Name, ".zst") || (testing.Short() && i > 20) {
continue
}
wg.Add(1)
t.Run("DecodeAll-"+tt.Name, func(t *testing.T) {
defer wg.Done()
t.Parallel()
r, err := tt.Open()
if err != nil {
Expand Down Expand Up @@ -1108,6 +1114,10 @@ func testDecoderDecodeAll(t *testing.T, fn string, dec *Decoder) {
t.Log(len(got), "bytes returned, matches input, ok!")
})
}
go func() {
wg.Wait()
dec.Close()
}()
}

func testDecoderDecodeAllError(t *testing.T, fn string, dec *Decoder) {
Expand All @@ -1120,12 +1130,15 @@ func testDecoderDecodeAllError(t *testing.T, fn string, dec *Decoder) {
t.Fatal(err)
}

var wg sync.WaitGroup
for _, tt := range zr.File {
tt := tt
if !strings.HasSuffix(tt.Name, ".zst") {
continue
}
wg.Add(1)
t.Run("DecodeAll-"+tt.Name, func(t *testing.T) {
defer wg.Done()
t.Parallel()
r, err := tt.Open()
if err != nil {
Expand All @@ -1142,6 +1155,10 @@ func testDecoderDecodeAllError(t *testing.T, fn string, dec *Decoder) {
}
})
}
go func() {
wg.Wait()
dec.Close()
}()
}

// Test our predefined tables are correct.
Expand Down
49 changes: 37 additions & 12 deletions zstd/encoder_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,23 +28,26 @@ func TestEncoder_EncodeAllSimple(t *testing.T) {
if err != nil {
t.Fatal(err)
}
dec, err := NewReader(nil)
if err != nil {
t.Fatal(err)
}
defer dec.Close()

in = append(in, in...)
for level := EncoderLevel(speedNotSet + 1); level < speedLast; level++ {
t.Run(level.String(), func(t *testing.T) {
e, err := NewWriter(nil, WithEncoderLevel(level))
if err != nil {
t.Fatal(err)
}
defer e.Close()
start := time.Now()
dst := e.EncodeAll(in, nil)
t.Log("Simple Encoder len", len(in), "-> zstd len", len(dst))
mbpersec := (float64(len(in)) / (1024 * 1024)) / (float64(time.Since(start)) / (float64(time.Second)))
t.Logf("Encoded %d bytes with %.2f MB/s", len(in), mbpersec)

dec, err := NewReader(nil)
if err != nil {
t.Fatal(err)
}
decoded, err := dec.DecodeAll(dst, nil)
if err != nil {
t.Error(err, len(decoded))
Expand Down Expand Up @@ -119,6 +122,7 @@ func TestEncoder_EncodeAllEncodeXML(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer dec.Close()
in, err := ioutil.ReadAll(dec)
if err != nil {
t.Fatal(err)
Expand All @@ -130,6 +134,7 @@ func TestEncoder_EncodeAllEncodeXML(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer e.Close()
start := time.Now()
dst := e.EncodeAll(in, nil)
t.Log("Simple Encoder len", len(in), "-> zstd len", len(dst))
Expand Down Expand Up @@ -183,6 +188,7 @@ func TestEncoderRegression(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer enc.Close()

for i, tt := range zr.File {
if !strings.HasSuffix(t.Name(), "") {
Expand Down Expand Up @@ -245,6 +251,12 @@ func TestEncoder_EncodeAllTwain(t *testing.T) {
testWindowSizes = []int{1 << 20}
}

dec, err := NewReader(nil)
if err != nil {
t.Fatal(err)
}
defer dec.Close()

for level := EncoderLevel(speedNotSet + 1); level < speedLast; level++ {
t.Run(level.String(), func(t *testing.T) {
for _, windowSize := range testWindowSizes {
Expand All @@ -253,16 +265,13 @@ func TestEncoder_EncodeAllTwain(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer e.Close()
start := time.Now()
dst := e.EncodeAll(in, nil)
t.Log("Simple Encoder len", len(in), "-> zstd len", len(dst))
mbpersec := (float64(len(in)) / (1024 * 1024)) / (float64(time.Since(start)) / (float64(time.Second)))
t.Logf("Encoded %d bytes with %.2f MB/s", len(in), mbpersec)

dec, err := NewReader(nil)
if err != nil {
t.Fatal(err)
}
decoded, err := dec.DecodeAll(dst, nil)
if err != nil {
t.Error(err, len(decoded))
Expand All @@ -288,6 +297,12 @@ func TestEncoder_EncodeAllPi(t *testing.T) {
testWindowSizes = []int{1 << 20}
}

dec, err := NewReader(nil)
if err != nil {
t.Fatal(err)
}
defer dec.Close()

for level := EncoderLevel(speedNotSet + 1); level < speedLast; level++ {
t.Run(level.String(), func(t *testing.T) {
for _, windowSize := range testWindowSizes {
Expand All @@ -296,16 +311,13 @@ func TestEncoder_EncodeAllPi(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer e.Close()
start := time.Now()
dst := e.EncodeAll(in, nil)
t.Log("Simple Encoder len", len(in), "-> zstd len", len(dst))
mbpersec := (float64(len(in)) / (1024 * 1024)) / (float64(time.Since(start)) / (float64(time.Second)))
t.Logf("Encoded %d bytes with %.2f MB/s", len(in), mbpersec)

dec, err := NewReader(nil)
if err != nil {
t.Fatal(err)
}
decoded, err := dec.DecodeAll(dst, nil)
if err != nil {
t.Error(err, len(decoded))
Expand All @@ -331,6 +343,7 @@ func TestWithEncoderPadding(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer d.Close()

for i := 0; i < n; i++ {
padding := (rng.Int() & 0xfff) + 1
Expand Down Expand Up @@ -471,11 +484,13 @@ func testEncoderRoundtrip(t *testing.T, file string, wantCRC []byte) {
if err != nil {
t.Fatal(err)
}
defer dec2.Close()

enc, err := NewWriter(pw, WithEncoderCRC(true), WithEncoderLevel(level))
if err != nil {
t.Fatal(err)
}
defer enc.Close()
var wantSize int64
start := time.Now()
go func() {
Expand Down Expand Up @@ -560,11 +575,13 @@ func testEncoderRoundtripWriter(t *testing.T, file string, wantCRC []byte) {
if err != nil {
t.Fatal(err)
}
defer dec2.Close()

enc, err := NewWriter(pw, WithEncoderCRC(true))
if err != nil {
t.Fatal(err)
}
defer enc.Close()
encW := writerWrapper{w: enc}
var wantSize int64
start := time.Now()
Expand Down Expand Up @@ -638,6 +655,7 @@ func TestEncoder_EncodeAllSilesia(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer dec.Close()
decoded, err := dec.DecodeAll(dst, nil)
if err != nil {
t.Error(err, len(decoded))
Expand All @@ -659,6 +677,7 @@ func TestEncoder_EncodeAllEmpty(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer e.Close()
dst := e.EncodeAll(in, nil)
if len(dst) == 0 {
t.Fatal("Requested zero frame, but got nothing.")
Expand All @@ -669,6 +688,7 @@ func TestEncoder_EncodeAllEmpty(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer dec.Close()
decoded, err := dec.DecodeAll(dst, nil)
if err != nil {
t.Error(err, len(decoded))
Expand Down Expand Up @@ -784,6 +804,7 @@ func BenchmarkEncoder_EncodeAllSimple(b *testing.B) {
if err != nil {
b.Fatal(err)
}
defer enc.Close()
dst := enc.EncodeAll(in, nil)
wantSize := len(dst)
b.ResetTimer()
Expand Down Expand Up @@ -876,6 +897,7 @@ func BenchmarkRandomEncodeAllFastest(b *testing.B) {
data[i] = uint8(rng.Intn(256))
}
enc, _ := NewWriter(nil, WithEncoderLevel(SpeedFastest), WithEncoderConcurrency(1))
defer enc.Close()
dst := enc.EncodeAll(data, nil)
wantSize := len(dst)
b.ResetTimer()
Expand All @@ -896,6 +918,7 @@ func BenchmarkRandomEncodeAllDefault(b *testing.B) {
data[i] = uint8(rng.Intn(256))
}
enc, _ := NewWriter(nil, WithEncoderLevel(SpeedDefault), WithEncoderConcurrency(1))
defer enc.Close()
dst := enc.EncodeAll(data, nil)
wantSize := len(dst)
b.ResetTimer()
Expand All @@ -917,6 +940,7 @@ func BenchmarkRandomEncoderFastest(b *testing.B) {
}
wantSize := int64(len(data))
enc, _ := NewWriter(ioutil.Discard, WithEncoderLevel(SpeedFastest))
defer enc.Close()
n, err := io.Copy(enc, bytes.NewBuffer(data))
if err != nil {
b.Fatal(err)
Expand Down Expand Up @@ -947,6 +971,7 @@ func BenchmarkRandomEncoderDefault(b *testing.B) {
}
wantSize := int64(len(data))
enc, _ := NewWriter(ioutil.Discard, WithEncoderLevel(SpeedDefault))
defer enc.Close()
n, err := io.Copy(enc, bytes.NewBuffer(data))
if err != nil {
b.Fatal(err)
Expand Down
3 changes: 3 additions & 0 deletions zstd/snappy_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@ func TestSnappy_ConvertSimple(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer dec.Close()
decoded, err := dec.DecodeAll(dst.Bytes(), nil)
if err != nil {
t.Error(err, len(decoded))
Expand All @@ -62,6 +63,7 @@ func TestSnappy_ConvertXML(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer dec.Close()
in, err := ioutil.ReadAll(dec)
if err != nil {
t.Fatal(err)
Expand Down Expand Up @@ -138,6 +140,7 @@ func TestSnappy_ConvertSilesia(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer dec.Close()
decoded, err := dec.DecodeAll(dst.Bytes(), nil)
if err != nil {
t.Error(err, len(decoded))
Expand Down
Loading

0 comments on commit 438db64

Please sign in to comment.