diff --git a/s2/encode.go b/s2/encode.go
index 8f89e21a2f..d88e91b50a 100644
--- a/s2/encode.go
+++ b/s2/encode.go
@@ -487,7 +487,9 @@ func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) {
 // EncodeBuffer will add a buffer to the stream.
 // This is the fastest way to encode a stream,
 // but the input buffer cannot be written to by the caller
-// until this function, Flush or Close has been called.
+// until Flush or Close has been called when concurrency != 1.
+//
+// If you cannot control that, use the regular Write function.
 //
 // Note that input is not buffered.
 // This means that each write will result in discrete blocks being created.
diff --git a/s2/encode_test.go b/s2/encode_test.go
index 0a3c0ccbdc..cd97bf1f18 100644
--- a/s2/encode_test.go
+++ b/s2/encode_test.go
@@ -277,6 +277,110 @@ func TestWriterPadding(t *testing.T) {
 	}
 }
 
+func TestBigRegularWrites(t *testing.T) {
+	var buf [maxBlockSize * 2]byte
+	dst := bytes.NewBuffer(nil)
+	enc := NewWriter(dst, WriterBestCompression())
+	max := uint8(10)
+	if testing.Short() {
+		max = 4
+	}
+	for n := uint8(0); n < max; n++ {
+		for i := range buf[:] {
+			buf[i] = n
+		}
+		// Writes may not keep a reference to the data beyond the Write call.
+		_, err := enc.Write(buf[:])
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+	err := enc.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	dec := NewReader(dst)
+	_, err = io.Copy(ioutil.Discard, dec)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestBigEncodeBuffer(t *testing.T) {
+	const blockSize = 1 << 20
+	var buf [blockSize * 2]byte
+	dst := bytes.NewBuffer(nil)
+	enc := NewWriter(dst, WriterBlockSize(blockSize), WriterBestCompression())
+	max := uint8(10)
+	if testing.Short() {
+		max = 4
+	}
+	for n := uint8(0); n < max; n++ {
+		// Change the buffer to a new value.
+		for i := range buf[:] {
+			buf[i] = n
+		}
+		err := enc.EncodeBuffer(buf[:])
+		if err != nil {
+			t.Fatal(err)
+		}
+		// We can write it again since we aren't changing it.
+		err = enc.EncodeBuffer(buf[:])
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = enc.Flush()
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+	err := enc.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	dec := NewReader(dst)
+	n, err := io.Copy(ioutil.Discard, dec)
+	if err != nil {
+		t.Fatal(err)
+	}
+	t.Log(n)
+}
+
+func TestBigEncodeBufferSync(t *testing.T) {
+	const blockSize = 1 << 20
+	var buf [blockSize * 2]byte
+	dst := bytes.NewBuffer(nil)
+	enc := NewWriter(dst, WriterBlockSize(blockSize), WriterConcurrency(1), WriterBestCompression())
+	max := uint8(10)
+	if testing.Short() {
+		max = 2
+	}
+	for n := uint8(0); n < max; n++ {
+		// Change the buffer to a new value.
+		for i := range buf[:] {
+			buf[i] = n
+		}
+		// When WriterConcurrency == 1 we can encode and reuse the buffer.
+		err := enc.EncodeBuffer(buf[:])
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+	err := enc.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	dec := NewReader(dst)
+	n, err := io.Copy(ioutil.Discard, dec)
+	if err != nil {
+		t.Fatal(err)
+	}
+	t.Log(n)
+}
+
 func BenchmarkWriterRandom(b *testing.B) {
 	rng := rand.New(rand.NewSource(1))
 	// Make max window so we never get matches.
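
For reference, a minimal usage sketch (not part of the diff) of the contract documented above: with the default concurrent writer the buffer handed to EncodeBuffer must be left untouched until Flush or Close returns, while WriterConcurrency(1) allows immediate reuse, as exercised by TestBigEncodeBufferSync. The import path github.com/klauspost/compress/s2 and the standalone main function are assumptions for illustration; the option and method names are those used in the diff.

// Sketch only: illustrates the EncodeBuffer buffer-ownership rules from the
// updated doc comment. Not part of the patch.
package main

import (
	"bytes"
	"log"

	"github.com/klauspost/compress/s2"
)

func main() {
	var dst bytes.Buffer

	// Default (concurrent) writer: the buffer passed to EncodeBuffer must not
	// be modified until Flush or Close has been called.
	enc := s2.NewWriter(&dst)
	buf := bytes.Repeat([]byte("concurrent"), 1000)
	if err := enc.EncodeBuffer(buf); err != nil {
		log.Fatal(err)
	}
	if err := enc.Flush(); err != nil {
		log.Fatal(err)
	}
	buf[0] = 'X' // Only safe to modify after Flush/Close.
	if err := enc.Close(); err != nil {
		log.Fatal(err)
	}

	// With WriterConcurrency(1) the input can be reused right after
	// EncodeBuffer returns, per the updated comment and TestBigEncodeBufferSync.
	dst.Reset()
	syncEnc := s2.NewWriter(&dst, s2.WriterConcurrency(1))
	for i := 0; i < 3; i++ {
		buf[0] = byte(i) // Reusing and modifying the buffer between calls is fine here.
		if err := syncEnc.EncodeBuffer(buf); err != nil {
			log.Fatal(err)
		}
	}
	if err := syncEnc.Close(); err != nil {
		log.Fatal(err)
	}
}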