Skip to content
This repository has been archived by the owner on Aug 13, 2019. It is now read-only.

Do not use syncPool for chunks when querying #427

Closed
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions block.go
Original file line number Diff line number Diff line change
Expand Up @@ -118,6 +118,9 @@ type ChunkReader interface {
// Chunk returns the series data chunk with the given reference.
Chunk(ref uint64) (chunkenc.Chunk, error)

// Put will return a chunk back to a backing pool.
Put(chk chunkenc.Chunk)

// Close releases all underlying resources of the reader.
Close() error
}
Expand Down
6 changes: 4 additions & 2 deletions block_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ import (
"testing"

"github.com/go-kit/kit/log"
"github.com/prometheus/tsdb/chunkenc"
"github.com/prometheus/tsdb/index"
"github.com/prometheus/tsdb/labels"
"github.com/prometheus/tsdb/testutil"
Expand Down Expand Up @@ -105,15 +106,16 @@ func createPopulatedBlock(tb testing.TB, dir string, nSeries int, mint, maxt int
testutil.Ok(tb, err)
}

compactor, err := NewLeveledCompactor(nil, log.NewNopLogger(), []int64{1000000}, nil)
pool := chunkenc.NewPool()
compactor, err := NewLeveledCompactor(nil, log.NewNopLogger(), []int64{1000000}, pool)
testutil.Ok(tb, err)

testutil.Ok(tb, os.MkdirAll(dir, 0777))

ulid, err := compactor.Write(dir, head, head.MinTime(), head.MaxTime(), nil)
testutil.Ok(tb, err)

blk, err := OpenBlock(filepath.Join(dir, ulid.String()), nil)
blk, err := OpenBlock(filepath.Join(dir, ulid.String()), pool)
testutil.Ok(tb, err)
return blk
}
15 changes: 9 additions & 6 deletions chunks/chunks.go
Original file line number Diff line number Diff line change
Expand Up @@ -310,9 +310,6 @@ func newReader(bs []ByteSlice, cs []io.Closer, pool chunkenc.Pool) (*Reader, err

// NewReader returns a new chunk reader against the given byte slices.
func NewReader(bs []ByteSlice, pool chunkenc.Pool) (*Reader, error) {
if pool == nil {
pool = chunkenc.NewPool()
}
return newReader(bs, nil, pool)
}

Expand All @@ -323,9 +320,6 @@ func NewDirReader(dir string, pool chunkenc.Pool) (*Reader, error) {
if err != nil {
return nil, err
}
if pool == nil {
pool = chunkenc.NewPool()
}

var bs []ByteSlice
var cs []io.Closer
Expand Down Expand Up @@ -368,9 +362,18 @@ func (s *Reader) Chunk(ref uint64) (chunkenc.Chunk, error) {
}
r = b.Range(off+n, off+n+int(l))

if s.pool == nil {
return chunkenc.FromData(chunkenc.Encoding(r[0]), r[1:1+l])
}
return s.pool.Get(chunkenc.Encoding(r[0]), r[1:1+l])
}

// Put returns the chunk chk to the reader's backing pool so its buffer can
// be reused. It is safe to call with a nil chunk or on a pool-less Reader;
// both cases are silently ignored.
func (s *Reader) Put(chk chunkenc.Chunk) {
	if s.pool == nil || chk == nil {
		return
	}
	s.pool.Put(chk)
}

func nextSequenceFile(dir string) (string, int, error) {
names, err := fileutil.ReadDir(dir)
if err != nil {
Expand Down
4 changes: 4 additions & 0 deletions head.go
Original file line number Diff line number Diff line change
Expand Up @@ -973,6 +973,10 @@ func (h *headChunkReader) Close() error {
return nil
}

// Put implements ChunkReader. It is a no-op: the head chunk reader does not
// appear to draw its chunks from a backing pool, so there is nothing to
// return — presumably head chunks are owned by the in-memory series; verify
// against headChunkReader.Chunk.
func (h *headChunkReader) Put(_ chunkenc.Chunk) {
	// This is a noop.
}

// packChunkID packs a seriesID and a chunkID within it into a global 8 byte ID.
// It panics if the seriesID exceeds 5 bytes or the chunk ID 3 bytes.
func packChunkID(seriesID, chunkID uint64) uint64 {
Expand Down
4 changes: 4 additions & 0 deletions querier.go
Original file line number Diff line number Diff line change
Expand Up @@ -633,6 +633,10 @@ func (s *populatedChunkSeries) Next() bool {
continue
}

for _, chk := range s.chks {
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If we decide that writing chunks back to the pool during queries is the best approach, would there be any concern about doing this at the beginning of the Next() block? Then the chunks could be reused right away.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yeah, I have been looking at this for the past two days, and although it is an awkward place to put it, I couldn't find a better way to structure the code.

s.chunks.Put(chk.Chunk)
}

s.lset = lset
s.chks = chks
s.intervals = dranges
Expand Down
4 changes: 4 additions & 0 deletions querier_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1331,6 +1331,10 @@ func (cr mockChunkReader) Chunk(id uint64) (chunkenc.Chunk, error) {
return nil, errors.New("Chunk with ref not found")
}

// Put implements ChunkReader for the test mock. It is a no-op because the
// mock serves chunks straight from a map and has no backing pool.
func (cr mockChunkReader) Put(_ chunkenc.Chunk) {
	// this is a noop.
}

// Close implements ChunkReader for the test mock. There are no underlying
// resources to release, so it always reports success.
func (cr mockChunkReader) Close() error {
	return nil
}
Expand Down