From b3678cc853aa96270889a200f65483ab77537cd3 Mon Sep 17 00:00:00 2001
From: Chris Marchbanks <csmarchbanks@gmail.com>
Date: Tue, 23 Oct 2018 15:11:55 -0600
Subject: [PATCH] Do not use syncPool for chunks when querying

The only time any chunks are currently put into the syncPool is during
a compaction. This means that during almost all normal operations a new
chunk is still being created, along with the extra overhead of going
through the syncPool Get process.

Signed-off-by: Chris Marchbanks <csmarchbanks@gmail.com>
---
 chunks/chunks.go | 9 +++------
 db.go            | 6 ++----
 2 files changed, 5 insertions(+), 10 deletions(-)

diff --git a/chunks/chunks.go b/chunks/chunks.go
index 5eab2398..e99d408a 100644
--- a/chunks/chunks.go
+++ b/chunks/chunks.go
@@ -310,9 +310,6 @@ func newReader(bs []ByteSlice, cs []io.Closer, pool chunkenc.Pool) (*Reader, err
 
 // NewReader returns a new chunk reader against the given byte slices.
 func NewReader(bs []ByteSlice, pool chunkenc.Pool) (*Reader, error) {
-	if pool == nil {
-		pool = chunkenc.NewPool()
-	}
 	return newReader(bs, nil, pool)
 }
 
@@ -323,9 +320,6 @@ func NewDirReader(dir string, pool chunkenc.Pool) (*Reader, error) {
 	if err != nil {
 		return nil, err
 	}
-	if pool == nil {
-		pool = chunkenc.NewPool()
-	}
 	var bs []ByteSlice
 	var cs []io.Closer
 
@@ -368,6 +362,9 @@ func (s *Reader) Chunk(ref uint64) (chunkenc.Chunk, error) {
 	}
 	r = b.Range(off+n, off+n+int(l))
 
+	if s.pool == nil {
+		return chunkenc.FromData(chunkenc.Encoding(r[0]), r[1:1+l])
+	}
 	return s.pool.Get(chunkenc.Encoding(r[0]), r[1:1+l])
 }
 
diff --git a/db.go b/db.go
index 8f452137..646d8e77 100644
--- a/db.go
+++ b/db.go
@@ -99,7 +99,6 @@ type DB struct {
 	logger    log.Logger
 	metrics   *dbMetrics
 	opts      *Options
-	chunkPool chunkenc.Pool
 	compactor Compactor
 
 	// Mutex for that must be held when modifying the general block layout.
@@ -233,7 +232,6 @@ func Open(dir string, l log.Logger, r prometheus.Registerer, opts *Options) (db
 		donec:              make(chan struct{}),
 		stopc:              make(chan struct{}),
 		compactionsEnabled: true,
-		chunkPool:          chunkenc.NewPool(),
 	}
 	db.metrics = newDBMetrics(db, r)
 
@@ -249,7 +247,7 @@ func Open(dir string, l log.Logger, r prometheus.Registerer, opts *Options) (db
 		db.lockf = lockf
 	}
 
-	db.compactor, err = NewLeveledCompactor(r, l, opts.BlockRanges, db.chunkPool)
+	db.compactor, err = NewLeveledCompactor(r, l, opts.BlockRanges, chunkenc.NewPool())
 	if err != nil {
 		return nil, errors.Wrap(err, "create leveled compactor")
 	}
@@ -521,7 +519,7 @@ func (db *DB) reload() (err error) {
 		// See if we already have the block in memory or open it otherwise.
 		b, ok := db.getBlock(meta.ULID)
 		if !ok {
-			b, err = OpenBlock(dir, db.chunkPool)
+			b, err = OpenBlock(dir, nil)
 			if err != nil {
 				return errors.Wrapf(err, "open block %s", dir)
 			}