Do not use syncPool for chunks when querying
The only time any chunks are put into the syncPool is during
compaction. This means that during almost all normal operation a new
chunk is allocated anyway, on top of the overhead of going through the
syncPool Get process.

Signed-off-by: Chris Marchbanks <[email protected]>
csmarchbanks committed Nov 14, 2018
1 parent 3385571 commit b3678cc
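
For context on the rationale: chunkenc's Pool wraps sync.Pool, so Get can
only reuse a chunk that an earlier Put returned, and the compactor is the
sole Put caller. Below is a minimal sketch of that asymmetry (illustrative
only, not part of the commit; it assumes the github.com/prometheus/tsdb
module):

    package main

    import (
    	"fmt"

    	"github.com/prometheus/tsdb/chunkenc"
    )

    func main() {
    	// Build a small XOR chunk so there are valid bytes to decode.
    	c := chunkenc.NewXORChunk()
    	app, err := c.Appender()
    	if err != nil {
    		panic(err)
    	}
    	app.Append(1000, 1.0) // (timestamp, value)

    	pool := chunkenc.NewPool()

    	// Query path before this commit: nothing has been Put, so the
    	// sync.Pool is empty and Get allocates a fresh chunk anyway,
    	// after first paying for the pool lookup.
    	fromPool, err := pool.Get(chunkenc.EncXOR, c.Bytes())
    	if err != nil {
    		panic(err)
    	}

    	// Query path after this commit (Reader with a nil pool):
    	// decode the bytes directly and skip the pool machinery.
    	direct, err := chunkenc.FromData(chunkenc.EncXOR, c.Bytes())
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(fromPool.NumSamples(), direct.NumSamples())

    	// Compaction is the only caller that returns chunks for reuse.
    	_ = pool.Put(fromPool)
    }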
Showing 2 changed files with 5 additions and 10 deletions.
chunks/chunks.go (3 additions, 6 deletions)
@@ -310,9 +310,6 @@ func newReader(bs []ByteSlice, cs []io.Closer, pool chunkenc.Pool) (*Reader, error) {
 
 // NewReader returns a new chunk reader against the given byte slices.
 func NewReader(bs []ByteSlice, pool chunkenc.Pool) (*Reader, error) {
-	if pool == nil {
-		pool = chunkenc.NewPool()
-	}
 	return newReader(bs, nil, pool)
 }
 
@@ -323,9 +320,6 @@ func NewDirReader(dir string, pool chunkenc.Pool) (*Reader, error) {
 	if err != nil {
 		return nil, err
 	}
-	if pool == nil {
-		pool = chunkenc.NewPool()
-	}
 
 	var bs []ByteSlice
 	var cs []io.Closer
@@ -368,6 +362,9 @@ func (s *Reader) Chunk(ref uint64) (chunkenc.Chunk, error) {
 	}
 	r = b.Range(off+n, off+n+int(l))
 
+	if s.pool == nil {
+		return chunkenc.FromData(chunkenc.Encoding(r[0]), r[1:1+l])
+	}
 	return s.pool.Get(chunkenc.Encoding(r[0]), r[1:1+l])
 }
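
A hedged usage sketch of the new read path (the directory and chunk
reference are hypothetical placeholders, not from the commit): a Reader
opened with a nil pool now decodes each chunk via chunkenc.FromData,
while callers that still want pooling must pass chunkenc.NewPool()
explicitly.

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/prometheus/tsdb/chunks"
    )

    func main() {
    	// Placeholders: a block's chunks directory and a chunk
    	// reference as stored in the block's index.
    	dir := "data/01BKGV7JBM69T2G1BGBGM6KB12/chunks"
    	var ref uint64 = 8

    	// nil pool: Reader.Chunk decodes via chunkenc.FromData
    	// instead of going through pool.Get.
    	r, err := chunks.NewDirReader(dir, nil)
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer r.Close()

    	chk, err := r.Chunk(ref)
    	if err != nil {
    		log.Fatal(err)
    	}
    	it := chk.Iterator()
    	for it.Next() {
    		t, v := it.At()
    		fmt.Println(t, v)
    	}
    	if err := it.Err(); err != nil {
    		log.Fatal(err)
    	}
    }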
db.go (2 additions, 4 deletions)
@@ -99,7 +99,6 @@ type DB struct {
 	logger    log.Logger
 	metrics   *dbMetrics
 	opts      *Options
-	chunkPool chunkenc.Pool
 	compactor Compactor
 
 	// Mutex for that must be held when modifying the general block layout.
@@ -233,7 +232,6 @@ func Open(dir string, l log.Logger, r prometheus.Registerer, opts *Options) (db *DB, err error) {
 		donec:              make(chan struct{}),
 		stopc:              make(chan struct{}),
 		compactionsEnabled: true,
-		chunkPool:          chunkenc.NewPool(),
 	}
 	db.metrics = newDBMetrics(db, r)
 
@@ -249,7 +247,7 @@ func Open(dir string, l log.Logger, r prometheus.Registerer, opts *Options) (db *DB, err error) {
 		db.lockf = lockf
 	}
 
-	db.compactor, err = NewLeveledCompactor(r, l, opts.BlockRanges, db.chunkPool)
+	db.compactor, err = NewLeveledCompactor(r, l, opts.BlockRanges, chunkenc.NewPool())
 	if err != nil {
 		return nil, errors.Wrap(err, "create leveled compactor")
 	}
@@ -521,7 +519,7 @@ func (db *DB) reload() (err error) {
 		// See if we already have the block in memory or open it otherwise.
 		b, ok := db.getBlock(meta.ULID)
 		if !ok {
-			b, err = OpenBlock(dir, db.chunkPool)
+			b, err = OpenBlock(dir, nil)
 			if err != nil {
 				return errors.Wrapf(err, "open block %s", dir)
 			}
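
Net effect, as an illustrative sketch (the block directory is again a
placeholder): the compactor keeps a private pool because compaction is
the only path that Puts chunks back, while blocks opened for querying
pass a nil pool all the way down to their chunk reader.

    package main

    import (
    	"log"

    	"github.com/prometheus/tsdb"
    )

    func main() {
    	dir := "data/01BKGV7JBM69T2G1BGBGM6KB12" // placeholder block dir

    	// Blocks opened for querying no longer share a DB-level pool.
    	b, err := tsdb.OpenBlock(dir, nil)
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer b.Close()

    	// This ChunkReader is backed by a nil-pool chunks.Reader, so
    	// each Chunk call takes the chunkenc.FromData branch added above.
    	cr, err := b.Chunks()
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer cr.Close()
    }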
