-
Notifications
You must be signed in to change notification settings - Fork 179
Avoid chunk allocations and refactor compactions #118
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -100,7 +100,7 @@ type ChunkWriter interface { | |
// must be populated. | ||
// After returning successfully, the Ref fields in the ChunkMetas | ||
// are set and can be used to retrieve the chunks from the written data. | ||
WriteChunks(chunks ...*ChunkMeta) error | ||
WriteChunks(chunks ...ChunkMeta) error | ||
|
||
// Close writes any required finalization and closes the resources | ||
// associated with the underlying writer. | ||
|
@@ -222,7 +222,7 @@ func (w *chunkWriter) write(b []byte) error { | |
return err | ||
} | ||
|
||
func (w *chunkWriter) WriteChunks(chks ...*ChunkMeta) error { | ||
func (w *chunkWriter) WriteChunks(chks ...ChunkMeta) error { | ||
// Calculate maximum space we need and cut a new segment in case | ||
// we don't fit into the current one. | ||
maxLen := int64(binary.MaxVarintLen32) // The number of chunks. | ||
|
@@ -238,23 +238,22 @@ func (w *chunkWriter) WriteChunks(chks ...*ChunkMeta) error { | |
} | ||
} | ||
|
||
b := make([]byte, binary.MaxVarintLen32) | ||
n := binary.PutUvarint(b, uint64(len(chks))) | ||
|
||
if err := w.write(b[:n]); err != nil { | ||
return err | ||
} | ||
seq := uint64(w.seq()) << 32 | ||
var ( | ||
b = [binary.MaxVarintLen32]byte{} | ||
seq = uint64(w.seq()) << 32 | ||
) | ||
for i := range chks { | ||
chk := &chks[i] | ||
|
||
for _, chk := range chks { | ||
chk.Ref = seq | uint64(w.n) | ||
|
||
n = binary.PutUvarint(b, uint64(len(chk.Chunk.Bytes()))) | ||
n := binary.PutUvarint(b[:], uint64(len(chk.Chunk.Bytes()))) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Just curious how does There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. We turned Anyway, |
||
|
||
if err := w.write(b[:n]); err != nil { | ||
return err | ||
} | ||
if err := w.write([]byte{byte(chk.Chunk.Encoding())}); err != nil { | ||
b[0] = byte(chk.Chunk.Encoding()) | ||
if err := w.write(b[:1]); err != nil { | ||
return err | ||
} | ||
if err := w.write(chk.Chunk.Bytes()); err != nil { | ||
|
@@ -265,7 +264,7 @@ func (w *chunkWriter) WriteChunks(chks ...*ChunkMeta) error { | |
if err := chk.writeHash(w.crc32); err != nil { | ||
return err | ||
} | ||
if err := w.write(w.crc32.Sum(nil)); err != nil { | ||
if err := w.write(w.crc32.Sum(b[:0])); err != nil { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Curious again, how does this affect it? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Same as above. |
||
return err | ||
} | ||
} | ||
|
@@ -298,15 +297,20 @@ type chunkReader struct { | |
|
||
// Closers for resources behind the byte slices. | ||
cs []io.Closer | ||
|
||
pool chunks.Pool | ||
} | ||
|
||
// newChunkReader returns a new chunkReader based on mmaped files found in dir. | ||
func newChunkReader(dir string) (*chunkReader, error) { | ||
func newChunkReader(dir string, pool chunks.Pool) (*chunkReader, error) { | ||
files, err := sequenceFiles(dir, "") | ||
if err != nil { | ||
return nil, err | ||
} | ||
var cr chunkReader | ||
if pool == nil { | ||
pool = chunks.NewPool() | ||
} | ||
cr := chunkReader{pool: pool} | ||
|
||
for _, fn := range files { | ||
f, err := openMmapFile(fn) | ||
|
@@ -353,11 +357,6 @@ func (s *chunkReader) Chunk(ref uint64) (chunks.Chunk, error) { | |
return nil, fmt.Errorf("reading chunk length failed") | ||
} | ||
b = b[n:] | ||
enc := chunks.Encoding(b[0]) | ||
|
||
c, err := chunks.FromData(enc, b[1:1+l]) | ||
if err != nil { | ||
return nil, err | ||
} | ||
return c, nil | ||
return s.pool.Get(chunks.Encoding(b[0]), b[1:1+l]) | ||
} |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
How would this affect old data? The Level for the older ones would be 0, and I don't think any of it will be compacted, so we could load the wrong heads.
Which is okay since we moved to "block-ranges" anyway, but I think this should be noted when we release.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
We are in beta... it's just a breaking change .__.