From d64be860f58731a88157ac304d6514e05c6f8b02 Mon Sep 17 00:00:00 2001
From: Mitar
Date: Sat, 30 Dec 2023 21:06:36 +0100
Subject: [PATCH] MaxTableSize has been renamed to BaseTableSize.

---
 db.go                             | 2 +-
 db_test.go                        | 2 +-
 docs/content/faq/index.md         | 2 +-
 docs/content/get-started/index.md | 2 +-
 options.go                        | 2 +-
 stream_writer_test.go             | 2 +-
 6 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/db.go b/db.go
index c79fd0253..c8b8fdb40 100644
--- a/db.go
+++ b/db.go
@@ -162,7 +162,7 @@ func checkAndSetOptions(opt *Options) error {
 	// the transaction APIs. Transaction batches entries into batches of size opt.maxBatchSize.
 	if opt.ValueThreshold > opt.maxBatchSize {
 		return errors.Errorf("Valuethreshold %d greater than max batch size of %d. Either "+
-			"reduce opt.ValueThreshold or increase opt.MaxTableSize.",
+			"reduce opt.ValueThreshold or increase opt.BaseTableSize.",
 			opt.ValueThreshold, opt.maxBatchSize)
 	}
 	// ValueLogFileSize should be stricly LESS than 2<<30 otherwise we will
diff --git a/db_test.go b/db_test.go
index 1e2306a20..ca2803874 100644
--- a/db_test.go
+++ b/db_test.go
@@ -1800,7 +1800,7 @@ func TestLSMOnly(t *testing.T) {
 
 	// Also test for error, when ValueThresholdSize is greater than maxBatchSize.
 	dopts.ValueThreshold = LSMOnlyOptions(dir).ValueThreshold
-	// maxBatchSize is calculated from MaxTableSize.
+	// maxBatchSize is calculated from BaseTableSize.
 	dopts.MemTableSize = LSMOnlyOptions(dir).ValueThreshold
 	_, err = Open(dopts)
 	require.Error(t, err, "db creation should have been failed")
diff --git a/docs/content/faq/index.md b/docs/content/faq/index.md
index ed2e06bfb..599ec2438 100644
--- a/docs/content/faq/index.md
+++ b/docs/content/faq/index.md
@@ -57,7 +57,7 @@ workloads, you should be using the `Transaction` API.
 
 If you're using Badger with `SyncWrites=false`, then your writes might not be written to value log
 and won't get synced to disk immediately. Writes to LSM tree are done inmemory first, before they
-get compacted to disk. The compaction would only happen once `MaxTableSize` has been reached. So, if
+get compacted to disk. The compaction would only happen once `BaseTableSize` has been reached. So, if
 you're doing a few writes and then checking, you might not see anything on disk. Once you `Close`
 the database, you'll see these writes on disk.
 
diff --git a/docs/content/get-started/index.md b/docs/content/get-started/index.md
index 5d318fcf8..2d7b3087c 100644
--- a/docs/content/get-started/index.md
+++ b/docs/content/get-started/index.md
@@ -603,7 +603,7 @@ the `Options` struct that is passed in when opening the database using
 - If you modify `Options.NumMemtables`, also adjust `Options.NumLevelZeroTables` and
   `Options.NumLevelZeroTablesStall` accordingly.
 - Number of concurrent compactions (`Options.NumCompactors`)
-- Size of table (`Options.MaxTableSize`)
+- Size of table (`Options.BaseTableSize`)
 - Size of value log file (`Options.ValueLogFileSize`)
 
 If you want to decrease the memory usage of Badger instance, tweak these
diff --git a/options.go b/options.go
index ac046bc1d..8d0f0a51b 100644
--- a/options.go
+++ b/options.go
@@ -463,7 +463,7 @@ func (opt Options) WithLoggingLevel(val loggingLevel) Options {
 	return opt
 }
 
-// WithBaseTableSize returns a new Options value with MaxTableSize set to the given value.
+// WithBaseTableSize returns a new Options value with BaseTableSize set to the given value.
 //
 // BaseTableSize sets the maximum size in bytes for LSM table or file in the base level.
 //
diff --git a/stream_writer_test.go b/stream_writer_test.go
index 4d18db8b1..6d8610df1 100644
--- a/stream_writer_test.go
+++ b/stream_writer_test.go
@@ -349,7 +349,7 @@ func TestStreamWriter6(t *testing.T) {
 		}
 	}
 
-	// list has 3 pairs for equal keys. Since each Key has size equal to MaxTableSize
+	// list has 3 pairs for equal keys. Since each Key has size equal to BaseTableSize
 	// we would have 6 tables, if keys are not equal. Here we should have 3 tables.
 	sw := db.NewStreamWriter()
 	require.NoError(t, sw.Prepare(), "sw.Prepare() failed")
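---

Note for reviewers (not part of the patch): a minimal sketch of how a caller sets the option the docs now refer to as `BaseTableSize`. It assumes the v4 module path `github.com/dgraph-io/badger/v4`, a scratch directory, and an arbitrary 8 MB table size; adjust all three to your setup.

```go
package main

import (
	"log"

	badger "github.com/dgraph-io/badger/v4"
)

func main() {
	// BaseTableSize (formerly MaxTableSize) caps the size in bytes of LSM
	// tables in the base level. 8 << 20 (8 MB) is an arbitrary example value.
	opts := badger.DefaultOptions("/tmp/badger-example").
		WithBaseTableSize(8 << 20)

	db, err := badger.Open(opts)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```

As the db.go hunk shows, raising `BaseTableSize` also raises the derived `maxBatchSize`, which is one way to clear the "Valuethreshold greater than max batch size" error when `ValueThreshold` is large.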