From c12c7fb6b8e75eb17bef4a394736f0548e53722d Mon Sep 17 00:00:00 2001 From: Dev Ojha Date: Sun, 9 Apr 2023 20:12:37 +0200 Subject: [PATCH 1/3] Clear cacheKV RAM if it's too large --- store/cachekv/store.go | 59 +++++++++++++++++++++++++++--------------- 1 file changed, 38 insertions(+), 21 deletions(-) diff --git a/store/cachekv/store.go b/store/cachekv/store.go index c2b1205243a9..ec53a87efd15 100644 --- a/store/cachekv/store.go +++ b/store/cachekv/store.go @@ -93,6 +93,32 @@ func (store *Store) Delete(key []byte) { store.setCacheValue(key, nil, true) } +type cEntry struct { + key string + val *cValue +} + +func (store *Store) deleteCaches() { + if len(store.cache) > 100_000 { + // Cache is too large. We likely did something linear time + // (e.g. Epoch block, Genesis block, etc). Free the old caches from memory, and let them get re-allocated. + // TODO: In a future CacheKV redesign, such linear workloads should get into a different cache instantiation. + store.cache = make(map[string]*cValue) + store.unsortedCache = make(map[string]struct{}) + } else { + // Clear the cache using the map clearing idiom + // and not allocating fresh objects. + // Please see https://bencher.orijtech.com/perfclinic/mapclearing/ + for key := range store.cache { + delete(store.cache, key) + } + for key := range store.unsortedCache { + delete(store.unsortedCache, key) + } + } + store.sortedCache = internal.NewBTree() +} + // Implements Cachetypes.KVStore. func (store *Store) Write() { store.mtx.Lock() @@ -104,43 +130,34 @@ func (store *Store) Write() { } // We need a copy of all of the keys. - // Not the best, but probably not a bottleneck depending. - keys := make([]string, 0, len(store.cache)) + // Not the best. To reduce RAM pressure, we copy the values as well + // and clear out the old caches right after the copy. 
+ sortedCache := make([]cEntry, 0, len(store.cache)) for key, dbValue := range store.cache { if dbValue.dirty { - keys = append(keys, key) + sortedCache = append(sortedCache, cEntry{key, dbValue}) } } - - sort.Strings(keys) + store.deleteCaches() + sort.Slice(sortedCache, func(i, j int) bool { + return sortedCache[i].key < sortedCache[j].key + }) // TODO: Consider allowing usage of Batch, which would allow the write to // at least happen atomically. - for _, key := range keys { + for _, obj := range sortedCache { // We use []byte(key) instead of conv.UnsafeStrToBytes because we cannot // be sure if the underlying store might do a save with the byteslice or // not. Once we get confirmation that .Delete is guaranteed not to // save the byteslice, then we can assume only a read-only copy is sufficient. - cacheValue := store.cache[key] - if cacheValue.value != nil { + if obj.val.value != nil { // It already exists in the parent, hence update it. - store.parent.Set([]byte(key), cacheValue.value) + store.parent.Set([]byte(obj.key), obj.val.value) } else { - store.parent.Delete([]byte(key)) + store.parent.Delete([]byte(obj.key)) } } - - // Clear the cache using the map clearing idiom - // and not allocating fresh objects. - // Please see https://bencher.orijtech.com/perfclinic/mapclearing/ - for key := range store.cache { - delete(store.cache, key) - } - for key := range store.unsortedCache { - delete(store.unsortedCache, key) - } - store.sortedCache = internal.NewBTree() } // CacheWrap implements CacheWrapper. 
From 03c303b0d87848b72f0ef0e893eeb0beed7da92f Mon Sep 17 00:00:00 2001 From: Dev Ojha Date: Sun, 9 Apr 2023 20:16:42 +0200 Subject: [PATCH 2/3] Add Changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fc3843c0a30f..135520a29e48 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -95,6 +95,7 @@ Ref: https://keepachangelog.com/en/1.0.0/ * (baseapp) [#15023](https://github.com/cosmos/cosmos-sdk/pull/15023) & [#15213](https://github.com/cosmos/cosmos-sdk/pull/15213) Add `MessageRouter` interface to baseapp and pass it to authz, gov and groups instead of concrete type. * (simtestutil) [#15305](https://github.com/cosmos/cosmos-sdk/pull/15305) Add `AppStateFnWithExtendedCb` with callback function to extend rawState. * (x/consensus) [#15553](https://github.com/cosmos/cosmos-sdk/pull/15553) Migrate consensus module to use collections +* (store/cachekv) [#15767](https://github.com/cosmos/cosmos-sdk/pull/15767) Reduce peak RAM usage during and after InitGenesis ### State Machine Breaking From 1b10a724d610e5deea7511a72d7d9d47f8a2cd91 Mon Sep 17 00:00:00 2001 From: Dev Ojha Date: Wed, 12 Apr 2023 17:46:47 +0200 Subject: [PATCH 3/3] Apply desired code cleanup --- store/cachekv/store.go | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/store/cachekv/store.go b/store/cachekv/store.go index ec53a87efd15..f25a4c25f123 100644 --- a/store/cachekv/store.go +++ b/store/cachekv/store.go @@ -93,16 +93,12 @@ func (store *Store) Delete(key []byte) { store.setCacheValue(key, nil, true) } -type cEntry struct { - key string - val *cValue -} - -func (store *Store) deleteCaches() { +func (store *Store) resetCaches() { if len(store.cache) > 100_000 { // Cache is too large. We likely did something linear time // (e.g. Epoch block, Genesis block, etc). Free the old caches from memory, and let them get re-allocated. 
// TODO: In a future CacheKV redesign, such linear workloads should get into a different cache instantiation. + // 100_000 is arbitrarily chosen as it solved Osmosis' InitGenesis RAM problem. store.cache = make(map[string]*cValue) store.unsortedCache = make(map[string]struct{}) } else { @@ -129,6 +125,11 @@ func (store *Store) Write() { return } + type cEntry struct { + key string + val *cValue + } + // We need a copy of all of the keys. // Not the best. To reduce RAM pressure, we copy the values as well // and clear out the old caches right after the copy. @@ -139,7 +140,7 @@ func (store *Store) Write() { sortedCache = append(sortedCache, cEntry{key, dbValue}) } } - store.deleteCaches() + store.resetCaches() sort.Slice(sortedCache, func(i, j int) bool { return sortedCache[i].key < sortedCache[j].key })