diff --git a/CHANGELOG.md b/CHANGELOG.md
index 799a8d52f7e1..c34174701df8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -102,6 +102,7 @@ Ref: https://keepachangelog.com/en/1.0.0/
 * (baseapp) [#15023](https://github.com/cosmos/cosmos-sdk/pull/15023) & [#15213](https://github.com/cosmos/cosmos-sdk/pull/15213) Add `MessageRouter` interface to baseapp and pass it to authz, gov and groups instead of concrete type.
 * (simtestutil) [#15305](https://github.com/cosmos/cosmos-sdk/pull/15305) Add `AppStateFnWithExtendedCb` with callback function to extend rawState.
 * (x/consensus) [#15553](https://github.com/cosmos/cosmos-sdk/pull/15553) Migrate consensus module to use collections
+* (store/cachekv) [#15767](https://github.com/cosmos/cosmos-sdk/pull/15767) Reduce peak RAM usage during and after InitGenesis
 * (x/bank) [#15764](https://github.com/cosmos/cosmos-sdk/pull/15764) Speedup x/bank InitGenesis
 * (x/auth) [#15867](https://github.com/cosmos/cosmos-sdk/pull/15867) Support better logging for signature verification failure.
diff --git a/store/cachekv/store.go b/store/cachekv/store.go
index c2b1205243a9..f25a4c25f123 100644
--- a/store/cachekv/store.go
+++ b/store/cachekv/store.go
@@ -93,6 +93,28 @@ func (store *Store) Delete(key []byte) {
 	store.setCacheValue(key, nil, true)
 }
 
+func (store *Store) resetCaches() {
+	if len(store.cache) > 100_000 {
+		// Cache is too large. We likely did something linear time
+		// (e.g. Epoch block, Genesis block, etc). Free the old caches from memory, and let them get re-allocated.
+		// TODO: In a future CacheKV redesign, such linear workloads should get into a different cache instantiation.
+		// 100_000 is arbitrarily chosen as it solved Osmosis' InitGenesis RAM problem.
+		store.cache = make(map[string]*cValue)
+		store.unsortedCache = make(map[string]struct{})
+	} else {
+		// Clear the cache using the map clearing idiom
+		// and not allocating fresh objects.
+		// Please see https://bencher.orijtech.com/perfclinic/mapclearing/
+		for key := range store.cache {
+			delete(store.cache, key)
+		}
+		for key := range store.unsortedCache {
+			delete(store.unsortedCache, key)
+		}
+	}
+	store.sortedCache = internal.NewBTree()
+}
+
 // Implements Cachetypes.KVStore.
 func (store *Store) Write() {
 	store.mtx.Lock()
@@ -103,44 +125,40 @@ func (store *Store) Write() {
 		return
 	}
 
+	type cEntry struct {
+		key string
+		val *cValue
+	}
+
 	// We need a copy of all of the keys.
-	// Not the best, but probably not a bottleneck depending.
-	keys := make([]string, 0, len(store.cache))
+	// Not the best. To reduce RAM pressure, we copy the values as well
+	// and clear out the old caches right after the copy.
+	sortedCache := make([]cEntry, 0, len(store.cache))
 	for key, dbValue := range store.cache {
 		if dbValue.dirty {
-			keys = append(keys, key)
+			sortedCache = append(sortedCache, cEntry{key, dbValue})
 		}
 	}
-
-	sort.Strings(keys)
+	store.resetCaches()
+	sort.Slice(sortedCache, func(i, j int) bool {
+		return sortedCache[i].key < sortedCache[j].key
+	})
 
 	// TODO: Consider allowing usage of Batch, which would allow the write to
 	// at least happen atomically.
-	for _, key := range keys {
+	for _, obj := range sortedCache {
 		// We use []byte(key) instead of conv.UnsafeStrToBytes because we cannot
 		// be sure if the underlying store might do a save with the byteslice or
 		// not. Once we get confirmation that .Delete is guaranteed not to
 		// save the byteslice, then we can assume only a read-only copy is sufficient.
-		cacheValue := store.cache[key]
-		if cacheValue.value != nil {
+		if obj.val.value != nil {
 			// It already exists in the parent, hence update it.
-			store.parent.Set([]byte(key), cacheValue.value)
+			store.parent.Set([]byte(obj.key), obj.val.value)
 		} else {
-			store.parent.Delete([]byte(key))
+			store.parent.Delete([]byte(obj.key))
 		}
 	}
-
-	// Clear the cache using the map clearing idiom
-	// and not allocating fresh objects.
-	// Please see https://bencher.orijtech.com/perfclinic/mapclearing/
-	for key := range store.cache {
-		delete(store.cache, key)
-	}
-	for key := range store.unsortedCache {
-		delete(store.unsortedCache, key)
-	}
-	store.sortedCache = internal.NewBTree()
 }
 
 // CacheWrap implements CacheWrapper.
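
Note for reviewers: below is a minimal, standalone sketch of the two reset strategies `resetCaches` chooses between, runnable outside the SDK. The `resetThreshold` constant and `resetMap` helper are hypothetical names introduced for illustration only; they are not part of this PR.

```go
package main

import "fmt"

// resetThreshold mirrors the arbitrary 100_000 cutoff used in resetCaches.
const resetThreshold = 100_000

// resetMap clears a map using one of two strategies:
//   - over the threshold: drop the reference and allocate a fresh map, so the
//     GC can reclaim buckets that grew huge during a linear workload
//     (e.g. InitGenesis) and peak RAM can actually come back down;
//   - under the threshold: delete every key in place. Go compiles a
//     `for k := range m { delete(m, k) }` loop into a fast internal map
//     clear, and keeping the buckets makes steady-state reuse allocation-free.
func resetMap(m map[string]struct{}) map[string]struct{} {
	if len(m) > resetThreshold {
		return make(map[string]struct{})
	}
	for k := range m {
		delete(m, k)
	}
	return m
}

func main() {
	m := map[string]struct{}{"a": {}, "b": {}}
	m = resetMap(m)
	fmt.Println(len(m)) // 0; the small map keeps its buckets for reuse
}
```

The same tradeoff motivates the `Write()` change: dirty entries are copied out into a slice first, so `resetCaches` can release (or clear) the maps before the sorted writeback, rather than keeping the full cache alive until the end of `Write()`.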