From 26afbaf0d17977796355b6c63a5e10ed684eb670 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Sat, 30 Nov 2024 04:43:56 +0100 Subject: [PATCH 01/17] fix(store/v2): don't delete future version when calling LoadVersion (#22681) --- store/v2/commitment/iavl/tree.go | 7 +++ store/v2/commitment/iavlv2/tree.go | 4 ++ store/v2/commitment/mem/tree.go | 4 ++ store/v2/commitment/store.go | 25 ++++++++--- store/v2/commitment/tree.go | 1 + store/v2/database.go | 4 ++ store/v2/mock/db_mock.go | 14 ++++++ store/v2/root/store.go | 27 +++++++++--- store/v2/root/store_test.go | 68 ++++++++++++++++++++++++++++++ store/v2/store.go | 4 ++ 10 files changed, 147 insertions(+), 11 deletions(-) diff --git a/store/v2/commitment/iavl/tree.go b/store/v2/commitment/iavl/tree.go index 5503218e66a1..5047e8ef6ed4 100644 --- a/store/v2/commitment/iavl/tree.go +++ b/store/v2/commitment/iavl/tree.go @@ -65,6 +65,13 @@ func (t *IavlTree) WorkingHash() []byte { // LoadVersion loads the state at the given version. func (t *IavlTree) LoadVersion(version uint64) error { + _, err := t.tree.LoadVersion(int64(version)) + return err +} + +// LoadVersionForOverwriting loads the state at the given version. +// Any versions greater than targetVersion will be deleted. 
+func (t *IavlTree) LoadVersionForOverwriting(version uint64) error { return t.tree.LoadVersionForOverwriting(int64(version)) } diff --git a/store/v2/commitment/iavlv2/tree.go b/store/v2/commitment/iavlv2/tree.go index 997a0a60cc1a..14b7967a6c78 100644 --- a/store/v2/commitment/iavlv2/tree.go +++ b/store/v2/commitment/iavlv2/tree.go @@ -64,6 +64,10 @@ func (t *Tree) LoadVersion(version uint64) error { return t.tree.LoadVersion(int64(version)) } +func (t *Tree) LoadVersionForOverwriting(version uint64) error { + return t.LoadVersion(version) // TODO: implement overwriting +} + func (t *Tree) Commit() ([]byte, uint64, error) { h, v, err := t.tree.SaveVersion() return h, uint64(v), err diff --git a/store/v2/commitment/mem/tree.go b/store/v2/commitment/mem/tree.go index cbc28ce7d9ae..bf0e95bfa9c1 100644 --- a/store/v2/commitment/mem/tree.go +++ b/store/v2/commitment/mem/tree.go @@ -34,6 +34,10 @@ func (t *Tree) LoadVersion(version uint64) error { return nil } +func (t *Tree) LoadVersionForOverwriting(version uint64) error { + return nil +} + func (t *Tree) Commit() ([]byte, uint64, error) { return nil, 0, nil } diff --git a/store/v2/commitment/store.go b/store/v2/commitment/store.go index e9f2ee8379c7..5219255f95ca 100644 --- a/store/v2/commitment/store.go +++ b/store/v2/commitment/store.go @@ -87,7 +87,16 @@ func (c *CommitStore) LoadVersion(targetVersion uint64) error { for storeKey := range c.multiTrees { storeKeys = append(storeKeys, storeKey) } - return c.loadVersion(targetVersion, storeKeys) + return c.loadVersion(targetVersion, storeKeys, false) +} + +func (c *CommitStore) LoadVersionForOverwriting(targetVersion uint64) error { + storeKeys := make([]string, 0, len(c.multiTrees)) + for storeKey := range c.multiTrees { + storeKeys = append(storeKeys, storeKey) + } + + return c.loadVersion(targetVersion, storeKeys, true) } // LoadVersionAndUpgrade implements store.UpgradeableStore. 
@@ -133,10 +142,10 @@ func (c *CommitStore) LoadVersionAndUpgrade(targetVersion uint64, upgrades *core return err } - return c.loadVersion(targetVersion, newStoreKeys) + return c.loadVersion(targetVersion, newStoreKeys, true) } -func (c *CommitStore) loadVersion(targetVersion uint64, storeKeys []string) error { +func (c *CommitStore) loadVersion(targetVersion uint64, storeKeys []string, overrideAfter bool) error { // Rollback the metadata to the target version. latestVersion, err := c.GetLatestVersion() if err != nil { @@ -154,8 +163,14 @@ func (c *CommitStore) loadVersion(targetVersion uint64, storeKeys []string) erro } for _, storeKey := range storeKeys { - if err := c.multiTrees[storeKey].LoadVersion(targetVersion); err != nil { - return err + if overrideAfter { + if err := c.multiTrees[storeKey].LoadVersionForOverwriting(targetVersion); err != nil { + return err + } + } else { + if err := c.multiTrees[storeKey].LoadVersion(targetVersion); err != nil { + return err + } } } diff --git a/store/v2/commitment/tree.go b/store/v2/commitment/tree.go index f57eabd20724..58a8b20beff2 100644 --- a/store/v2/commitment/tree.go +++ b/store/v2/commitment/tree.go @@ -25,6 +25,7 @@ type Tree interface { Version() uint64 LoadVersion(version uint64) error + LoadVersionForOverwriting(version uint64) error Commit() ([]byte, uint64, error) SetInitialVersion(version uint64) error GetProof(version uint64, key []byte) (*ics23.CommitmentProof, error) diff --git a/store/v2/database.go b/store/v2/database.go index 27d0973ec18e..e3361d731024 100644 --- a/store/v2/database.go +++ b/store/v2/database.go @@ -50,6 +50,10 @@ type Committer interface { // LoadVersion loads the tree at the given version. LoadVersion(targetVersion uint64) error + // LoadVersionForOverwriting loads the tree at the given version. + // Any versions greater than targetVersion will be deleted. + LoadVersionForOverwriting(targetVersion uint64) error + // Commit commits the working tree to the database. 
Commit(version uint64) (*proof.CommitInfo, error) diff --git a/store/v2/mock/db_mock.go b/store/v2/mock/db_mock.go index 9b962affb102..ba65f2baf243 100644 --- a/store/v2/mock/db_mock.go +++ b/store/v2/mock/db_mock.go @@ -158,6 +158,20 @@ func (mr *MockStateCommitterMockRecorder) LoadVersionAndUpgrade(version, upgrade return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadVersionAndUpgrade", reflect.TypeOf((*MockStateCommitter)(nil).LoadVersionAndUpgrade), version, upgrades) } +// LoadVersionForOverwriting mocks base method. +func (m *MockStateCommitter) LoadVersionForOverwriting(targetVersion uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LoadVersionForOverwriting", targetVersion) + ret0, _ := ret[0].(error) + return ret0 +} + +// LoadVersionForOverwriting indicates an expected call of LoadVersionForOverwriting. +func (mr *MockStateCommitterMockRecorder) LoadVersionForOverwriting(targetVersion any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadVersionForOverwriting", reflect.TypeOf((*MockStateCommitter)(nil).LoadVersionForOverwriting), targetVersion) +} + // PausePruning mocks base method. 
func (m *MockStateCommitter) PausePruning(pause bool) { m.ctrl.T.Helper() diff --git a/store/v2/root/store.go b/store/v2/root/store.go index 59363e2fb35b..b40baef6424e 100644 --- a/store/v2/root/store.go +++ b/store/v2/root/store.go @@ -250,7 +250,7 @@ func (s *Store) LoadLatestVersion() error { return err } - return s.loadVersion(lv, nil) + return s.loadVersion(lv, nil, false) } func (s *Store) LoadVersion(version uint64) error { @@ -259,7 +259,16 @@ func (s *Store) LoadVersion(version uint64) error { defer s.telemetry.MeasureSince(now, "root_store", "load_version") } - return s.loadVersion(version, nil) + return s.loadVersion(version, nil, false) +} + +func (s *Store) LoadVersionForOverwriting(version uint64) error { + if s.telemetry != nil { + now := time.Now() + defer s.telemetry.MeasureSince(now, "root_store", "load_version_for_overwriting") + } + + return s.loadVersion(version, nil, true) } // LoadVersionAndUpgrade implements the UpgradeableStore interface. @@ -278,7 +287,7 @@ func (s *Store) LoadVersionAndUpgrade(version uint64, upgrades *corestore.StoreU return errors.New("cannot upgrade while migrating") } - if err := s.loadVersion(version, upgrades); err != nil { + if err := s.loadVersion(version, upgrades, true); err != nil { return err } @@ -294,12 +303,18 @@ func (s *Store) LoadVersionAndUpgrade(version uint64, upgrades *corestore.StoreU return nil } -func (s *Store) loadVersion(v uint64, upgrades *corestore.StoreUpgrades) error { +func (s *Store) loadVersion(v uint64, upgrades *corestore.StoreUpgrades, overrideAfter bool) error { s.logger.Debug("loading version", "version", v) if upgrades == nil { - if err := s.stateCommitment.LoadVersion(v); err != nil { - return fmt.Errorf("failed to load SC version %d: %w", v, err) + if !overrideAfter { + if err := s.stateCommitment.LoadVersion(v); err != nil { + return fmt.Errorf("failed to load SC version %d: %w", v, err) + } + } else { + if err := s.stateCommitment.LoadVersionForOverwriting(v); err != nil { + 
return fmt.Errorf("failed to load SC version %d: %w", v, err) + } } } else { // if upgrades are provided, we need to load the version and apply the upgrades diff --git a/store/v2/root/store_test.go b/store/v2/root/store_test.go index 59a490b11b00..8bb6b5604e2d 100644 --- a/store/v2/root/store_test.go +++ b/store/v2/root/store_test.go @@ -256,6 +256,74 @@ func (s *RootStoreTestSuite) TestLoadVersion() { s.Require().NoError(err) s.Require().Equal([]byte("val003"), val) + // attempt to write and commit a few changesets + for v := 4; v <= 5; v++ { + val := fmt.Sprintf("overwritten_val%03d", v) // overwritten_val004, overwritten_val005 + + cs := corestore.NewChangeset(uint64(v)) + cs.Add(testStoreKeyBytes, []byte("key"), []byte(val), false) + + _, err := s.rootStore.Commit(cs) + s.Require().Error(err) + } + + // ensure the latest version is correct + latest, err = s.rootStore.GetLatestVersion() + s.Require().NoError(err) + s.Require().Equal(uint64(3), latest) // should have stayed at 3 after failed commits + + // query state and ensure values returned are based on the loaded version + _, ro, err = s.rootStore.StateLatest() + s.Require().NoError(err) + + reader, err = ro.GetReader(testStoreKeyBytes) + s.Require().NoError(err) + val, err = reader.Get([]byte("key")) + s.Require().NoError(err) + s.Require().Equal([]byte("val003"), val) +} + +func (s *RootStoreTestSuite) TestLoadVersionForOverwriting() { + // write and commit a few changesets + for v := uint64(1); v <= 5; v++ { + val := fmt.Sprintf("val%03d", v) // val001, val002, ..., val005 + + cs := corestore.NewChangeset(v) + cs.Add(testStoreKeyBytes, []byte("key"), []byte(val), false) + + commitHash, err := s.rootStore.Commit(cs) + s.Require().NoError(err) + s.Require().NotNil(commitHash) + } + + // ensure the latest version is correct + latest, err := s.rootStore.GetLatestVersion() + s.Require().NoError(err) + s.Require().Equal(uint64(5), latest) + + // attempt to load a non-existent version + err = 
s.rootStore.LoadVersionForOverwriting(6) + s.Require().Error(err) + + // attempt to load a previously committed version + err = s.rootStore.LoadVersionForOverwriting(3) + s.Require().NoError(err) + + // ensure the latest version is correct + latest, err = s.rootStore.GetLatestVersion() + s.Require().NoError(err) + s.Require().Equal(uint64(3), latest) + + // query state and ensure values returned are based on the loaded version + _, ro, err := s.rootStore.StateLatest() + s.Require().NoError(err) + + reader, err := ro.GetReader(testStoreKeyBytes) + s.Require().NoError(err) + val, err := reader.Get([]byte("key")) + s.Require().NoError(err) + s.Require().Equal([]byte("val003"), val) + // attempt to write and commit a few changesets for v := 4; v <= 5; v++ { val := fmt.Sprintf("overwritten_val%03d", v) // overwritten_val004, overwritten_val005 diff --git a/store/v2/store.go b/store/v2/store.go index 124d7de579a1..bf967d0f78a6 100644 --- a/store/v2/store.go +++ b/store/v2/store.go @@ -30,6 +30,10 @@ type RootStore interface { // LoadVersion loads the RootStore to the given version. LoadVersion(version uint64) error + // LoadVersionForOverwriting loads the state at the given version. + // Any versions greater than targetVersion will be deleted. + LoadVersionForOverwriting(version uint64) error + // LoadLatestVersion behaves identically to LoadVersion except it loads the // latest version implicitly. 
LoadLatestVersion() error From 40430febc55b7b728cb8da65a16ba2c77cfbdafe Mon Sep 17 00:00:00 2001 From: Matt Kocubinski Date: Sun, 1 Dec 2024 12:49:31 -0600 Subject: [PATCH 02/17] fix(store/v2): iavl/v2 reverse iterator (#22699) Co-authored-by: Marko --- store/v2/commitment/iavlv2/tree.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/store/v2/commitment/iavlv2/tree.go b/store/v2/commitment/iavlv2/tree.go index 14b7967a6c78..43c25a23a2c8 100644 --- a/store/v2/commitment/iavlv2/tree.go +++ b/store/v2/commitment/iavlv2/tree.go @@ -113,7 +113,11 @@ func (t *Tree) Iterator(version uint64, start, end []byte, ascending bool) (core if int64(version) != t.tree.Version() { return nil, fmt.Errorf("loading past version not yet supported") } - return t.tree.Iterator(start, end, ascending) + if ascending { + return t.tree.Iterator(start, end, false) + } else { + return t.tree.ReverseIterator(start, end) + } } func (t *Tree) Export(version uint64) (commitment.Exporter, error) { From c207a45051b37959ca1046f31af1cf5bc12175a1 Mon Sep 17 00:00:00 2001 From: mmsqe Date: Mon, 2 Dec 2024 15:59:29 +0800 Subject: [PATCH 03/17] fix(simapp/v2): add binary version cmd (#22705) --- scripts/build/build.mk | 4 +++- simapp/v2/simdv2/cmd/commands.go | 2 ++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/scripts/build/build.mk b/scripts/build/build.mk index 3537c3f23123..92f7b6a20e9f 100644 --- a/scripts/build/build.mk +++ b/scripts/build/build.mk @@ -7,6 +7,7 @@ LEDGER_ENABLED ?= true BINDIR ?= $(GOPATH)/bin BUILDDIR ?= $(CURDIR)/build SIMAPP = simapp +APPNAME = simd MOCKS_DIR = $(CURDIR)/tests/mocks HTTPS_GIT := https://github.com/cosmos/cosmos-sdk.git DOCKER := $(shell which docker) @@ -53,6 +54,7 @@ endif ifeq (v2,$(findstring v2,$(COSMOS_BUILD_OPTIONS))) SIMAPP = simapp/v2 + APPNAME = simdv2 endif # DB backend selection @@ -86,7 +88,7 @@ build_tags_comma_sep := $(subst $(whitespace),$(comma),$(build_tags)) # process linker flags ldflags = -X 
github.com/cosmos/cosmos-sdk/version.Name=sim \ - -X github.com/cosmos/cosmos-sdk/version.AppName=simd \ + -X github.com/cosmos/cosmos-sdk/version.AppName=$(APPNAME) \ -X github.com/cosmos/cosmos-sdk/version.Version=$(VERSION) \ -X github.com/cosmos/cosmos-sdk/version.Commit=$(COMMIT) \ -X "github.com/cosmos/cosmos-sdk/version.BuildTags=$(build_tags_comma_sep)" diff --git a/simapp/v2/simdv2/cmd/commands.go b/simapp/v2/simdv2/cmd/commands.go index 1edd3052ec4c..9b587e6dc477 100644 --- a/simapp/v2/simdv2/cmd/commands.go +++ b/simapp/v2/simdv2/cmd/commands.go @@ -26,6 +26,7 @@ import ( "github.com/cosmos/cosmos-sdk/client/rpc" sdktelemetry "github.com/cosmos/cosmos-sdk/telemetry" sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/version" authcmd "github.com/cosmos/cosmos-sdk/x/auth/client/cli" "github.com/cosmos/cosmos-sdk/x/genutil" genutilcli "github.com/cosmos/cosmos-sdk/x/genutil/client/cli" @@ -65,6 +66,7 @@ func InitRootCmd[T transaction.Tx]( txCommand(), keys.Commands(), offchain.OffChain(), + version.NewVersionCommand(), ) // build CLI skeleton for initial config parsing or a client application invocation From e7fe651b4900bdf768accab87f354c6272f9357c Mon Sep 17 00:00:00 2001 From: mmsqe Date: Mon, 2 Dec 2024 18:32:49 +0800 Subject: [PATCH 04/17] fix(simapp/v2): failed to start HTTP server on port 8080 conflict (#22687) --- simapp/v2/simdv2/cmd/testnet.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/simapp/v2/simdv2/cmd/testnet.go b/simapp/v2/simdv2/cmd/testnet.go index ed5786fb668c..dab0f327d037 100644 --- a/simapp/v2/simdv2/cmd/testnet.go +++ b/simapp/v2/simdv2/cmd/testnet.go @@ -20,6 +20,7 @@ import ( runtimev2 "cosmossdk.io/runtime/v2" serverv2 "cosmossdk.io/server/v2" "cosmossdk.io/server/v2/api/grpc" + "cosmossdk.io/server/v2/api/rest" "cosmossdk.io/server/v2/cometbft" 
"cosmossdk.io/server/v2/store" banktypes "cosmossdk.io/x/bank/types" @@ -184,6 +185,7 @@ func initTestnetFiles[T transaction.Tx]( rpcPort = 26657 apiPort = 1317 grpcPort = 9090 + restPort = 8080 ) p2pPortStart := 26656 @@ -192,6 +194,7 @@ func initTestnetFiles[T transaction.Tx]( for i := 0; i < args.numValidators; i++ { var portOffset int grpcConfig := grpc.DefaultConfig() + restConfig := rest.DefaultConfig() if args.singleMachine { portOffset = i p2pPortStart = 16656 // use different start point to not conflict with rpc port @@ -205,6 +208,11 @@ func initTestnetFiles[T transaction.Tx]( MaxRecvMsgSize: grpc.DefaultConfig().MaxRecvMsgSize, MaxSendMsgSize: grpc.DefaultConfig().MaxSendMsgSize, } + + restConfig = &rest.Config{ + Enable: true, + Address: fmt.Sprintf("127.0.0.1:%d", restPort+portOffset), + } } nodeDirName := fmt.Sprintf("%s%d", args.nodeDirPrefix, i) @@ -338,7 +346,8 @@ func initTestnetFiles[T transaction.Tx]( cometServer := cometbft.NewWithConfigOptions[T](cometbft.OverwriteDefaultConfigTomlConfig(nodeConfig)) storeServer := &store.Server[T]{} grpcServer := grpc.NewWithConfigOptions[T](grpc.OverwriteDefaultConfig(grpcConfig)) - server := serverv2.NewServer[T](serverCfg, cometServer, storeServer, grpcServer) + restServer := rest.NewWithConfigOptions[T](rest.OverwriteDefaultConfig(restConfig)) + server := serverv2.NewServer[T](serverCfg, cometServer, storeServer, grpcServer, restServer) err = server.WriteConfig(filepath.Join(nodeDir, "config")) if err != nil { return err From f350775d0ed21ccfa36643698215dc612281bb16 Mon Sep 17 00:00:00 2001 From: mmsqe Date: Mon, 2 Dec 2024 19:51:47 +0800 Subject: [PATCH 05/17] feat(server/v2/grpcgateway): register grpcgateway server and module endpoints (#22701) Co-authored-by: Julien Robert --- server/v2/api/grpcgateway/server.go | 17 ++++++++++------- simapp/v2/simdv2/cmd/commands.go | 20 ++++++++++++++++++++ simapp/v2/simdv2/cmd/testnet.go | 11 ++++++++++- 3 files changed, 40 insertions(+), 8 deletions(-) diff --git 
a/server/v2/api/grpcgateway/server.go b/server/v2/api/grpcgateway/server.go index 7fba8ce1be20..16a622a94961 100644 --- a/server/v2/api/grpcgateway/server.go +++ b/server/v2/api/grpcgateway/server.go @@ -9,7 +9,6 @@ import ( gateway "github.com/cosmos/gogogateway" "github.com/cosmos/gogoproto/jsonpb" "github.com/grpc-ecosystem/grpc-gateway/runtime" - "google.golang.org/grpc" "cosmossdk.io/core/server" "cosmossdk.io/core/transaction" @@ -30,15 +29,13 @@ type Server[T transaction.Tx] struct { cfgOptions []CfgOption server *http.Server - gRPCSrv *grpc.Server - gRPCGatewayRouter *runtime.ServeMux + GRPCGatewayRouter *runtime.ServeMux } // New creates a new gRPC-gateway server. func New[T transaction.Tx]( logger log.Logger, config server.ConfigMap, - grpcSrv *grpc.Server, ir jsonpb.AnyResolver, cfgOptions ...CfgOption, ) (*Server[T], error) { @@ -52,8 +49,7 @@ func New[T transaction.Tx]( } s := &Server[T]{ - gRPCSrv: grpcSrv, - gRPCGatewayRouter: runtime.NewServeMux( + GRPCGatewayRouter: runtime.NewServeMux( // Custom marshaler option is required for gogo proto runtime.WithMarshalerOption(runtime.MIMEWildcard, marshalerOption), @@ -83,6 +79,13 @@ func New[T transaction.Tx]( return s, nil } +// NewWithConfigOptions creates a new gRPC-gateway server with the provided config options. 
+func NewWithConfigOptions[T transaction.Tx](opts ...CfgOption) *Server[T] { + return &Server[T]{ + cfgOptions: opts, + } +} + func (s *Server[T]) Name() string { return ServerName } @@ -108,7 +111,7 @@ func (s *Server[T]) Start(ctx context.Context) error { } mux := http.NewServeMux() - mux.Handle("/", s.gRPCGatewayRouter) + mux.Handle("/", s.GRPCGatewayRouter) s.server = &http.Server{ Addr: s.config.Address, diff --git a/simapp/v2/simdv2/cmd/commands.go b/simapp/v2/simdv2/cmd/commands.go index 9b587e6dc477..a2eea8b49fbc 100644 --- a/simapp/v2/simdv2/cmd/commands.go +++ b/simapp/v2/simdv2/cmd/commands.go @@ -12,6 +12,7 @@ import ( runtimev2 "cosmossdk.io/runtime/v2" serverv2 "cosmossdk.io/server/v2" grpcserver "cosmossdk.io/server/v2/api/grpc" + "cosmossdk.io/server/v2/api/grpcgateway" "cosmossdk.io/server/v2/api/rest" "cosmossdk.io/server/v2/api/telemetry" "cosmossdk.io/server/v2/cometbft" @@ -26,6 +27,7 @@ import ( "github.com/cosmos/cosmos-sdk/client/rpc" sdktelemetry "github.com/cosmos/cosmos-sdk/telemetry" sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" "github.com/cosmos/cosmos-sdk/version" authcmd "github.com/cosmos/cosmos-sdk/x/auth/client/cli" "github.com/cosmos/cosmos-sdk/x/genutil" @@ -85,6 +87,7 @@ func InitRootCmd[T transaction.Tx]( &serverstore.Server[T]{}, &telemetry.Server[T]{}, &rest.Server[T]{}, + &grpcgateway.Server[T]{}, ) } @@ -142,6 +145,22 @@ func InitRootCmd[T transaction.Tx]( return nil, err } + grpcgatewayServer, err := grpcgateway.New[T]( + logger, + deps.GlobalConfig, + simApp.InterfaceRegistry(), + ) + if err != nil { + return nil, err + } + + for _, mod := range deps.ModuleManager.Modules() { + if gmod, ok := mod.(module.HasGRPCGateway); ok { + // TODO(@julienrbrt) https://github.com/cosmos/cosmos-sdk/pull/22701#pullrequestreview-2470651390 + gmod.RegisterGRPCGatewayRoutes(deps.ClientContext, grpcgatewayServer.GRPCGatewayRouter) + } + 
} + // wire server commands return serverv2.AddCommands[T]( rootCmd, @@ -154,6 +173,7 @@ func InitRootCmd[T transaction.Tx]( storeComponent, telemetryServer, restServer, + grpcgatewayServer, ) } diff --git a/simapp/v2/simdv2/cmd/testnet.go b/simapp/v2/simdv2/cmd/testnet.go index dab0f327d037..657d37f0f532 100644 --- a/simapp/v2/simdv2/cmd/testnet.go +++ b/simapp/v2/simdv2/cmd/testnet.go @@ -20,6 +20,7 @@ import ( runtimev2 "cosmossdk.io/runtime/v2" serverv2 "cosmossdk.io/server/v2" "cosmossdk.io/server/v2/api/grpc" + "cosmossdk.io/server/v2/api/grpcgateway" "cosmossdk.io/server/v2/api/rest" "cosmossdk.io/server/v2/cometbft" "cosmossdk.io/server/v2/store" @@ -194,7 +195,9 @@ func initTestnetFiles[T transaction.Tx]( for i := 0; i < args.numValidators; i++ { var portOffset int grpcConfig := grpc.DefaultConfig() + grpcgatewayConfig := grpcgateway.DefaultConfig() restConfig := rest.DefaultConfig() + if args.singleMachine { portOffset = i p2pPortStart = 16656 // use different start point to not conflict with rpc port @@ -209,6 +212,11 @@ func initTestnetFiles[T transaction.Tx]( MaxSendMsgSize: grpc.DefaultConfig().MaxSendMsgSize, } + grpcgatewayConfig = &grpcgateway.Config{ + Enable: true, + Address: fmt.Sprintf("127.0.0.1:%d", apiPort+portOffset), + } + restConfig = &rest.Config{ Enable: true, Address: fmt.Sprintf("127.0.0.1:%d", restPort+portOffset), @@ -346,8 +354,9 @@ func initTestnetFiles[T transaction.Tx]( cometServer := cometbft.NewWithConfigOptions[T](cometbft.OverwriteDefaultConfigTomlConfig(nodeConfig)) storeServer := &store.Server[T]{} grpcServer := grpc.NewWithConfigOptions[T](grpc.OverwriteDefaultConfig(grpcConfig)) + grpcgatewayServer := grpcgateway.NewWithConfigOptions[T](grpcgateway.OverwriteDefaultConfig(grpcgatewayConfig)) restServer := rest.NewWithConfigOptions[T](rest.OverwriteDefaultConfig(restConfig)) - server := serverv2.NewServer[T](serverCfg, cometServer, storeServer, grpcServer, restServer) + server := serverv2.NewServer[T](serverCfg, 
cometServer, storeServer, grpcServer, grpcgatewayServer, restServer) err = server.WriteConfig(filepath.Join(nodeDir, "config")) if err != nil { return err From 9d9c19c0f854c203ba1a202846eb9067124d4eff Mon Sep 17 00:00:00 2001 From: Marko Date: Mon, 2 Dec 2024 18:34:30 +0100 Subject: [PATCH 06/17] fix(crypto): bls compilation (#22717) --- .github/workflows/build.yml | 6 ++++++ crypto/keys/bls12_381/key_cgo.go | 13 +------------ scripts/build/build.mk | 6 ------ 3 files changed, 7 insertions(+), 18 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 2911e9d98952..e6c6f4ee640c 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -55,6 +55,12 @@ jobs: - name: Build with rocksdb backend if: matrix.go-arch == 'amd64' run: GOARCH=${{ matrix.go-arch }} COSMOS_BUILD_OPTIONS="rocksdb" make build + - name: Build with BLS12381 + if: matrix.go-arch == 'amd64' + run: GOARCH=${{ matrix.go-arch }} COSMOS_BUILD_OPTIONS="bls12381" make build + - name: Build with Secp_cgo + if: matrix.go-arch == 'amd64' + run: GOARCH=${{ matrix.go-arch }} COSMOS_BUILD_OPTIONS="secp" make build ################### ## Build Tooling ## ################### diff --git a/crypto/keys/bls12_381/key_cgo.go b/crypto/keys/bls12_381/key_cgo.go index 2470e2c6ab6b..86c6a6644690 100644 --- a/crypto/keys/bls12_381/key_cgo.go +++ b/crypto/keys/bls12_381/key_cgo.go @@ -4,13 +4,12 @@ package bls12_381 import ( "bytes" - "crypto/sha256" "errors" "fmt" "github.com/cometbft/cometbft/crypto" - "github.com/cometbft/cometbft/crypto/tmhash" "github.com/cometbft/cometbft/crypto/bls12381" + "github.com/cometbft/cometbft/crypto/tmhash" "github.com/cosmos/cosmos-sdk/codec" cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" @@ -84,11 +83,6 @@ func (privKey PrivKey) Sign(msg []byte) ([]byte, error) { return nil, err } - if len(msg) > bls12381.MaxMsgLen { - hash := sha256.Sum256(msg) - return secretKey.Sign(hash[:]) - 
} - return secretKey.Sign(msg) } @@ -151,11 +145,6 @@ func (pubKey PubKey) VerifySignature(msg, sig []byte) bool { return false } - if len(msg) > bls12381.MaxMsgLen { - hash := sha256.Sum256(msg) - msg = hash[:] - } - return pubK.VerifySignature(msg, sig) } diff --git a/scripts/build/build.mk b/scripts/build/build.mk index 92f7b6a20e9f..4727b05e98a3 100644 --- a/scripts/build/build.mk +++ b/scripts/build/build.mk @@ -58,12 +58,6 @@ ifeq (v2,$(findstring v2,$(COSMOS_BUILD_OPTIONS))) endif # DB backend selection -ifeq (cleveldb,$(findstring cleveldb,$(COSMOS_BUILD_OPTIONS))) - build_tags += gcc -endif -ifeq (badgerdb,$(findstring badgerdb,$(COSMOS_BUILD_OPTIONS))) - build_tags += badgerdb -endif # handle rocksdb ifeq (rocksdb,$(findstring rocksdb,$(COSMOS_BUILD_OPTIONS))) CGO_ENABLED=1 From 1c4dc89ead78bf749818e84318c0529354d9964a Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Tue, 3 Dec 2024 09:07:32 +0100 Subject: [PATCH 07/17] chore(schema): rename `ReferenceType` to `ReferenceableType` (#22698) --- schema/diff/field_diff.go | 12 ++++++------ schema/diff/field_diff_test.go | 2 +- schema/enum.go | 4 ++-- schema/type.go | 6 +++--- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/schema/diff/field_diff.go b/schema/diff/field_diff.go index ffc6c71ed1a4..8b2aaacaa144 100644 --- a/schema/diff/field_diff.go +++ b/schema/diff/field_diff.go @@ -3,7 +3,7 @@ package diff import "cosmossdk.io/schema" // FieldDiff represents the difference between two fields. -// The KindChanged, NullableChanged, and ReferenceTypeChanged methods can be used to determine +// The KindChanged, NullableChanged, and ReferenceableTypeChanged methods can be used to determine // what specific changes were made to the field. type FieldDiff struct { // Name is the name of the field. @@ -22,11 +22,11 @@ type FieldDiff struct { NewNullable bool // OldReferencedType is the name of the old referenced type. 
- // It will be empty if the field is not a reference type or if there was no change. + // It will be empty if the field is not a referenceable type or if there was no change. OldReferencedType string // NewReferencedType is the name of the new referenced type. - // It will be empty if the field is not a reference type or if there was no change. + // It will be empty if the field is not a referenceable type or if there was no change. NewReferencedType string } @@ -52,7 +52,7 @@ func compareField(oldField, newField schema.Field) FieldDiff { // Empty returns true if the field diff has no changes. func (d FieldDiff) Empty() bool { - return !d.KindChanged() && !d.NullableChanged() && !d.ReferenceTypeChanged() + return !d.KindChanged() && !d.NullableChanged() && !d.ReferenceableTypeChanged() } // KindChanged returns true if the field kind changed. @@ -65,7 +65,7 @@ func (d FieldDiff) NullableChanged() bool { return d.OldNullable != d.NewNullable } -// ReferenceTypeChanged returns true if the referenced type changed. -func (d FieldDiff) ReferenceTypeChanged() bool { +// ReferenceableTypeChanged returns true if the referenced type changed. 
+func (d FieldDiff) ReferenceableTypeChanged() bool { return d.OldReferencedType != d.NewReferencedType } diff --git a/schema/diff/field_diff_test.go b/schema/diff/field_diff_test.go index d584aae5a48c..925673820b53 100644 --- a/schema/diff/field_diff_test.go +++ b/schema/diff/field_diff_test.go @@ -45,7 +45,7 @@ func Test_compareField(t *testing.T) { OldReferencedType: "old", NewReferencedType: "new", }, - trueF: FieldDiff.ReferenceTypeChanged, + trueF: FieldDiff.ReferenceableTypeChanged, }, } diff --git a/schema/enum.go b/schema/enum.go index b52b01e1a522..c802eac875f5 100644 --- a/schema/enum.go +++ b/schema/enum.go @@ -41,8 +41,8 @@ func (e EnumType) TypeName() string { return e.Name } -func (EnumType) isType() {} -func (EnumType) isReferenceType() {} +func (EnumType) isType() {} +func (EnumType) isReferenceableType() {} // Validate validates the enum definition. func (e EnumType) Validate(TypeSet) error { diff --git a/schema/type.go b/schema/type.go index dfbc839ae0d4..bb02ea329e98 100644 --- a/schema/type.go +++ b/schema/type.go @@ -13,13 +13,13 @@ type Type interface { isType() } -// ReferenceType is a marker interface that all types that can be the target of Field.ReferencedType implement. +// ReferenceableType is a marker interface that all types that can be the target of Field.ReferencedType implement. // Currently, this is only EnumType. -type ReferenceType interface { +type ReferenceableType interface { Type // isReferenceType is implemented if this is a reference type. - isReferenceType() + isReferenceableType() } // TypeSet represents something that has types and allows them to be looked up by name. 
From 4d1adcf9552d63b47a997d3d3ca0391f196f2b09 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Tue, 3 Dec 2024 09:32:13 +0100 Subject: [PATCH 08/17] fix(x/auth): facultative vesting as well in simulation (#22721) --- x/auth/simulation/genesis.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/x/auth/simulation/genesis.go b/x/auth/simulation/genesis.go index 7c18882fd325..0e9f7e93eb4a 100644 --- a/x/auth/simulation/genesis.go +++ b/x/auth/simulation/genesis.go @@ -26,6 +26,13 @@ func RandomGenesisAccounts(simState *module.SimulationState) types.GenesisAccoun for i, acc := range simState.Accounts { bacc := types.NewBaseAccountWithAddress(acc.Address) + // check if vesting module is enabled + // if not, just use base account + if _, ok := simState.GenState["vesting"]; !ok { + genesisAccs[i] = bacc + continue + } + // Only consider making a vesting account once the initial bonded validator // set is exhausted due to needing to track DelegatedVesting. if !(int64(i) > simState.NumBonded && simState.Rand.Intn(100) < 50) { From 78c8057a1869148ea4f99bc1a279b4bd73699196 Mon Sep 17 00:00:00 2001 From: Dmytrol <46675332+Dimitrolito@users.noreply.github.com> Date: Tue, 3 Dec 2024 11:10:51 +0200 Subject: [PATCH 09/17] docs: fix typos in various documentation files (#22722) Co-authored-by: Julien Robert Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com> --- CODING_GUIDELINES.md | 2 +- RELEASE_PROCESS.md | 4 ++-- ROADMAP.md | 4 ++-- docs/architecture/adr-008-dCERT-group.md | 2 +- docs/architecture/adr-033-protobuf-inter-module-comm.md | 2 +- docs/architecture/adr-049-state-sync-hooks.md | 2 +- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/CODING_GUIDELINES.md b/CODING_GUIDELINES.md index 59cd1e25e9b6..1e6d2a71270b 100644 --- a/CODING_GUIDELINES.md +++ b/CODING_GUIDELINES.md @@ -148,7 +148,7 @@ Desired outcomes: As a developer, you must help the QA team by providing instructions for User Experience (UX) and functional 
testing. -### QA Team to cross check Acceptance Tests +### QA Team to cross-check Acceptance Tests Once the AT are defined, the QA team will have an overview of the behavior a user can expect and: diff --git a/RELEASE_PROCESS.md b/RELEASE_PROCESS.md index e676be0d4ab1..d3b773e8dce4 100644 --- a/RELEASE_PROCESS.md +++ b/RELEASE_PROCESS.md @@ -27,7 +27,7 @@ v1.0.0-beta1 → v1.0.0-beta2 → ... → v1.0.0-rc1 → v1.0.0-rc2 → ... → * All links must point to their respective pull request. * The `CHANGELOG.md` must contain only the changes of that specific released version. All other changelog entries must be deleted and linked to the `main` branch changelog ([example](https://github.com/cosmos/cosmos-sdk/blob/release/v0.46.x/CHANGELOG.md#previous-versions)). * Create release notes, in `RELEASE_NOTES.md`, highlighting the new features and changes in the version. This is needed so the bot knows which entries to add to the release page on GitHub. - * Additionally verify that the `UPGRADING.md` file is up to date and contains all the necessary information for upgrading to the new version. + * Additionally verify that the `UPGRADING.md` file is up-to-date and contains all the necessary information for upgrading to the new version. * Remove GitHub workflows that should not be in the release branch * `test.yml`: All standalone go module tests should be removed (expect `./simapp`, and `./tests`, SDK and modules tests). * These packages are tracked and tested directly on main. @@ -57,7 +57,7 @@ A _patch release_ is an increment of the patch number (eg: `v1.2.0` → `v1.2.1` **Patch release must not break API nor consensus.** -Updates to the release branch should come from `main` by backporting PRs (usually done by automatic cherry pick followed by a PRs to the release branch). The backports must be marked using `backport/Y` label in PR for main. 
+Updates to the release branch should come from `main` by backporting PRs (usually done by automatic cherry-pick followed by PRs to the release branch). The backports must be marked using `backport/Y` label in PR for main. It is the PR author's responsibility to fix merge conflicts, update changelog entries, and ensure CI passes. If a PR originates from an external contributor, a core team member assumes responsibility to perform this process instead of the original author. diff --git a/ROADMAP.md b/ROADMAP.md index 86bd7e25c528..cc59ca813fbf 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -145,7 +145,7 @@ Issue: https://github.com/cosmos/iavl/issues/548 * Toolkit/SDK ADR. * Objective: - * Produce a RFC/ADR on how to make core composable + * Produce an RFC/ADR on how to make core composable * Merge RFC/ADR into main * Progress: * on pause until abci 2.0 integration is completed @@ -267,4 +267,4 @@ Issue: https://github.com/cosmos/iavl/issues/548 -This document will be updated at the end of the quarter on what was achieved and what was not. Shortly before the quarter concludes a new section will be added for the next quarter. We are working on updating the complete one year roadmap and will be posting it here as well. +This document will be updated at the end of the quarter on what was achieved and what was not. Shortly before the quarter concludes a new section will be added for the next quarter. We are working on updating the complete one-year roadmap and will be posting it here as well. diff --git a/docs/architecture/adr-008-dCERT-group.md b/docs/architecture/adr-008-dCERT-group.md index 13d2b340a667..509eb36234b4 100644 --- a/docs/architecture/adr-008-dCERT-group.md +++ b/docs/architecture/adr-008-dCERT-group.md @@ -101,7 +101,7 @@ mechanism. If those tokens are unbonded then the dCERT member must be automatically kicked from the group. 
Slashing of a particular dCERT member due to soft-contract breach should be -performed by governance on a per member basis based on the magnitude of the +performed by governance on a per-member basis based on the magnitude of the breach. The process flow is anticipated to be that a dCERT member is suspended by the dCERT group prior to being slashed by governance. diff --git a/docs/architecture/adr-033-protobuf-inter-module-comm.md b/docs/architecture/adr-033-protobuf-inter-module-comm.md index 4f2769e6770f..1929975c9a27 100644 --- a/docs/architecture/adr-033-protobuf-inter-module-comm.md +++ b/docs/architecture/adr-033-protobuf-inter-module-comm.md @@ -154,7 +154,7 @@ func (foo *FooMsgServer) Bar(ctx context.Context, req *MsgBarRequest) (*MsgBarRe } ``` -This design is also intended to be extensible to cover use cases of more fine grained permissioning like minting by +This design is also intended to be extensible to cover use cases of more fine-grained permissioning like minting by denom prefix being restricted to certain modules (as discussed in [#7459](https://github.com/cosmos/cosmos-sdk/pull/7459#discussion_r529545528)). diff --git a/docs/architecture/adr-049-state-sync-hooks.md b/docs/architecture/adr-049-state-sync-hooks.md index 50c551ea2b8d..5a896613cbb9 100644 --- a/docs/architecture/adr-049-state-sync-hooks.md +++ b/docs/architecture/adr-049-state-sync-hooks.md @@ -116,7 +116,7 @@ type ExtensionPayloadReader = func() ([]byte, error) type ExtensionPayloadWriter = func([]byte) error // ExtensionSnapshotter is an extension Snapshotter that is appended to the snapshot stream. -// ExtensionSnapshotter has an unique name and manages it's own internal formats. +// ExtensionSnapshotter has a unique name and manages it's own internal formats. type ExtensionSnapshotter interface { // SnapshotName returns the name of snapshotter, it should be unique in the manager. 
SnapshotName() string From 6a52694ef7b8cb3e04bb51687faac0b81e5036ac Mon Sep 17 00:00:00 2001 From: son trinh Date: Tue, 3 Dec 2024 16:45:15 +0700 Subject: [PATCH 10/17] refactor(tests/integration): Port distribution integration tests to server v2 (#22667) --- .../distribution/keeper/common_test.go | 36 -- tests/integration/distribution/module_test.go | 31 -- tests/integration/v2/app.go | 15 +- tests/integration/v2/auth/app_test.go | 3 + .../v2/distribution/common_test.go | 29 ++ .../v2/distribution/fixture_test.go | 160 +++++++ .../distribution}/grpc_query_test.go | 76 +-- .../v2/distribution/module_test.go | 15 + .../distribution}/msg_server_test.go | 442 ++++++------------ tests/integration/v2/services.go | 14 + testutil/configurator/configurator.go | 14 +- x/staking/testutil/helpers.go | 14 +- 12 files changed, 445 insertions(+), 404 deletions(-) delete mode 100644 tests/integration/distribution/keeper/common_test.go delete mode 100644 tests/integration/distribution/module_test.go create mode 100644 tests/integration/v2/distribution/common_test.go create mode 100644 tests/integration/v2/distribution/fixture_test.go rename tests/integration/{distribution/keeper => v2/distribution}/grpc_query_test.go (84%) create mode 100644 tests/integration/v2/distribution/module_test.go rename tests/integration/{distribution/keeper => v2/distribution}/msg_server_test.go (58%) diff --git a/tests/integration/distribution/keeper/common_test.go b/tests/integration/distribution/keeper/common_test.go deleted file mode 100644 index e298225b7470..000000000000 --- a/tests/integration/distribution/keeper/common_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package keeper_test - -import ( - "testing" - - "gotest.tools/v3/assert" - - "cosmossdk.io/math" - "cosmossdk.io/x/distribution/types" - stakingtestutil "cosmossdk.io/x/staking/testutil" - stakingtypes "cosmossdk.io/x/staking/types" - - simtestutil "github.com/cosmos/cosmos-sdk/testutil/sims" - sdk 
"github.com/cosmos/cosmos-sdk/types" -) - -var ( - PKS = simtestutil.CreateTestPubKeys(3) - - valConsPk0 = PKS[0] -) - -func setupValidatorWithCommission(t *testing.T, f *fixture, valAddr sdk.ValAddress, initialStake int64) { - t.Helper() - initTokens := f.stakingKeeper.TokensFromConsensusPower(f.sdkCtx, int64(1000)) - assert.NilError(t, f.bankKeeper.MintCoins(f.sdkCtx, types.ModuleName, sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, initTokens)))) - assert.NilError(t, f.stakingKeeper.Params.Set(f.sdkCtx, stakingtypes.DefaultParams())) - - funds := f.stakingKeeper.TokensFromConsensusPower(f.sdkCtx, int64(1000)) - assert.NilError(t, f.bankKeeper.SendCoinsFromModuleToAccount(f.sdkCtx, types.ModuleName, sdk.AccAddress(valAddr), sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, funds)))) - f.accountKeeper.SetAccount(f.sdkCtx, f.accountKeeper.NewAccountWithAddress(f.sdkCtx, sdk.AccAddress(valAddr))) - - tstaking := stakingtestutil.NewHelper(t, f.sdkCtx, f.stakingKeeper) - tstaking.Commission = stakingtypes.NewCommissionRates(math.LegacyNewDecWithPrec(5, 1), math.LegacyNewDecWithPrec(5, 1), math.LegacyNewDec(0)) - tstaking.CreateValidator(valAddr, valConsPk0, math.NewInt(initialStake), true) -} diff --git a/tests/integration/distribution/module_test.go b/tests/integration/distribution/module_test.go deleted file mode 100644 index ce6e902c6b7c..000000000000 --- a/tests/integration/distribution/module_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package distribution_test - -import ( - "testing" - - "gotest.tools/v3/assert" - - "cosmossdk.io/depinject" - "cosmossdk.io/log" - "cosmossdk.io/x/distribution/types" - - simtestutil "github.com/cosmos/cosmos-sdk/testutil/sims" - authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper" - authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" -) - -func TestItCreatesModuleAccountOnInitBlock(t *testing.T) { - var accountKeeper authkeeper.AccountKeeper - - app, err := simtestutil.SetupAtGenesis( - 
depinject.Configs( - AppConfig, - depinject.Supply(log.NewNopLogger()), - ), - &accountKeeper) - assert.NilError(t, err) - - ctx := app.BaseApp.NewContext(false) - acc := accountKeeper.GetAccount(ctx, authtypes.NewModuleAddress(types.ModuleName)) - assert.Assert(t, acc != nil) -} diff --git a/tests/integration/v2/app.go b/tests/integration/v2/app.go index 0554f9b148d8..257fc3c700b5 100644 --- a/tests/integration/v2/app.go +++ b/tests/integration/v2/app.go @@ -17,6 +17,7 @@ import ( corebranch "cosmossdk.io/core/branch" "cosmossdk.io/core/comet" corecontext "cosmossdk.io/core/context" + "cosmossdk.io/core/header" "cosmossdk.io/core/server" corestore "cosmossdk.io/core/store" "cosmossdk.io/core/transaction" @@ -96,6 +97,8 @@ type StartupConfig struct { // RouterServiceBuilder defines the custom builder // for msg router and query router service to be used in the app. RouterServiceBuilder runtime.RouterServiceBuilder + // HeaderService defines the custom header service to be used in the app. + HeaderService header.Service } func DefaultStartUpConfig(t *testing.T) StartupConfig { @@ -125,6 +128,7 @@ func DefaultStartUpConfig(t *testing.T) StartupConfig { RouterServiceBuilder: runtime.NewRouterBuilder( stf.NewMsgRouterService, stf.NewQueryRouterService(), ), + HeaderService: services.NewGenesisHeaderService(stf.HeaderService{}), } } @@ -182,13 +186,13 @@ func NewApp( "minimum-gas-prices": "0stake", }, }, - services.NewGenesisHeaderService(stf.HeaderService{}), cometService, kvFactory, &eventService{}, storeBuilder, startupConfig.BranchService, startupConfig.RouterServiceBuilder, + startupConfig.HeaderService, ), depinject.Invoke( std.RegisterInterfaces, @@ -313,6 +317,10 @@ type App struct { txConfig client.TxConfig } +func (a App) LastBlockHeight() uint64 { + return a.lastHeight +} + // Deliver delivers a block with the given transactions and returns the resulting state. 
func (a *App) Deliver( t *testing.T, ctx context.Context, txs []stateMachineTx, @@ -327,6 +335,11 @@ func (a *App) Deliver( resp, state, err := a.DeliverBlock(ctx, req) require.NoError(t, err) a.lastHeight++ + // update block height if integration context is present + iCtx, ok := ctx.Value(contextKey).(*integrationContext) + if ok { + iCtx.header.Height = int64(a.lastHeight) + } return resp, state } diff --git a/tests/integration/v2/auth/app_test.go b/tests/integration/v2/auth/app_test.go index 5c3c5a7e95cf..36326783216f 100644 --- a/tests/integration/v2/auth/app_test.go +++ b/tests/integration/v2/auth/app_test.go @@ -11,6 +11,8 @@ import ( "cosmossdk.io/depinject" "cosmossdk.io/log" "cosmossdk.io/runtime/v2" + "cosmossdk.io/runtime/v2/services" + "cosmossdk.io/server/v2/stf" "cosmossdk.io/x/accounts" basedepinject "cosmossdk.io/x/accounts/defaults/base/depinject" accountsv1 "cosmossdk.io/x/accounts/v1" @@ -79,6 +81,7 @@ func createTestSuite(t *testing.T) *suite { startupCfg.BranchService = &integration.BranchService{} startupCfg.RouterServiceBuilder = serviceBuilder + startupCfg.HeaderService = services.NewGenesisHeaderService(stf.HeaderService{}) res.app, err = integration.NewApp( depinject.Configs(configurator.NewAppV2Config(moduleConfigs...), depinject.Provide( diff --git a/tests/integration/v2/distribution/common_test.go b/tests/integration/v2/distribution/common_test.go new file mode 100644 index 000000000000..ff30842d9a6f --- /dev/null +++ b/tests/integration/v2/distribution/common_test.go @@ -0,0 +1,29 @@ +package distribution + +import ( + "testing" + + "gotest.tools/v3/assert" + + "cosmossdk.io/math" + "cosmossdk.io/x/distribution/types" + stakingtestutil "cosmossdk.io/x/staking/testutil" + stakingtypes "cosmossdk.io/x/staking/types" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +func setupValidatorWithCommission(t *testing.T, f *fixture, valAddr sdk.ValAddress, initialStake int64) { + t.Helper() + initTokens :=
f.stakingKeeper.TokensFromConsensusPower(f.ctx, int64(1000)) + assert.NilError(t, f.bankKeeper.MintCoins(f.ctx, types.ModuleName, sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, initTokens)))) + assert.NilError(t, f.stakingKeeper.Params.Set(f.ctx, stakingtypes.DefaultParams())) + + funds := f.stakingKeeper.TokensFromConsensusPower(f.ctx, int64(1000)) + assert.NilError(t, f.bankKeeper.SendCoinsFromModuleToAccount(f.ctx, types.ModuleName, sdk.AccAddress(valAddr), sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, funds)))) + f.authKeeper.SetAccount(f.ctx, f.authKeeper.NewAccountWithAddress(f.ctx, sdk.AccAddress(valAddr))) + + tstaking := stakingtestutil.NewHelper(t, f.ctx, f.stakingKeeper) + tstaking.Commission = stakingtypes.NewCommissionRates(math.LegacyNewDecWithPrec(5, 1), math.LegacyNewDecWithPrec(5, 1), math.LegacyNewDec(0)) + tstaking.CreateValidator(valAddr, valConsPk0, math.NewInt(initialStake), true) +} diff --git a/tests/integration/v2/distribution/fixture_test.go b/tests/integration/v2/distribution/fixture_test.go new file mode 100644 index 000000000000..d2ea156b5b69 --- /dev/null +++ b/tests/integration/v2/distribution/fixture_test.go @@ -0,0 +1,160 @@ +package distribution + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "cosmossdk.io/core/comet" + corecontext "cosmossdk.io/core/context" + "cosmossdk.io/core/router" + "cosmossdk.io/core/transaction" + "cosmossdk.io/depinject" + "cosmossdk.io/log" + "cosmossdk.io/runtime/v2" + _ "cosmossdk.io/x/accounts" // import as blank for app wiring + _ "cosmossdk.io/x/bank" // import as blank for app wiring + bankkeeper "cosmossdk.io/x/bank/keeper" + banktypes "cosmossdk.io/x/bank/types" + _ "cosmossdk.io/x/consensus" // import as blank for app wiring + _ "cosmossdk.io/x/distribution" // import as blank for app wiring + distrkeeper "cosmossdk.io/x/distribution/keeper" + _ "cosmossdk.io/x/mint" // import as blank for app wiring + _ "cosmossdk.io/x/protocolpool" // import as blank 
for app wiring + poolkeeper "cosmossdk.io/x/protocolpool/keeper" + _ "cosmossdk.io/x/staking" // import as blank for app wiring + stakingkeeper "cosmossdk.io/x/staking/keeper" + + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/tests/integration/v2" + "github.com/cosmos/cosmos-sdk/testutil/configurator" + simtestutil "github.com/cosmos/cosmos-sdk/testutil/sims" + sdk "github.com/cosmos/cosmos-sdk/types" + _ "github.com/cosmos/cosmos-sdk/x/auth" // import as blank for app wiring + authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper" + _ "github.com/cosmos/cosmos-sdk/x/auth/tx/config" // import as blank for app wiring`` + _ "github.com/cosmos/cosmos-sdk/x/auth/vesting" // import as blank for app wiring + _ "github.com/cosmos/cosmos-sdk/x/genutil" // import as blank for app wiring +) + +var ( + emptyDelAddr sdk.AccAddress + emptyValAddr sdk.ValAddress +) + +var ( + PKS = simtestutil.CreateTestPubKeys(3) + + valConsPk0 = PKS[0] +) + +type fixture struct { + app *integration.App + + ctx context.Context + cdc codec.Codec + + queryClient distrkeeper.Querier + + authKeeper authkeeper.AccountKeeper + bankKeeper bankkeeper.Keeper + distrKeeper distrkeeper.Keeper + stakingKeeper *stakingkeeper.Keeper + poolKeeper poolkeeper.Keeper + + addr sdk.AccAddress + valAddr sdk.ValAddress +} + +func createTestFixture(t *testing.T) *fixture { + t.Helper() + res := fixture{} + + moduleConfigs := []configurator.ModuleOption{ + configurator.AccountsModule(), + configurator.AuthModule(), + configurator.BankModule(), + configurator.StakingModule(), + configurator.TxModule(), + configurator.ValidateModule(), + configurator.ConsensusModule(), + configurator.GenutilModule(), + configurator.DistributionModule(), + configurator.MintModule(), + configurator.ProtocolPoolModule(), + } + + var err error + startupCfg := integration.DefaultStartUpConfig(t) + + msgRouterService := 
integration.NewRouterService() + res.registerMsgRouterService(msgRouterService) + + var routerFactory runtime.RouterServiceFactory = func(_ []byte) router.Service { + return msgRouterService + } + + queryRouterService := integration.NewRouterService() + res.registerQueryRouterService(queryRouterService) + + serviceBuilder := runtime.NewRouterBuilder(routerFactory, queryRouterService) + + startupCfg.BranchService = &integration.BranchService{} + startupCfg.RouterServiceBuilder = serviceBuilder + startupCfg.HeaderService = &integration.HeaderService{} + + res.app, err = integration.NewApp( + depinject.Configs(configurator.NewAppV2Config(moduleConfigs...), depinject.Supply(log.NewNopLogger())), + startupCfg, + &res.bankKeeper, &res.distrKeeper, &res.authKeeper, &res.stakingKeeper, &res.poolKeeper, &res.cdc) + require.NoError(t, err) + + addr := sdk.AccAddress(PKS[0].Address()) + valAddr := sdk.ValAddress(addr) + valConsAddr := sdk.ConsAddress(valConsPk0.Address()) + + ctx := res.app.StateLatestContext(t) + res.addr = addr + res.valAddr = valAddr + + // set proposer and vote infos + res.ctx = context.WithValue(ctx, corecontext.CometInfoKey, comet.Info{ + LastCommit: comet.CommitInfo{ + Votes: []comet.VoteInfo{ + { + Validator: comet.Validator{ + Address: valAddr, + Power: 100, + }, + BlockIDFlag: comet.BlockIDFlagCommit, + }, + }, + }, + ProposerAddress: valConsAddr, + }) + + res.queryClient = distrkeeper.NewQuerier(res.distrKeeper) + + return &res +} + +func (s *fixture) registerMsgRouterService(router *integration.RouterService) { + // register custom router service + bankSendHandler := func(ctx context.Context, req transaction.Msg) (transaction.Msg, error) { + msg, ok := req.(*banktypes.MsgSend) + if !ok { + return nil, integration.ErrInvalidMsgType + } + msgServer := bankkeeper.NewMsgServerImpl(s.bankKeeper) + resp, err := msgServer.Send(ctx, msg) + return resp, err + } + + router.RegisterHandler(bankSendHandler, "cosmos.bank.v1beta1.MsgSend") +} + +func (s 
*fixture) registerQueryRouterService(router *integration.RouterService) { + // register custom router service + +} diff --git a/tests/integration/distribution/keeper/grpc_query_test.go b/tests/integration/v2/distribution/grpc_query_test.go similarity index 84% rename from tests/integration/distribution/keeper/grpc_query_test.go rename to tests/integration/v2/distribution/grpc_query_test.go index f83671fc4eac..eba52d317718 100644 --- a/tests/integration/distribution/keeper/grpc_query_test.go +++ b/tests/integration/v2/distribution/grpc_query_test.go @@ -1,4 +1,4 @@ -package keeper_test +package distribution import ( "fmt" @@ -17,9 +17,9 @@ import ( func TestGRPCParams(t *testing.T) { t.Parallel() - f := initFixture(t) + f := createTestFixture(t) - assert.NilError(t, f.distrKeeper.Params.Set(f.sdkCtx, types.DefaultParams())) + assert.NilError(t, f.distrKeeper.Params.Set(f.ctx, types.DefaultParams())) var ( params types.Params @@ -49,7 +49,7 @@ func TestGRPCParams(t *testing.T) { WithdrawAddrEnabled: true, } - assert.NilError(t, f.distrKeeper.Params.Set(f.sdkCtx, params)) + assert.NilError(t, f.distrKeeper.Params.Set(f.ctx, params)) expParams = params }, msg: &types.QueryParamsRequest{}, @@ -60,7 +60,7 @@ func TestGRPCParams(t *testing.T) { t.Run(fmt.Sprintf("Case %s", tc.name), func(t *testing.T) { tc.malleate() - paramsRes, err := f.queryClient.Params(f.sdkCtx, tc.msg) + paramsRes, err := f.queryClient.Params(f.ctx, tc.msg) assert.NilError(t, err) assert.Assert(t, paramsRes != nil) assert.DeepEqual(t, paramsRes.Params, expParams) @@ -71,9 +71,9 @@ func TestGRPCParams(t *testing.T) { func TestGRPCValidatorOutstandingRewards(t *testing.T) { t.Parallel() - f := initFixture(t) + f := createTestFixture(t) - assert.NilError(t, f.distrKeeper.Params.Set(f.sdkCtx, types.DefaultParams())) + assert.NilError(t, f.distrKeeper.Params.Set(f.ctx, types.DefaultParams())) setupValidatorWithCommission(t, f, f.valAddr, 10) // Setup a validator with commission valCommission := 
sdk.DecCoins{ @@ -82,10 +82,10 @@ func TestGRPCValidatorOutstandingRewards(t *testing.T) { } // set outstanding rewards - err := f.distrKeeper.ValidatorOutstandingRewards.Set(f.sdkCtx, f.valAddr, types.ValidatorOutstandingRewards{Rewards: valCommission}) + err := f.distrKeeper.ValidatorOutstandingRewards.Set(f.ctx, f.valAddr, types.ValidatorOutstandingRewards{Rewards: valCommission}) assert.NilError(t, err) - rewards, err := f.distrKeeper.ValidatorOutstandingRewards.Get(f.sdkCtx, f.valAddr) + rewards, err := f.distrKeeper.ValidatorOutstandingRewards.Get(f.ctx, f.valAddr) assert.NilError(t, err) testCases := []struct { @@ -116,7 +116,7 @@ func TestGRPCValidatorOutstandingRewards(t *testing.T) { for _, testCase := range testCases { tc := testCase t.Run(fmt.Sprintf("Case %s", tc.name), func(t *testing.T) { - validatorOutstandingRewards, err := f.queryClient.ValidatorOutstandingRewards(f.sdkCtx, tc.msg) + validatorOutstandingRewards, err := f.queryClient.ValidatorOutstandingRewards(f.ctx, tc.msg) if tc.expPass { assert.NilError(t, err) @@ -132,13 +132,13 @@ func TestGRPCValidatorOutstandingRewards(t *testing.T) { func TestGRPCValidatorCommission(t *testing.T) { t.Parallel() - f := initFixture(t) + f := createTestFixture(t) - assert.NilError(t, f.distrKeeper.Params.Set(f.sdkCtx, types.DefaultParams())) // Set default distribution parameters - setupValidatorWithCommission(t, f, f.valAddr, 10) // Setup a validator with commission + assert.NilError(t, f.distrKeeper.Params.Set(f.ctx, types.DefaultParams())) // Set default distribution parameters + setupValidatorWithCommission(t, f, f.valAddr, 10) // Setup a validator with commission commission := sdk.DecCoins{sdk.DecCoin{Denom: "token1", Amount: math.LegacyNewDec(4)}, {Denom: "token2", Amount: math.LegacyNewDec(2)}} - assert.NilError(t, f.distrKeeper.ValidatorsAccumulatedCommission.Set(f.sdkCtx, f.valAddr, types.ValidatorAccumulatedCommission{Commission: commission})) + assert.NilError(t, 
f.distrKeeper.ValidatorsAccumulatedCommission.Set(f.ctx, f.valAddr, types.ValidatorAccumulatedCommission{Commission: commission})) testCases := []struct { name string @@ -168,7 +168,7 @@ func TestGRPCValidatorCommission(t *testing.T) { for _, testCase := range testCases { tc := testCase t.Run(fmt.Sprintf("Case %s", tc.name), func(t *testing.T) { - commissionRes, err := f.queryClient.ValidatorCommission(f.sdkCtx, tc.msg) + commissionRes, err := f.queryClient.ValidatorCommission(f.ctx, tc.msg) if tc.expPass { assert.NilError(t, err) @@ -184,7 +184,7 @@ func TestGRPCValidatorCommission(t *testing.T) { func TestGRPCValidatorSlashes(t *testing.T) { t.Parallel() - f := initFixture(t) + f := createTestFixture(t) addr2 := sdk.AccAddress(PKS[1].Address()) valAddr2 := sdk.ValAddress(addr2) @@ -198,7 +198,7 @@ func TestGRPCValidatorSlashes(t *testing.T) { for i, slash := range slashes { err := f.distrKeeper.ValidatorSlashEvents.Set( - f.sdkCtx, + f.ctx, collections.Join3(f.valAddr, uint64(i+2), uint64(0)), slash, ) @@ -320,7 +320,7 @@ func TestGRPCValidatorSlashes(t *testing.T) { t.Run(fmt.Sprintf("Case %s", tc.name), func(t *testing.T) { tc.malleate() - slashesRes, err := f.queryClient.ValidatorSlashes(f.sdkCtx, req) + slashesRes, err := f.queryClient.ValidatorSlashes(f.ctx, req) if tc.expPass { assert.NilError(t, err) @@ -335,13 +335,13 @@ func TestGRPCValidatorSlashes(t *testing.T) { func TestGRPCDelegatorWithdrawAddress(t *testing.T) { t.Parallel() - f := initFixture(t) + f := createTestFixture(t) - assert.NilError(t, f.distrKeeper.Params.Set(f.sdkCtx, types.DefaultParams())) + assert.NilError(t, f.distrKeeper.Params.Set(f.ctx, types.DefaultParams())) addr2 := sdk.AccAddress(PKS[1].Address()) - err := f.distrKeeper.SetWithdrawAddr(f.sdkCtx, f.addr, addr2) + err := f.distrKeeper.SetWithdrawAddr(f.ctx, f.addr, addr2) assert.Assert(t, err == nil) testCases := []struct { @@ -366,7 +366,7 @@ func TestGRPCDelegatorWithdrawAddress(t *testing.T) { for _, testCase := range 
testCases { tc := testCase t.Run(fmt.Sprintf("Case %s", tc.name), func(t *testing.T) { - withdrawAddress, err := f.queryClient.DelegatorWithdrawAddress(f.sdkCtx, tc.msg) + withdrawAddress, err := f.queryClient.DelegatorWithdrawAddress(f.ctx, tc.msg) if tc.expPass { assert.NilError(t, err) @@ -381,7 +381,7 @@ func TestGRPCDelegatorWithdrawAddress(t *testing.T) { func TestGRPCCommunityPool(t *testing.T) { t.Parallel() - f := initFixture(t) + f := createTestFixture(t) var ( req *types.QueryCommunityPoolRequest //nolint:staticcheck // we're using a deprecated call @@ -403,10 +403,10 @@ func TestGRPCCommunityPool(t *testing.T) { name: "valid request", malleate: func() { amount := sdk.NewCoins(sdk.NewInt64Coin(sdk.DefaultBondDenom, 100)) - assert.NilError(t, f.bankKeeper.MintCoins(f.sdkCtx, types.ModuleName, amount)) - assert.NilError(t, f.bankKeeper.SendCoinsFromModuleToAccount(f.sdkCtx, types.ModuleName, f.addr, amount)) + assert.NilError(t, f.bankKeeper.MintCoins(f.ctx, types.ModuleName, amount)) + assert.NilError(t, f.bankKeeper.SendCoinsFromModuleToAccount(f.ctx, types.ModuleName, f.addr, amount)) - err := f.poolKeeper.FundCommunityPool(f.sdkCtx, amount, f.addr) + err := f.poolKeeper.FundCommunityPool(f.ctx, amount, f.addr) assert.Assert(t, err == nil) req = &types.QueryCommunityPoolRequest{} //nolint:staticcheck // we're using a deprecated call @@ -420,7 +420,7 @@ func TestGRPCCommunityPool(t *testing.T) { t.Run(fmt.Sprintf("Case %s", tc.name), func(t *testing.T) { testCase.malleate() - pool, err := f.queryClient.CommunityPool(f.sdkCtx, req) //nolint:staticcheck // we're using a deprecated call + pool, err := f.queryClient.CommunityPool(f.ctx, req) //nolint:staticcheck // we're using a deprecated call assert.NilError(t, err) assert.DeepEqual(t, expPool, pool) @@ -430,20 +430,20 @@ func TestGRPCCommunityPool(t *testing.T) { func TestGRPCDelegationRewards(t *testing.T) { t.Parallel() - f := initFixture(t) + f := createTestFixture(t) - assert.NilError(t, 
f.distrKeeper.FeePool.Set(f.sdkCtx, types.FeePool{ + assert.NilError(t, f.distrKeeper.FeePool.Set(f.ctx, types.FeePool{ CommunityPool: sdk.NewDecCoins(sdk.DecCoin{Denom: sdk.DefaultBondDenom, Amount: math.LegacyNewDec(1000)}), })) initialStake := int64(10) - assert.NilError(t, f.distrKeeper.Params.Set(f.sdkCtx, types.DefaultParams())) + assert.NilError(t, f.distrKeeper.Params.Set(f.ctx, types.DefaultParams())) setupValidatorWithCommission(t, f, f.valAddr, initialStake) // Setup a validator with commission - val, found := f.stakingKeeper.GetValidator(f.sdkCtx, f.valAddr) + val, found := f.stakingKeeper.GetValidator(f.ctx, f.valAddr) assert.Assert(t, found) // Set default staking params - assert.NilError(t, f.stakingKeeper.Params.Set(f.sdkCtx, stakingtypes.DefaultParams())) + assert.NilError(t, f.stakingKeeper.Params.Set(f.ctx, stakingtypes.DefaultParams())) addr2 := sdk.AccAddress(PKS[1].Address()) valAddr2 := sdk.ValAddress(addr2) @@ -453,19 +453,19 @@ func TestGRPCDelegationRewards(t *testing.T) { delTokens := sdk.TokensFromConsensusPower(2, sdk.DefaultPowerReduction) validator, issuedShares := val.AddTokensFromDel(delTokens) delegation := stakingtypes.NewDelegation(delAddr.String(), f.valAddr.String(), issuedShares) - assert.NilError(t, f.stakingKeeper.SetDelegation(f.sdkCtx, delegation)) + assert.NilError(t, f.stakingKeeper.SetDelegation(f.ctx, delegation)) valBz, err := f.stakingKeeper.ValidatorAddressCodec().StringToBytes(validator.GetOperator()) assert.NilError(t, err) - assert.NilError(t, f.distrKeeper.DelegatorStartingInfo.Set(f.sdkCtx, collections.Join(sdk.ValAddress(valBz), delAddr), types.NewDelegatorStartingInfo(2, math.LegacyNewDec(initialStake), 20))) + assert.NilError(t, f.distrKeeper.DelegatorStartingInfo.Set(f.ctx, collections.Join(sdk.ValAddress(valBz), delAddr), types.NewDelegatorStartingInfo(2, math.LegacyNewDec(initialStake), 20))) // setup validator rewards decCoins := sdk.DecCoins{sdk.NewDecCoinFromDec(sdk.DefaultBondDenom, 
math.LegacyOneDec())} historicalRewards := types.NewValidatorHistoricalRewards(decCoins, 2) - assert.NilError(t, f.distrKeeper.ValidatorHistoricalRewards.Set(f.sdkCtx, collections.Join(sdk.ValAddress(valBz), uint64(2)), historicalRewards)) + assert.NilError(t, f.distrKeeper.ValidatorHistoricalRewards.Set(f.ctx, collections.Join(sdk.ValAddress(valBz), uint64(2)), historicalRewards)) // setup current rewards and outstanding rewards currentRewards := types.NewValidatorCurrentRewards(decCoins, 3) - assert.NilError(t, f.distrKeeper.ValidatorCurrentRewards.Set(f.sdkCtx, f.valAddr, currentRewards)) - assert.NilError(t, f.distrKeeper.ValidatorOutstandingRewards.Set(f.sdkCtx, f.valAddr, types.ValidatorOutstandingRewards{Rewards: decCoins})) + assert.NilError(t, f.distrKeeper.ValidatorCurrentRewards.Set(f.ctx, f.valAddr, currentRewards)) + assert.NilError(t, f.distrKeeper.ValidatorOutstandingRewards.Set(f.ctx, f.valAddr, types.ValidatorOutstandingRewards{Rewards: decCoins})) expRes := &types.QueryDelegationRewardsResponse{ Rewards: sdk.DecCoins{sdk.DecCoin{Denom: sdk.DefaultBondDenom, Amount: math.LegacyNewDec(initialStake / 10)}}, @@ -524,7 +524,7 @@ func TestGRPCDelegationRewards(t *testing.T) { for _, testCase := range testCases { tc := testCase t.Run(fmt.Sprintf("Case %s", tc.name), func(t *testing.T) { - rewards, err := f.queryClient.DelegationRewards(f.sdkCtx, tc.msg) + rewards, err := f.queryClient.DelegationRewards(f.ctx, tc.msg) if tc.expPass { assert.NilError(t, err) diff --git a/tests/integration/v2/distribution/module_test.go b/tests/integration/v2/distribution/module_test.go new file mode 100644 index 000000000000..bb297800b8e5 --- /dev/null +++ b/tests/integration/v2/distribution/module_test.go @@ -0,0 +1,15 @@ +package distribution + +import ( + "testing" + + "cosmossdk.io/x/distribution/types" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + "gotest.tools/v3/assert" +) + +func TestItCreatesModuleAccountOnInitBlock(t *testing.T) { + f := 
createTestFixture(t) + acc := f.authKeeper.GetAccount(f.ctx, authtypes.NewModuleAddress(types.ModuleName)) + assert.Assert(t, acc != nil) +} diff --git a/tests/integration/distribution/keeper/msg_server_test.go b/tests/integration/v2/distribution/msg_server_test.go similarity index 58% rename from tests/integration/distribution/keeper/msg_server_test.go rename to tests/integration/v2/distribution/msg_server_test.go index 3fc0d2d265d9..0af9a7063f49 100644 --- a/tests/integration/distribution/keeper/msg_server_test.go +++ b/tests/integration/v2/distribution/msg_server_test.go @@ -1,4 +1,4 @@ -package keeper_test +package distribution import ( "context" @@ -6,211 +6,32 @@ import ( "testing" "github.com/stretchr/testify/require" - "go.uber.org/mock/gomock" "gotest.tools/v3/assert" "cosmossdk.io/collections" - "cosmossdk.io/core/appmodule" "cosmossdk.io/core/comet" - "cosmossdk.io/log" + corecontext "cosmossdk.io/core/context" + "cosmossdk.io/core/transaction" "cosmossdk.io/math" - storetypes "cosmossdk.io/store/types" - "cosmossdk.io/x/bank" - bankkeeper "cosmossdk.io/x/bank/keeper" - banktypes "cosmossdk.io/x/bank/types" - "cosmossdk.io/x/consensus" - consensusparamkeeper "cosmossdk.io/x/consensus/keeper" - consensustypes "cosmossdk.io/x/consensus/types" - "cosmossdk.io/x/distribution" distrkeeper "cosmossdk.io/x/distribution/keeper" distrtypes "cosmossdk.io/x/distribution/types" - "cosmossdk.io/x/protocolpool" - poolkeeper "cosmossdk.io/x/protocolpool/keeper" pooltypes "cosmossdk.io/x/protocolpool/types" - "cosmossdk.io/x/staking" - stakingkeeper "cosmossdk.io/x/staking/keeper" stakingtestutil "cosmossdk.io/x/staking/testutil" stakingtypes "cosmossdk.io/x/staking/types" - "github.com/cosmos/cosmos-sdk/baseapp" - "github.com/cosmos/cosmos-sdk/codec" - addresscodec "github.com/cosmos/cosmos-sdk/codec/address" - codectestutil "github.com/cosmos/cosmos-sdk/codec/testutil" - "github.com/cosmos/cosmos-sdk/runtime" - 
"github.com/cosmos/cosmos-sdk/testutil/integration" + "github.com/cosmos/cosmos-sdk/tests/integration/v2" sdk "github.com/cosmos/cosmos-sdk/types" - moduletestutil "github.com/cosmos/cosmos-sdk/types/module/testutil" - "github.com/cosmos/cosmos-sdk/x/auth" - authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper" - authsims "github.com/cosmos/cosmos-sdk/x/auth/simulation" - authtestutil "github.com/cosmos/cosmos-sdk/x/auth/testutil" - authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" ) -var ( - emptyDelAddr sdk.AccAddress - emptyValAddr sdk.ValAddress -) - -type fixture struct { - app *integration.App - - sdkCtx sdk.Context - cdc codec.Codec - keys map[string]*storetypes.KVStoreKey - - queryClient distrtypes.QueryClient - - accountKeeper authkeeper.AccountKeeper - bankKeeper bankkeeper.Keeper - distrKeeper distrkeeper.Keeper - stakingKeeper *stakingkeeper.Keeper - poolKeeper poolkeeper.Keeper - - addr sdk.AccAddress - valAddr sdk.ValAddress -} - -func initFixture(t *testing.T) *fixture { - t.Helper() - keys := storetypes.NewKVStoreKeys( - authtypes.StoreKey, banktypes.StoreKey, distrtypes.StoreKey, pooltypes.StoreKey, stakingtypes.StoreKey, - consensustypes.StoreKey, - ) - encodingCfg := moduletestutil.MakeTestEncodingConfig(codectestutil.CodecOptions{}, auth.AppModule{}, bank.AppModule{}) - cdc := encodingCfg.Codec - - logger := log.NewTestLogger(t) - authority := authtypes.NewModuleAddress("gov") - - maccPerms := map[string][]string{ - pooltypes.ModuleName: {}, - pooltypes.StreamAccount: {}, - pooltypes.ProtocolPoolDistrAccount: {}, - distrtypes.ModuleName: {authtypes.Minter}, - stakingtypes.BondedPoolName: {authtypes.Burner, authtypes.Staking}, - stakingtypes.NotBondedPoolName: {authtypes.Burner, authtypes.Staking}, - } - - // gomock initializations - ctrl := gomock.NewController(t) - acctsModKeeper := authtestutil.NewMockAccountsModKeeper(ctrl) - accNum := uint64(0) - 
acctsModKeeper.EXPECT().NextAccountNumber(gomock.Any()).AnyTimes().DoAndReturn(func(ctx context.Context) (uint64, error) { - currentNum := accNum - accNum++ - return currentNum, nil - }) - - accountKeeper := authkeeper.NewAccountKeeper( - runtime.NewEnvironment(runtime.NewKVStoreService(keys[authtypes.StoreKey]), log.NewNopLogger()), - cdc, - authtypes.ProtoBaseAccount, - acctsModKeeper, - maccPerms, - addresscodec.NewBech32Codec(sdk.Bech32MainPrefix), - sdk.Bech32MainPrefix, - authority.String(), - ) - - blockedAddresses := map[string]bool{ - accountKeeper.GetAuthority(): false, - } - bankKeeper := bankkeeper.NewBaseKeeper( - runtime.NewEnvironment(runtime.NewKVStoreService(keys[banktypes.StoreKey]), log.NewNopLogger()), - cdc, - accountKeeper, - blockedAddresses, - authority.String(), - ) - - msgRouter := baseapp.NewMsgServiceRouter() - grpcRouter := baseapp.NewGRPCQueryRouter() - cometService := runtime.NewContextAwareCometInfoService() - - consensusParamsKeeper := consensusparamkeeper.NewKeeper(cdc, runtime.NewEnvironment(runtime.NewKVStoreService(keys[consensustypes.StoreKey]), log.NewNopLogger(), runtime.EnvWithQueryRouterService(grpcRouter), runtime.EnvWithMsgRouterService(msgRouter)), authtypes.NewModuleAddress("gov").String()) - stakingKeeper := stakingkeeper.NewKeeper(cdc, runtime.NewEnvironment(runtime.NewKVStoreService(keys[stakingtypes.StoreKey]), log.NewNopLogger(), runtime.EnvWithQueryRouterService(grpcRouter), runtime.EnvWithMsgRouterService(msgRouter)), accountKeeper, bankKeeper, consensusParamsKeeper, authority.String(), addresscodec.NewBech32Codec(sdk.Bech32PrefixValAddr), addresscodec.NewBech32Codec(sdk.Bech32PrefixConsAddr), cometService) - - poolKeeper := poolkeeper.NewKeeper(cdc, runtime.NewEnvironment(runtime.NewKVStoreService(keys[pooltypes.StoreKey]), log.NewNopLogger()), accountKeeper, bankKeeper, authority.String()) - - distrKeeper := distrkeeper.NewKeeper( - cdc, 
runtime.NewEnvironment(runtime.NewKVStoreService(keys[distrtypes.StoreKey]), logger), accountKeeper, bankKeeper, stakingKeeper, cometService, distrtypes.ModuleName, authority.String(), - ) - - authModule := auth.NewAppModule(cdc, accountKeeper, acctsModKeeper, authsims.RandomGenesisAccounts, nil) - bankModule := bank.NewAppModule(cdc, bankKeeper, accountKeeper) - stakingModule := staking.NewAppModule(cdc, stakingKeeper) - distrModule := distribution.NewAppModule(cdc, distrKeeper, stakingKeeper) - poolModule := protocolpool.NewAppModule(cdc, poolKeeper, accountKeeper, bankKeeper) - consensusModule := consensus.NewAppModule(cdc, consensusParamsKeeper) - - addr := sdk.AccAddress(PKS[0].Address()) - valAddr := sdk.ValAddress(addr) - valConsAddr := sdk.ConsAddress(valConsPk0.Address()) - - integrationApp := integration.NewIntegrationApp(logger, keys, cdc, - encodingCfg.InterfaceRegistry.SigningContext().AddressCodec(), - encodingCfg.InterfaceRegistry.SigningContext().ValidatorAddressCodec(), - map[string]appmodule.AppModule{ - authtypes.ModuleName: authModule, - banktypes.ModuleName: bankModule, - stakingtypes.ModuleName: stakingModule, - distrtypes.ModuleName: distrModule, - pooltypes.ModuleName: poolModule, - consensustypes.ModuleName: consensusModule, - }, - msgRouter, - grpcRouter, - ) - - // set proposer and vote infos - sdkCtx := sdk.UnwrapSDKContext(integrationApp.Context()).WithProposer(valConsAddr).WithCometInfo(comet.Info{ - LastCommit: comet.CommitInfo{ - Votes: []comet.VoteInfo{ - { - Validator: comet.Validator{ - Address: valAddr, - Power: 100, - }, - BlockIDFlag: comet.BlockIDFlagCommit, - }, - }, - }, - ProposerAddress: valConsAddr, - }) - - // Register MsgServer and QueryServer - distrtypes.RegisterMsgServer(integrationApp.MsgServiceRouter(), distrkeeper.NewMsgServerImpl(distrKeeper)) - distrtypes.RegisterQueryServer(integrationApp.QueryHelper(), distrkeeper.NewQuerier(distrKeeper)) - - qr := integrationApp.QueryHelper() - distrQueryClient := 
distrtypes.NewQueryClient(qr) - - return &fixture{ - app: integrationApp, - sdkCtx: sdkCtx, - cdc: cdc, - keys: keys, - accountKeeper: accountKeeper, - bankKeeper: bankKeeper, - distrKeeper: distrKeeper, - stakingKeeper: stakingKeeper, - poolKeeper: poolKeeper, - addr: addr, - valAddr: valAddr, - queryClient: distrQueryClient, - } -} - func TestMsgWithdrawDelegatorReward(t *testing.T) { t.Parallel() - f := initFixture(t) + f := createTestFixture(t) - err := f.distrKeeper.FeePool.Set(f.sdkCtx, distrtypes.FeePool{ + err := f.distrKeeper.FeePool.Set(f.ctx, distrtypes.FeePool{ CommunityPool: sdk.NewDecCoins(sdk.DecCoin{Denom: "stake", Amount: math.LegacyNewDec(10000)}), }) require.NoError(t, err) - require.NoError(t, f.distrKeeper.Params.Set(f.sdkCtx, distrtypes.DefaultParams())) + require.NoError(t, f.distrKeeper.Params.Set(f.ctx, distrtypes.DefaultParams())) delAddr := sdk.AccAddress(PKS[1].Address()) @@ -227,17 +48,17 @@ func TestMsgWithdrawDelegatorReward(t *testing.T) { assert.NilError(t, err) validator.DelegatorShares = math.LegacyNewDec(100) validator.Tokens = math.NewInt(1000000) - assert.NilError(t, f.stakingKeeper.SetValidator(f.sdkCtx, validator)) + assert.NilError(t, f.stakingKeeper.SetValidator(f.ctx, validator)) // set module account coins - initTokens := f.stakingKeeper.TokensFromConsensusPower(f.sdkCtx, int64(1000)) - err = f.bankKeeper.MintCoins(f.sdkCtx, distrtypes.ModuleName, sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, initTokens))) + initTokens := f.stakingKeeper.TokensFromConsensusPower(f.ctx, int64(1000)) + err = f.bankKeeper.MintCoins(f.ctx, distrtypes.ModuleName, sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, initTokens))) require.NoError(t, err) // send funds to val addr - err = f.bankKeeper.SendCoinsFromModuleToAccount(f.sdkCtx, distrtypes.ModuleName, sdk.AccAddress(f.valAddr), sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, initTokens))) + err = f.bankKeeper.SendCoinsFromModuleToAccount(f.ctx, distrtypes.ModuleName, 
sdk.AccAddress(f.valAddr), sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, initTokens))) require.NoError(t, err) - initBalance := f.bankKeeper.GetAllBalances(f.sdkCtx, delAddr) + initBalance := f.bankKeeper.GetAllBalances(f.ctx, delAddr) // setup delegation delTokens := sdk.TokensFromConsensusPower(2, sdk.DefaultPowerReduction) @@ -245,18 +66,18 @@ func TestMsgWithdrawDelegatorReward(t *testing.T) { valBz, err := f.stakingKeeper.ValidatorAddressCodec().StringToBytes(validator.GetOperator()) require.NoError(t, err) delegation := stakingtypes.NewDelegation(delAddr.String(), validator.GetOperator(), issuedShares) - require.NoError(t, f.stakingKeeper.SetDelegation(f.sdkCtx, delegation)) - require.NoError(t, f.distrKeeper.DelegatorStartingInfo.Set(f.sdkCtx, collections.Join(sdk.ValAddress(valBz), delAddr), distrtypes.NewDelegatorStartingInfo(2, math.LegacyOneDec(), 20))) + require.NoError(t, f.stakingKeeper.SetDelegation(f.ctx, delegation)) + require.NoError(t, f.distrKeeper.DelegatorStartingInfo.Set(f.ctx, collections.Join(sdk.ValAddress(valBz), delAddr), distrtypes.NewDelegatorStartingInfo(2, math.LegacyOneDec(), 20))) // setup validator rewards decCoins := sdk.DecCoins{sdk.NewDecCoinFromDec(sdk.DefaultBondDenom, math.LegacyOneDec())} historicalRewards := distrtypes.NewValidatorHistoricalRewards(decCoins, 2) - err = f.distrKeeper.ValidatorHistoricalRewards.Set(f.sdkCtx, collections.Join(sdk.ValAddress(valBz), uint64(2)), historicalRewards) + err = f.distrKeeper.ValidatorHistoricalRewards.Set(f.ctx, collections.Join(sdk.ValAddress(valBz), uint64(2)), historicalRewards) require.NoError(t, err) // setup current rewards and outstanding rewards currentRewards := distrtypes.NewValidatorCurrentRewards(decCoins, 3) - err = f.distrKeeper.ValidatorCurrentRewards.Set(f.sdkCtx, f.valAddr, currentRewards) + err = f.distrKeeper.ValidatorCurrentRewards.Set(f.ctx, f.valAddr, currentRewards) require.NoError(t, err) - err = f.distrKeeper.ValidatorOutstandingRewards.Set(f.sdkCtx, 
f.valAddr, distrtypes.ValidatorOutstandingRewards{Rewards: valCommission}) + err = f.distrKeeper.ValidatorOutstandingRewards.Set(f.ctx, f.valAddr, distrtypes.ValidatorOutstandingRewards{Rewards: valCommission}) require.NoError(t, err) testCases := []struct { @@ -320,12 +141,17 @@ func TestMsgWithdrawDelegatorReward(t *testing.T) { }, } height := f.app.LastBlockHeight() + msgServer := distrkeeper.NewMsgServerImpl(f.distrKeeper) for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { res, err := f.app.RunMsg( - tc.msg, - integration.WithAutomaticFinalizeBlock(), + t, + f.ctx, + func(ctx context.Context) (transaction.Msg, error) { + resp, e := msgServer.WithdrawDelegatorReward(ctx, tc.msg) + return resp, e + }, integration.WithAutomaticCommit(), ) @@ -341,17 +167,17 @@ func TestMsgWithdrawDelegatorReward(t *testing.T) { assert.Assert(t, res != nil) // check the result - result := distrtypes.MsgWithdrawDelegatorRewardResponse{} - err := f.cdc.Unmarshal(res.Value, &result) - assert.NilError(t, err) + _, ok := res.(*distrtypes.MsgWithdrawDelegatorRewardResponse) + assert.Assert(t, ok, true) // check current balance is greater than initial balance - curBalance := f.bankKeeper.GetAllBalances(f.sdkCtx, sdk.AccAddress(f.valAddr)) + curBalance := f.bankKeeper.GetAllBalances(f.ctx, sdk.AccAddress(f.valAddr)) assert.Assert(t, initBalance.IsAllLTE(curBalance)) } var previousTotalPower int64 - for _, vote := range f.sdkCtx.CometInfo().LastCommit.Votes { + cometInfo := f.ctx.Value(corecontext.CometInfoKey).(comet.Info) + for _, vote := range cometInfo.LastCommit.Votes { previousTotalPower += vote.Validator.Power } assert.Equal(t, previousTotalPower, int64(100)) @@ -361,9 +187,9 @@ func TestMsgWithdrawDelegatorReward(t *testing.T) { func TestMsgSetWithdrawAddress(t *testing.T) { t.Parallel() - f := initFixture(t) + f := createTestFixture(t) - require.NoError(t, f.distrKeeper.Params.Set(f.sdkCtx, distrtypes.DefaultParams())) + require.NoError(t, 
f.distrKeeper.Params.Set(f.ctx, distrtypes.DefaultParams())) delAddr := sdk.AccAddress(PKS[0].Address()) withdrawAddr := sdk.AccAddress(PKS[1].Address()) @@ -378,9 +204,9 @@ func TestMsgSetWithdrawAddress(t *testing.T) { { name: "empty delegator address", preRun: func() { - params, _ := f.distrKeeper.Params.Get(f.sdkCtx) + params, _ := f.distrKeeper.Params.Get(f.ctx) params.WithdrawAddrEnabled = true - assert.NilError(t, f.distrKeeper.Params.Set(f.sdkCtx, params)) + assert.NilError(t, f.distrKeeper.Params.Set(f.ctx, params)) }, msg: &distrtypes.MsgSetWithdrawAddress{ DelegatorAddress: emptyDelAddr.String(), @@ -392,9 +218,9 @@ func TestMsgSetWithdrawAddress(t *testing.T) { { name: "empty withdraw address", preRun: func() { - params, _ := f.distrKeeper.Params.Get(f.sdkCtx) + params, _ := f.distrKeeper.Params.Get(f.ctx) params.WithdrawAddrEnabled = true - assert.NilError(t, f.distrKeeper.Params.Set(f.sdkCtx, params)) + assert.NilError(t, f.distrKeeper.Params.Set(f.ctx, params)) }, msg: &distrtypes.MsgSetWithdrawAddress{ DelegatorAddress: delAddr.String(), @@ -406,9 +232,9 @@ func TestMsgSetWithdrawAddress(t *testing.T) { { name: "both empty addresses", preRun: func() { - params, _ := f.distrKeeper.Params.Get(f.sdkCtx) + params, _ := f.distrKeeper.Params.Get(f.ctx) params.WithdrawAddrEnabled = true - assert.NilError(t, f.distrKeeper.Params.Set(f.sdkCtx, params)) + assert.NilError(t, f.distrKeeper.Params.Set(f.ctx, params)) }, msg: &distrtypes.MsgSetWithdrawAddress{ DelegatorAddress: emptyDelAddr.String(), @@ -420,9 +246,9 @@ func TestMsgSetWithdrawAddress(t *testing.T) { { name: "withdraw address disabled", preRun: func() { - params, _ := f.distrKeeper.Params.Get(f.sdkCtx) + params, _ := f.distrKeeper.Params.Get(f.ctx) params.WithdrawAddrEnabled = false - assert.NilError(t, f.distrKeeper.Params.Set(f.sdkCtx, params)) + assert.NilError(t, f.distrKeeper.Params.Set(f.ctx, params)) }, msg: &distrtypes.MsgSetWithdrawAddress{ DelegatorAddress: delAddr.String(), @@ -434,9 
+260,9 @@ func TestMsgSetWithdrawAddress(t *testing.T) { { name: "valid msg with same delegator and withdraw address", preRun: func() { - params, _ := f.distrKeeper.Params.Get(f.sdkCtx) + params, _ := f.distrKeeper.Params.Get(f.ctx) params.WithdrawAddrEnabled = true - assert.NilError(t, f.distrKeeper.Params.Set(f.sdkCtx, params)) + assert.NilError(t, f.distrKeeper.Params.Set(f.ctx, params)) }, msg: &distrtypes.MsgSetWithdrawAddress{ DelegatorAddress: delAddr.String(), @@ -447,9 +273,9 @@ func TestMsgSetWithdrawAddress(t *testing.T) { { name: "valid msg", preRun: func() { - params, _ := f.distrKeeper.Params.Get(f.sdkCtx) + params, _ := f.distrKeeper.Params.Get(f.ctx) params.WithdrawAddrEnabled = true - assert.NilError(t, f.distrKeeper.Params.Set(f.sdkCtx, params)) + assert.NilError(t, f.distrKeeper.Params.Set(f.ctx, params)) }, msg: &distrtypes.MsgSetWithdrawAddress{ DelegatorAddress: delAddr.String(), @@ -458,31 +284,37 @@ func TestMsgSetWithdrawAddress(t *testing.T) { expErr: false, }, } + + msgServer := distrkeeper.NewMsgServerImpl(f.distrKeeper) + for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { tc.preRun() res, err := f.app.RunMsg( - tc.msg, - integration.WithAutomaticFinalizeBlock(), + t, + f.ctx, + func(ctx context.Context) (transaction.Msg, error) { + resp, e := msgServer.SetWithdrawAddress(ctx, tc.msg) + return resp, e + }, integration.WithAutomaticCommit(), ) if tc.expErr { assert.ErrorContains(t, err, tc.expErrMsg) // query the delegator withdraw address - addr, _ := f.distrKeeper.GetDelegatorWithdrawAddr(f.sdkCtx, delAddr) + addr, _ := f.distrKeeper.GetDelegatorWithdrawAddr(f.ctx, delAddr) assert.DeepEqual(t, addr, delAddr) } else { assert.NilError(t, err) assert.Assert(t, res != nil) // check the result - result := distrtypes.MsgSetWithdrawAddressResponse{} - err = f.cdc.Unmarshal(res.Value, &result) - assert.NilError(t, err) + _, ok := res.(*distrtypes.MsgSetWithdrawAddressResponse) + assert.Assert(t, ok, true) // query the delegator 
withdraw address - addr, _ := f.distrKeeper.GetDelegatorWithdrawAddr(f.sdkCtx, delAddr) + addr, _ := f.distrKeeper.GetDelegatorWithdrawAddr(f.ctx, delAddr) assert.DeepEqual(t, addr.String(), tc.msg.WithdrawAddress) } }) @@ -491,7 +323,7 @@ func TestMsgSetWithdrawAddress(t *testing.T) { func TestMsgWithdrawValidatorCommission(t *testing.T) { t.Parallel() - f := initFixture(t) + f := createTestFixture(t) valCommission := sdk.DecCoins{ sdk.NewDecCoinFromDec("mytoken", math.LegacyNewDec(5).Quo(math.LegacyNewDec(4))), @@ -499,28 +331,28 @@ func TestMsgWithdrawValidatorCommission(t *testing.T) { } // set module account coins - initTokens := f.stakingKeeper.TokensFromConsensusPower(f.sdkCtx, int64(1000)) - err := f.bankKeeper.MintCoins(f.sdkCtx, distrtypes.ModuleName, sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, initTokens))) + initTokens := f.stakingKeeper.TokensFromConsensusPower(f.ctx, int64(1000)) + err := f.bankKeeper.MintCoins(f.ctx, distrtypes.ModuleName, sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, initTokens))) require.NoError(t, err) // send funds to val addr - err = f.bankKeeper.SendCoinsFromModuleToAccount(f.sdkCtx, distrtypes.ModuleName, sdk.AccAddress(f.valAddr), sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, initTokens))) + err = f.bankKeeper.SendCoinsFromModuleToAccount(f.ctx, distrtypes.ModuleName, sdk.AccAddress(f.valAddr), sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, initTokens))) require.NoError(t, err) coins := sdk.NewCoins(sdk.NewCoin("mytoken", math.NewInt(2)), sdk.NewCoin("stake", math.NewInt(2))) - err = f.bankKeeper.MintCoins(f.sdkCtx, distrtypes.ModuleName, coins) + err = f.bankKeeper.MintCoins(f.ctx, distrtypes.ModuleName, coins) require.NoError(t, err) // check initial balance - balance := f.bankKeeper.GetAllBalances(f.sdkCtx, sdk.AccAddress(f.valAddr)) - expTokens := f.stakingKeeper.TokensFromConsensusPower(f.sdkCtx, 1000) + balance := f.bankKeeper.GetAllBalances(f.ctx, sdk.AccAddress(f.valAddr)) + expTokens := 
f.stakingKeeper.TokensFromConsensusPower(f.ctx, 1000) expCoins := sdk.NewCoins(sdk.NewCoin("stake", expTokens)) assert.DeepEqual(t, expCoins, balance) // set outstanding rewards - err = f.distrKeeper.ValidatorOutstandingRewards.Set(f.sdkCtx, f.valAddr, distrtypes.ValidatorOutstandingRewards{Rewards: valCommission}) + err = f.distrKeeper.ValidatorOutstandingRewards.Set(f.ctx, f.valAddr, distrtypes.ValidatorOutstandingRewards{Rewards: valCommission}) require.NoError(t, err) // set commission - err = f.distrKeeper.ValidatorsAccumulatedCommission.Set(f.sdkCtx, f.valAddr, distrtypes.ValidatorAccumulatedCommission{Commission: valCommission}) + err = f.distrKeeper.ValidatorsAccumulatedCommission.Set(f.ctx, f.valAddr, distrtypes.ValidatorAccumulatedCommission{Commission: valCommission}) require.NoError(t, err) testCases := []struct { @@ -554,11 +386,17 @@ func TestMsgWithdrawValidatorCommission(t *testing.T) { }, } + msgServer := distrkeeper.NewMsgServerImpl(f.distrKeeper) + for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { res, err := f.app.RunMsg( - tc.msg, - integration.WithAutomaticFinalizeBlock(), + t, + f.ctx, + func(ctx context.Context) (transaction.Msg, error) { + resp, e := msgServer.WithdrawValidatorCommission(ctx, tc.msg) + return resp, e + }, integration.WithAutomaticCommit(), ) if tc.expErr { @@ -568,19 +406,18 @@ func TestMsgWithdrawValidatorCommission(t *testing.T) { assert.Assert(t, res != nil) // check the result - result := distrtypes.MsgWithdrawValidatorCommissionResponse{} - err = f.cdc.Unmarshal(res.Value, &result) - assert.NilError(t, err) + _, ok := res.(*distrtypes.MsgWithdrawValidatorCommissionResponse) + assert.Assert(t, ok, true) // check balance increase - balance = f.bankKeeper.GetAllBalances(f.sdkCtx, sdk.AccAddress(f.valAddr)) + balance = f.bankKeeper.GetAllBalances(f.ctx, sdk.AccAddress(f.valAddr)) assert.DeepEqual(t, sdk.NewCoins( sdk.NewCoin("mytoken", math.NewInt(1)), sdk.NewCoin("stake", expTokens.AddRaw(1)), ), balance) 
// check remainder - remainder, err := f.distrKeeper.ValidatorsAccumulatedCommission.Get(f.sdkCtx, f.valAddr) + remainder, err := f.distrKeeper.ValidatorsAccumulatedCommission.Get(f.ctx, f.valAddr) require.NoError(t, err) assert.DeepEqual(t, sdk.DecCoins{ sdk.NewDecCoinFromDec("mytoken", math.LegacyNewDec(1).Quo(math.LegacyNewDec(4))), @@ -593,21 +430,21 @@ func TestMsgWithdrawValidatorCommission(t *testing.T) { func TestMsgFundCommunityPool(t *testing.T) { t.Parallel() - f := initFixture(t) + f := createTestFixture(t) addr := sdk.AccAddress(PKS[0].Address()) addr2 := sdk.AccAddress(PKS[1].Address()) amount := sdk.NewCoins(sdk.NewInt64Coin("stake", 100)) - poolAcc := f.accountKeeper.GetModuleAccount(f.sdkCtx, pooltypes.ModuleName) + poolAcc := f.authKeeper.GetModuleAccount(f.ctx, pooltypes.ModuleName) // check that the pool account balance is empty - assert.Assert(t, f.bankKeeper.GetAllBalances(f.sdkCtx, poolAcc.GetAddress()).Empty()) + assert.Assert(t, f.bankKeeper.GetAllBalances(f.ctx, poolAcc.GetAddress()).Empty()) // fund the account by minting and sending amount from distribution module to addr - err := f.bankKeeper.MintCoins(f.sdkCtx, distrtypes.ModuleName, amount) + err := f.bankKeeper.MintCoins(f.ctx, distrtypes.ModuleName, amount) assert.NilError(t, err) - err = f.bankKeeper.SendCoinsFromModuleToAccount(f.sdkCtx, distrtypes.ModuleName, addr, amount) + err = f.bankKeeper.SendCoinsFromModuleToAccount(f.ctx, distrtypes.ModuleName, addr, amount) assert.NilError(t, err) testCases := []struct { @@ -652,11 +489,18 @@ func TestMsgFundCommunityPool(t *testing.T) { expErr: false, }, } + + msgServer := distrkeeper.NewMsgServerImpl(f.distrKeeper) + for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { res, err := f.app.RunMsg( - tc.msg, - integration.WithAutomaticFinalizeBlock(), + t, + f.ctx, + func(ctx context.Context) (transaction.Msg, error) { + res, err := msgServer.FundCommunityPool(ctx, tc.msg) //nolint:staticcheck // we're using a deprecated call 
+ return res, err + }, integration.WithAutomaticCommit(), ) if tc.expErr { @@ -666,15 +510,14 @@ func TestMsgFundCommunityPool(t *testing.T) { assert.Assert(t, res != nil) // check the result - result := distrtypes.MsgFundCommunityPool{} //nolint:staticcheck // we're using a deprecated call - err = f.cdc.Unmarshal(res.Value, &result) - assert.NilError(t, err) + _, ok := res.(*distrtypes.MsgFundCommunityPoolResponse) //nolint:staticcheck // we're using a deprecated call + assert.Assert(t, ok, true) // query the community pool funds - poolBal := f.bankKeeper.GetAllBalances(f.sdkCtx, poolAcc.GetAddress()) + poolBal := f.bankKeeper.GetAllBalances(f.ctx, poolAcc.GetAddress()) assert.Assert(t, poolBal.Equal(amount)) - assert.Assert(t, f.bankKeeper.GetAllBalances(f.sdkCtx, addr).Empty()) + assert.Assert(t, f.bankKeeper.GetAllBalances(f.ctx, addr).Empty()) } }) } @@ -682,7 +525,7 @@ func TestMsgFundCommunityPool(t *testing.T) { func TestMsgUpdateParams(t *testing.T) { t.Parallel() - f := initFixture(t) + f := createTestFixture(t) // default params communityTax := math.LegacyNewDecWithPrec(2, 2) // 2% @@ -793,11 +636,17 @@ func TestMsgUpdateParams(t *testing.T) { }, } + msgServer := distrkeeper.NewMsgServerImpl(f.distrKeeper) + for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { res, err := f.app.RunMsg( - tc.msg, - integration.WithAutomaticFinalizeBlock(), + t, + f.ctx, + func(ctx context.Context) (transaction.Msg, error) { + resp, e := msgServer.UpdateParams(ctx, tc.msg) + return resp, e + }, integration.WithAutomaticCommit(), ) if tc.expErr { @@ -807,12 +656,11 @@ func TestMsgUpdateParams(t *testing.T) { assert.Assert(t, res != nil) // check the result - result := distrtypes.MsgUpdateParams{} - err = f.cdc.Unmarshal(res.Value, &result) - assert.NilError(t, err) + _, ok := res.(*distrtypes.MsgUpdateParamsResponse) + assert.Assert(t, ok, true) // query the params and verify it has been updated - params, _ := f.distrKeeper.Params.Get(f.sdkCtx) + params, _ := 
f.distrKeeper.Params.Get(f.ctx) assert.DeepEqual(t, distrtypes.DefaultParams(), params) } }) @@ -821,20 +669,20 @@ func TestMsgUpdateParams(t *testing.T) { func TestMsgCommunityPoolSpend(t *testing.T) { t.Parallel() - f := initFixture(t) + f := createTestFixture(t) - initTokens := f.stakingKeeper.TokensFromConsensusPower(f.sdkCtx, int64(100)) - err := f.bankKeeper.MintCoins(f.sdkCtx, distrtypes.ModuleName, sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, initTokens))) + initTokens := f.stakingKeeper.TokensFromConsensusPower(f.ctx, int64(100)) + err := f.bankKeeper.MintCoins(f.ctx, distrtypes.ModuleName, sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, initTokens))) require.NoError(t, err) // fund pool module account amount := sdk.NewCoins(sdk.NewInt64Coin("stake", 100)) - poolAcc := f.accountKeeper.GetModuleAccount(f.sdkCtx, pooltypes.ModuleName) - err = f.bankKeeper.SendCoinsFromModuleToModule(f.sdkCtx, distrtypes.ModuleName, poolAcc.GetName(), amount) + poolAcc := f.authKeeper.GetModuleAccount(f.ctx, pooltypes.ModuleName) + err = f.bankKeeper.SendCoinsFromModuleToModule(f.ctx, distrtypes.ModuleName, poolAcc.GetName(), amount) require.NoError(t, err) // query the community pool to verify it has been updated with balance - poolBal := f.bankKeeper.GetAllBalances(f.sdkCtx, poolAcc.GetAddress()) + poolBal := f.bankKeeper.GetAllBalances(f.ctx, poolAcc.GetAddress()) assert.Assert(t, poolBal.Equal(amount)) recipient := sdk.AccAddress([]byte("addr1")) @@ -875,11 +723,18 @@ func TestMsgCommunityPoolSpend(t *testing.T) { expErr: false, }, } + + msgServer := distrkeeper.NewMsgServerImpl(f.distrKeeper) + for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { res, err := f.app.RunMsg( - tc.msg, - integration.WithAutomaticFinalizeBlock(), + t, + f.ctx, + func(ctx context.Context) (transaction.Msg, error) { + res, e := msgServer.CommunityPoolSpend(ctx, tc.msg) //nolint:staticcheck // we're using a deprecated call + return res, e + }, integration.WithAutomaticCommit(), ) 
if tc.expErr { @@ -889,12 +744,11 @@ func TestMsgCommunityPoolSpend(t *testing.T) { assert.Assert(t, res != nil) // check the result - result := distrtypes.MsgCommunityPoolSpend{} //nolint:staticcheck // we're using a deprecated call - err = f.cdc.Unmarshal(res.Value, &result) - assert.NilError(t, err) + _, ok := res.(*distrtypes.MsgCommunityPoolSpendResponse) //nolint:staticcheck // we're using a deprecated call + assert.Assert(t, ok, true) // query the community pool to verify it has been updated - poolBal := f.bankKeeper.GetAllBalances(f.sdkCtx, poolAcc.GetAddress()) + poolBal := f.bankKeeper.GetAllBalances(f.ctx, poolAcc.GetAddress()) assert.Assert(t, poolBal.Empty()) } @@ -904,41 +758,42 @@ func TestMsgCommunityPoolSpend(t *testing.T) { func TestMsgDepositValidatorRewardsPool(t *testing.T) { t.Parallel() - f := initFixture(t) + f := createTestFixture(t) - require.NoError(t, f.distrKeeper.Params.Set(f.sdkCtx, distrtypes.DefaultParams())) - err := f.distrKeeper.FeePool.Set(f.sdkCtx, distrtypes.FeePool{ + require.NoError(t, f.distrKeeper.Params.Set(f.ctx, distrtypes.DefaultParams())) + err := f.distrKeeper.FeePool.Set(f.ctx, distrtypes.FeePool{ CommunityPool: sdk.NewDecCoins(sdk.DecCoin{Denom: "stake", Amount: math.LegacyNewDec(100)}), }) require.NoError(t, err) - initTokens := f.stakingKeeper.TokensFromConsensusPower(f.sdkCtx, int64(10000)) - require.NoError(t, f.bankKeeper.MintCoins(f.sdkCtx, distrtypes.ModuleName, sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, initTokens)))) + initTokens := f.stakingKeeper.TokensFromConsensusPower(f.ctx, int64(10000)) + require.NoError(t, f.bankKeeper.MintCoins(f.ctx, distrtypes.ModuleName, sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, initTokens)))) // Set default staking params - require.NoError(t, f.stakingKeeper.Params.Set(f.sdkCtx, stakingtypes.DefaultParams())) + require.NoError(t, f.stakingKeeper.Params.Set(f.ctx, stakingtypes.DefaultParams())) addr := sdk.AccAddress("addr") addr1 := sdk.AccAddress(PKS[0].Address()) 
valAddr1 := sdk.ValAddress(addr1) // send funds to val addr - tokens := f.stakingKeeper.TokensFromConsensusPower(f.sdkCtx, int64(1000)) - err = f.bankKeeper.SendCoinsFromModuleToAccount(f.sdkCtx, distrtypes.ModuleName, sdk.AccAddress(valAddr1), sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, tokens))) + tokens := f.stakingKeeper.TokensFromConsensusPower(f.ctx, int64(1000)) + err = f.bankKeeper.SendCoinsFromModuleToAccount(f.ctx, distrtypes.ModuleName, sdk.AccAddress(valAddr1), sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, tokens))) require.NoError(t, err) // send funds from module to addr to perform DepositValidatorRewardsPool - err = f.bankKeeper.SendCoinsFromModuleToAccount(f.sdkCtx, distrtypes.ModuleName, addr, sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, tokens))) - f.accountKeeper.SetAccount(f.sdkCtx, f.accountKeeper.NewAccountWithAddress(f.sdkCtx, sdk.AccAddress(valAddr1))) + err = f.bankKeeper.SendCoinsFromModuleToAccount(f.ctx, distrtypes.ModuleName, addr, sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, tokens))) + f.authKeeper.SetAccount(f.ctx, f.authKeeper.NewAccountWithAddress(f.ctx, sdk.AccAddress(valAddr1))) require.NoError(t, err) - tstaking := stakingtestutil.NewHelper(t, f.sdkCtx, f.stakingKeeper) + + tstaking := stakingtestutil.NewHelper(t, f.ctx, f.stakingKeeper) tstaking.Commission = stakingtypes.NewCommissionRates(math.LegacyNewDecWithPrec(5, 1), math.LegacyNewDecWithPrec(5, 1), math.LegacyNewDec(0)) tstaking.CreateValidator(valAddr1, valConsPk0, math.NewInt(100), true) // mint a non-staking token and send to an account amt := sdk.NewCoins(sdk.NewInt64Coin("foo", 500)) - require.NoError(t, f.bankKeeper.MintCoins(f.sdkCtx, distrtypes.ModuleName, amt)) - require.NoError(t, f.bankKeeper.SendCoinsFromModuleToAccount(f.sdkCtx, distrtypes.ModuleName, addr, amt)) + require.NoError(t, f.bankKeeper.MintCoins(f.ctx, distrtypes.ModuleName, amt)) + require.NoError(t, f.bankKeeper.SendCoinsFromModuleToAccount(f.ctx, distrtypes.ModuleName, addr, amt)) - 
bondDenom, err := f.stakingKeeper.BondDenom(f.sdkCtx) + bondDenom, err := f.stakingKeeper.BondDenom(f.ctx) require.NoError(t, err) testCases := []struct { @@ -975,11 +830,17 @@ func TestMsgDepositValidatorRewardsPool(t *testing.T) { }, } + msgServer := distrkeeper.NewMsgServerImpl(f.distrKeeper) + for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { res, err := f.app.RunMsg( - tc.msg, - integration.WithAutomaticFinalizeBlock(), + t, + f.ctx, + func(ctx context.Context) (transaction.Msg, error) { + resp, e := msgServer.DepositValidatorRewardsPool(ctx, tc.msg) + return resp, e + }, integration.WithAutomaticCommit(), ) if tc.expErr { @@ -989,15 +850,14 @@ func TestMsgDepositValidatorRewardsPool(t *testing.T) { assert.Assert(t, res != nil) // check the result - result := distrtypes.MsgDepositValidatorRewardsPoolResponse{} - err = f.cdc.Unmarshal(res.Value, &result) - assert.NilError(t, err) + _, ok := res.(*distrtypes.MsgDepositValidatorRewardsPoolResponse) + assert.Assert(t, ok, true) val, err := sdk.ValAddressFromBech32(tc.msg.ValidatorAddress) assert.NilError(t, err) // check validator outstanding rewards - outstandingRewards, err := f.distrKeeper.ValidatorOutstandingRewards.Get(f.sdkCtx, val) + outstandingRewards, err := f.distrKeeper.ValidatorOutstandingRewards.Get(f.ctx, val) assert.NilError(t, err) for _, c := range tc.msg.Amount { x := outstandingRewards.Rewards.AmountOf(c.Denom) diff --git a/tests/integration/v2/services.go b/tests/integration/v2/services.go index 8cffbc0445ee..0773ecca3900 100644 --- a/tests/integration/v2/services.go +++ b/tests/integration/v2/services.go @@ -11,6 +11,7 @@ import ( "cosmossdk.io/core/comet" "cosmossdk.io/core/event" "cosmossdk.io/core/gas" + "cosmossdk.io/core/header" "cosmossdk.io/core/router" "cosmossdk.io/core/server" corestore "cosmossdk.io/core/store" @@ -69,6 +70,7 @@ var contextKey = contextKeyType{} type integrationContext struct { state corestore.WriterMap gasMeter gas.Meter + header header.Info } func 
GasMeterFromContext(ctx context.Context) gas.Meter { @@ -196,3 +198,15 @@ func (rs RouterService) Invoke(ctx context.Context, req transaction.Msg) (transa } return rs.handlers[typeUrl](ctx, req) } + +var _ header.Service = &HeaderService{} + +type HeaderService struct{} + +func (h *HeaderService) HeaderInfo(ctx context.Context) header.Info { + iCtx, ok := ctx.Value(contextKey).(*integrationContext) + if !ok { + return header.Info{} + } + return iCtx.header +} diff --git a/testutil/configurator/configurator.go b/testutil/configurator/configurator.go index 98b248cb361b..82399afa10bd 100644 --- a/testutil/configurator/configurator.go +++ b/testutil/configurator/configurator.go @@ -163,7 +163,7 @@ func AuthModule() ModuleOption { Bech32Prefix: "cosmos", ModuleAccountPermissions: []*authmodulev1.ModuleAccountPermission{ {Account: "fee_collector"}, - {Account: testutil.DistributionModuleName}, + {Account: testutil.DistributionModuleName, Permissions: []string{"minter"}}, {Account: testutil.MintModuleName, Permissions: []string{"minter"}}, {Account: "bonded_tokens_pool", Permissions: []string{"burner", testutil.StakingModuleName}}, {Account: "not_bonded_tokens_pool", Permissions: []string{"burner", testutil.StakingModuleName}}, @@ -178,6 +178,18 @@ func AuthModule() ModuleOption { } } +func AuthModuleWithMaccPerms(maccPerms []*authmodulev1.ModuleAccountPermission) ModuleOption { + return func(config *Config) { + config.ModuleConfigs[testutil.AuthModuleName] = &appv1alpha1.ModuleConfig{ + Name: testutil.AuthModuleName, + Config: appconfig.WrapAny(&authmodulev1.Module{ + Bech32Prefix: "cosmos", + ModuleAccountPermissions: maccPerms, + }), + } + } +} + func ParamsModule() ModuleOption { return func(config *Config) { config.ModuleConfigs[testutil.ParamsModuleName] = &appv1alpha1.ModuleConfig{ diff --git a/x/staking/testutil/helpers.go b/x/staking/testutil/helpers.go index 8976c6a684b5..65a95158d482 100644 --- a/x/staking/testutil/helpers.go +++ b/x/staking/testutil/helpers.go 
@@ -23,14 +23,14 @@ type Helper struct { msgSrvr stakingtypes.MsgServer k *keeper.Keeper - Ctx sdk.Context + Ctx context.Context Commission stakingtypes.CommissionRates // Coin Denomination Denom string } // NewHelper creates a new instance of Helper. -func NewHelper(t *testing.T, ctx sdk.Context, k *keeper.Keeper) *Helper { +func NewHelper(t *testing.T, ctx context.Context, k *keeper.Keeper) *Helper { t.Helper() return &Helper{t, keeper.NewMsgServerImpl(k), k, ctx, ZeroCommission(), sdk.DefaultBondDenom} } @@ -126,19 +126,21 @@ func (sh *Helper) CheckValidator(addr sdk.ValAddress, status stakingtypes.BondSt // TurnBlock calls EndBlocker and updates the block time func (sh *Helper) TurnBlock(newTime time.Time) sdk.Context { - sh.Ctx = sh.Ctx.WithHeaderInfo(header.Info{Time: newTime}) + sdkCtx := sdk.UnwrapSDKContext(sh.Ctx) + sh.Ctx = sdkCtx.WithHeaderInfo(header.Info{Time: newTime}) _, err := sh.k.EndBlocker(sh.Ctx) require.NoError(sh.t, err) - return sh.Ctx + return sdkCtx } // TurnBlockTimeDiff calls EndBlocker and updates the block time by adding the // duration to the current block time func (sh *Helper) TurnBlockTimeDiff(diff time.Duration) sdk.Context { - sh.Ctx = sh.Ctx.WithHeaderInfo(header.Info{Time: sh.Ctx.HeaderInfo().Time.Add(diff)}) + sdkCtx := sdk.UnwrapSDKContext(sh.Ctx) + sh.Ctx = sdkCtx.WithHeaderInfo(header.Info{Time: sdkCtx.HeaderInfo().Time.Add(diff)}) _, err := sh.k.EndBlocker(sh.Ctx) require.NoError(sh.t, err) - return sh.Ctx + return sdkCtx } // ZeroCommission constructs a commission rates with all zeros. 
From 2964c5ef63968054eed587d520f3bedcea9ce41f Mon Sep 17 00:00:00 2001 From: johson-ll Date: Tue, 3 Dec 2024 18:34:13 +0800 Subject: [PATCH 11/17] test: add test case for memiterator.go (#22725) Co-authored-by: heren-ke --- store/cachekv/internal/memiterator_test.go | 73 ++++++++++++++++++++++ 1 file changed, 73 insertions(+) create mode 100644 store/cachekv/internal/memiterator_test.go diff --git a/store/cachekv/internal/memiterator_test.go b/store/cachekv/internal/memiterator_test.go new file mode 100644 index 000000000000..25e049cbe23a --- /dev/null +++ b/store/cachekv/internal/memiterator_test.go @@ -0,0 +1,73 @@ +package internal + +import ( + "testing" +) + +func TestMemIterator_Ascending(t *testing.T) { + db := NewBTree() + // db.set() + db.Set([]byte("a"), []byte("value_a")) + db.Set([]byte("b"), []byte("value_b")) + db.Set([]byte("c"), []byte("value_c")) + + iterator := newMemIterator([]byte("a"), []byte("c"), db, true) + + var result []string + for iterator.Valid() { + result = append(result, string(iterator.Key())) + iterator.Next() + } + + expected := []string{"a", "b", "c"} + for i, key := range result { + if key != expected[i] { + t.Errorf("Expected %s, got %s", expected[i], key) + } + } + + if iterator.Valid() { + t.Errorf("Iterator should be invalid after last item") + } +} + +func TestMemIterator_Descending(t *testing.T) { + db := NewBTree() + + db.Set([]byte("a"), []byte("value_a")) + db.Set([]byte("b"), []byte("value_b")) + db.Set([]byte("c"), []byte("value_c")) + db.Set([]byte("d"), []byte("value_d")) + + iterator := newMemIterator([]byte("a"), []byte("d"), db, false) + + var result []string + for iterator.Valid() { + result = append(result, string(iterator.Key())) + iterator.Next() + } + + expected := []string{"c", "b", "a"} + for i, key := range result { + if key != expected[i] { + t.Errorf("Expected %s, got %s", expected[i], key) + } + } + + if iterator.Valid() { + t.Errorf("Iterator should be invalid after last item") + } +} + +func 
TestMemIterator_EmptyRange(t *testing.T) { + db := NewBTree() + db.Set([]byte("a"), []byte("value_a")) + db.Set([]byte("b"), []byte("value_b")) + db.Set([]byte("c"), []byte("value_c")) + + iterator := newMemIterator([]byte("d"), []byte("e"), db, true) + + if iterator.Valid() { + t.Errorf("Iterator should be invalid for empty range") + } +} From 0e31188d50e45692cdfd8dfdae9b422016c297c8 Mon Sep 17 00:00:00 2001 From: Akhil Kumar P <36399231+akhilkumarpilli@users.noreply.github.com> Date: Tue, 3 Dec 2024 17:38:27 +0530 Subject: [PATCH 12/17] test(integration): port x/evidence tests to server v2 (#22709) --- tests/integration/evidence/app_config.go | 28 -- tests/integration/v2/app.go | 3 +- .../v2/distribution/module_test.go | 4 +- .../{ => v2}/evidence/genesis_test.go | 26 +- .../keeper => v2/evidence}/infraction_test.go | 246 ++++++------------ tests/integration/v2/services.go | 22 ++ 6 files changed, 118 insertions(+), 211 deletions(-) delete mode 100644 tests/integration/evidence/app_config.go rename tests/integration/{ => v2}/evidence/genesis_test.go (85%) rename tests/integration/{evidence/keeper => v2/evidence}/infraction_test.go (56%) diff --git a/tests/integration/evidence/app_config.go b/tests/integration/evidence/app_config.go deleted file mode 100644 index f328a31b056a..000000000000 --- a/tests/integration/evidence/app_config.go +++ /dev/null @@ -1,28 +0,0 @@ -package evidence_test - -import ( - _ "cosmossdk.io/x/accounts" // import as blank for app wiring - _ "cosmossdk.io/x/bank" // import as blank for app wiring - _ "cosmossdk.io/x/consensus" // import as blank for app wiring - _ "cosmossdk.io/x/evidence" // import as blank for app wiring - _ "cosmossdk.io/x/slashing" // import as blank for app wiring - _ "cosmossdk.io/x/staking" // import as blank for app wiring - - "github.com/cosmos/cosmos-sdk/testutil/configurator" - _ "github.com/cosmos/cosmos-sdk/x/auth" // import as blank for app wiring - _ 
"github.com/cosmos/cosmos-sdk/x/auth/tx/config" // import as blank for app wiring - _ "github.com/cosmos/cosmos-sdk/x/genutil" // import as blank for app wiring -) - -var AppConfig = configurator.NewAppConfig( - configurator.AccountsModule(), - configurator.AuthModule(), - configurator.BankModule(), - configurator.StakingModule(), - configurator.SlashingModule(), - configurator.TxModule(), - configurator.ValidateModule(), - configurator.ConsensusModule(), - configurator.EvidenceModule(), - configurator.GenutilModule(), -) diff --git a/tests/integration/v2/app.go b/tests/integration/v2/app.go index 257fc3c700b5..8eba29665cf3 100644 --- a/tests/integration/v2/app.go +++ b/tests/integration/v2/app.go @@ -335,7 +335,8 @@ func (a *App) Deliver( resp, state, err := a.DeliverBlock(ctx, req) require.NoError(t, err) a.lastHeight++ - // update block heigh if integeration context is present + + // update block height if integration context is present iCtx, ok := ctx.Value(contextKey).(*integrationContext) if ok { iCtx.header.Height = int64(a.lastHeight) diff --git a/tests/integration/v2/distribution/module_test.go b/tests/integration/v2/distribution/module_test.go index bb297800b8e5..75098bd1ec49 100644 --- a/tests/integration/v2/distribution/module_test.go +++ b/tests/integration/v2/distribution/module_test.go @@ -3,9 +3,11 @@ package distribution import ( "testing" + "gotest.tools/v3/assert" + "cosmossdk.io/x/distribution/types" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" - "gotest.tools/v3/assert" ) func TestItCreatesModuleAccountOnInitBlock(t *testing.T) { diff --git a/tests/integration/evidence/genesis_test.go b/tests/integration/v2/evidence/genesis_test.go similarity index 85% rename from tests/integration/evidence/genesis_test.go rename to tests/integration/v2/evidence/genesis_test.go index c2cb89c406fc..5e0cf68d66d6 100644 --- a/tests/integration/evidence/genesis_test.go +++ b/tests/integration/v2/evidence/genesis_test.go @@ -1,45 
+1,33 @@ -package evidence_test +package evidence import ( + "context" "fmt" "testing" "time" - "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "cosmossdk.io/depinject" - "cosmossdk.io/log" "cosmossdk.io/x/evidence" "cosmossdk.io/x/evidence/exported" "cosmossdk.io/x/evidence/keeper" "cosmossdk.io/x/evidence/types" "github.com/cosmos/cosmos-sdk/crypto/keys/ed25519" - simtestutil "github.com/cosmos/cosmos-sdk/testutil/sims" - sdk "github.com/cosmos/cosmos-sdk/types" ) type GenesisTestSuite struct { suite.Suite - ctx sdk.Context + ctx context.Context keeper keeper.Keeper } func (suite *GenesisTestSuite) SetupTest() { - var evidenceKeeper keeper.Keeper - - app, err := simtestutil.Setup( - depinject.Configs( - depinject.Supply(log.NewNopLogger()), - AppConfig, - ), - &evidenceKeeper) - require.NoError(suite.T(), err) - - suite.ctx = app.BaseApp.NewContext(false) - suite.keeper = evidenceKeeper + f := initFixture(suite.T()) + + suite.ctx = f.ctx + suite.keeper = f.evidenceKeeper } func (suite *GenesisTestSuite) TestInitGenesis() { diff --git a/tests/integration/evidence/keeper/infraction_test.go b/tests/integration/v2/evidence/infraction_test.go similarity index 56% rename from tests/integration/evidence/keeper/infraction_test.go rename to tests/integration/v2/evidence/infraction_test.go index 76f47fb59efe..16cd31f4e83d 100644 --- a/tests/integration/evidence/keeper/infraction_test.go +++ b/tests/integration/v2/evidence/infraction_test.go @@ -1,4 +1,4 @@ -package keeper_test +package evidence import ( "bytes" @@ -8,52 +8,45 @@ import ( "testing" "time" - "go.uber.org/mock/gomock" + "github.com/stretchr/testify/require" "gotest.tools/v3/assert" "cosmossdk.io/collections" - "cosmossdk.io/core/appmodule" "cosmossdk.io/core/comet" "cosmossdk.io/core/header" + "cosmossdk.io/depinject" "cosmossdk.io/log" - storetypes "cosmossdk.io/store/types" - "cosmossdk.io/x/bank" + 
"cosmossdk.io/runtime/v2/services" + _ "cosmossdk.io/x/accounts" // import as blank for app wiring + _ "cosmossdk.io/x/bank" // import as blank for app wiring bankkeeper "cosmossdk.io/x/bank/keeper" - banktypes "cosmossdk.io/x/bank/types" - "cosmossdk.io/x/consensus" - consensusparamkeeper "cosmossdk.io/x/consensus/keeper" - consensusparamtypes "cosmossdk.io/x/consensus/types" - "cosmossdk.io/x/evidence" + _ "cosmossdk.io/x/consensus" // import as blank for app wiring + consensuskeeper "cosmossdk.io/x/consensus/keeper" + _ "cosmossdk.io/x/evidence" // import as blank for app wiring "cosmossdk.io/x/evidence/exported" "cosmossdk.io/x/evidence/keeper" evidencetypes "cosmossdk.io/x/evidence/types" minttypes "cosmossdk.io/x/mint/types" - pooltypes "cosmossdk.io/x/protocolpool/types" - "cosmossdk.io/x/slashing" + _ "cosmossdk.io/x/slashing" // import as blank for app wiring slashingkeeper "cosmossdk.io/x/slashing/keeper" - "cosmossdk.io/x/slashing/testutil" slashingtypes "cosmossdk.io/x/slashing/types" - "cosmossdk.io/x/staking" + _ "cosmossdk.io/x/staking" // import as blank for app wiring stakingkeeper "cosmossdk.io/x/staking/keeper" stakingtestutil "cosmossdk.io/x/staking/testutil" stakingtypes "cosmossdk.io/x/staking/types" - "github.com/cosmos/cosmos-sdk/baseapp" "github.com/cosmos/cosmos-sdk/codec" - addresscodec "github.com/cosmos/cosmos-sdk/codec/address" - codectestutil "github.com/cosmos/cosmos-sdk/codec/testutil" "github.com/cosmos/cosmos-sdk/crypto/keys/ed25519" cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" - "github.com/cosmos/cosmos-sdk/runtime" - "github.com/cosmos/cosmos-sdk/testutil/integration" + "github.com/cosmos/cosmos-sdk/tests/integration/v2" + "github.com/cosmos/cosmos-sdk/testutil/configurator" simtestutil "github.com/cosmos/cosmos-sdk/testutil/sims" sdk "github.com/cosmos/cosmos-sdk/types" - moduletestutil 
"github.com/cosmos/cosmos-sdk/types/module/testutil" - "github.com/cosmos/cosmos-sdk/x/auth" + _ "github.com/cosmos/cosmos-sdk/x/auth" // import as blank for app wiring authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper" - authsims "github.com/cosmos/cosmos-sdk/x/auth/simulation" - authtestutil "github.com/cosmos/cosmos-sdk/x/auth/testutil" - authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + _ "github.com/cosmos/cosmos-sdk/x/auth/tx/config" // import as blank for app wiring + _ "github.com/cosmos/cosmos-sdk/x/auth/vesting" // import as blank for app wiring + _ "github.com/cosmos/cosmos-sdk/x/genutil" // import as blank for app wiring ) var ( @@ -72,137 +65,62 @@ var ( // The default power validators are initialized to have within tests initAmt = sdk.TokensFromConsensusPower(200, sdk.DefaultPowerReduction) initCoins = sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, initAmt)) - cometInfoService = runtime.NewContextAwareCometInfoService() + cometInfoService = &services.ContextAwareCometInfoService{} ) type fixture struct { app *integration.App - sdkCtx sdk.Context - cdc codec.Codec + ctx context.Context + cdc codec.Codec - accountKeeper authkeeper.AccountKeeper - bankKeeper bankkeeper.Keeper - evidenceKeeper *keeper.Keeper - slashingKeeper slashingkeeper.Keeper - stakingKeeper *stakingkeeper.Keeper + accountKeeper authkeeper.AccountKeeper + bankKeeper bankkeeper.Keeper + evidenceKeeper keeper.Keeper + slashingKeeper slashingkeeper.Keeper + stakingKeeper *stakingkeeper.Keeper + consensusKeeper consensuskeeper.Keeper } -func initFixture(tb testing.TB) *fixture { - tb.Helper() - keys := storetypes.NewKVStoreKeys( - authtypes.StoreKey, banktypes.StoreKey, consensusparamtypes.StoreKey, evidencetypes.StoreKey, stakingtypes.StoreKey, slashingtypes.StoreKey, - ) - encodingCfg := moduletestutil.MakeTestEncodingConfig(codectestutil.CodecOptions{}, auth.AppModule{}, evidence.AppModule{}) - 
cdc := encodingCfg.Codec - msgRouter := baseapp.NewMsgServiceRouter() - grpcQueryRouter := baseapp.NewGRPCQueryRouter() - - logger := log.NewTestLogger(tb) - authority := authtypes.NewModuleAddress("gov") - - // gomock initializations - ctrl := gomock.NewController(tb) - acctsModKeeper := authtestutil.NewMockAccountsModKeeper(ctrl) - accNum := uint64(0) - acctsModKeeper.EXPECT().NextAccountNumber(gomock.Any()).AnyTimes().DoAndReturn(func(ctx context.Context) (uint64, error) { - currentNum := accNum - accNum++ - return currentNum, nil - }) - - maccPerms := map[string][]string{ - pooltypes.ModuleName: {}, - minttypes.ModuleName: {authtypes.Minter}, - stakingtypes.BondedPoolName: {authtypes.Burner, authtypes.Staking}, - stakingtypes.NotBondedPoolName: {authtypes.Burner, authtypes.Staking}, +func initFixture(t *testing.T) *fixture { + t.Helper() + + res := fixture{} + + moduleConfigs := []configurator.ModuleOption{ + configurator.AccountsModule(), + configurator.AuthModule(), + configurator.BankModule(), + configurator.StakingModule(), + configurator.SlashingModule(), + configurator.TxModule(), + configurator.ValidateModule(), + configurator.ConsensusModule(), + configurator.EvidenceModule(), + configurator.GenutilModule(), } - accountKeeper := authkeeper.NewAccountKeeper( - runtime.NewEnvironment(runtime.NewKVStoreService(keys[authtypes.StoreKey]), log.NewNopLogger()), - cdc, - authtypes.ProtoBaseAccount, - acctsModKeeper, - maccPerms, - addresscodec.NewBech32Codec(sdk.Bech32MainPrefix), - sdk.Bech32MainPrefix, - authority.String(), - ) - - blockedAddresses := map[string]bool{ - accountKeeper.GetAuthority(): false, - } - bankKeeper := bankkeeper.NewBaseKeeper( - runtime.NewEnvironment(runtime.NewKVStoreService(keys[banktypes.StoreKey]), log.NewNopLogger()), - cdc, - accountKeeper, - blockedAddresses, - authority.String(), - ) + startupCfg := integration.DefaultStartUpConfig(t) + startupCfg.BranchService = &integration.BranchService{} + startupCfg.HeaderService = 
&integration.HeaderService{} - consensusParamsKeeper := consensusparamkeeper.NewKeeper(cdc, runtime.NewEnvironment(runtime.NewKVStoreService(keys[consensusparamtypes.StoreKey]), log.NewNopLogger(), runtime.EnvWithQueryRouterService(grpcQueryRouter), runtime.EnvWithMsgRouterService(msgRouter)), authtypes.NewModuleAddress("gov").String()) - - stakingKeeper := stakingkeeper.NewKeeper(cdc, runtime.NewEnvironment(runtime.NewKVStoreService(keys[stakingtypes.StoreKey]), log.NewNopLogger(), runtime.EnvWithQueryRouterService(grpcQueryRouter), runtime.EnvWithMsgRouterService(msgRouter)), accountKeeper, bankKeeper, consensusParamsKeeper, authority.String(), addresscodec.NewBech32Codec(sdk.Bech32PrefixValAddr), addresscodec.NewBech32Codec(sdk.Bech32PrefixConsAddr), runtime.NewContextAwareCometInfoService()) - - slashingKeeper := slashingkeeper.NewKeeper(runtime.NewEnvironment(runtime.NewKVStoreService(keys[slashingtypes.StoreKey]), log.NewNopLogger()), cdc, codec.NewLegacyAmino(), stakingKeeper, authority.String()) - - stakingKeeper.SetHooks(stakingtypes.NewMultiStakingHooks(slashingKeeper.Hooks())) - - evidenceKeeper := keeper.NewKeeper(cdc, runtime.NewEnvironment(runtime.NewKVStoreService(keys[evidencetypes.StoreKey]), log.NewNopLogger(), runtime.EnvWithQueryRouterService(grpcQueryRouter), runtime.EnvWithMsgRouterService(msgRouter)), stakingKeeper, slashingKeeper, consensusParamsKeeper, addresscodec.NewBech32Codec(sdk.Bech32PrefixAccAddr), stakingKeeper.ConsensusAddressCodec()) - router := evidencetypes.NewRouter() - router = router.AddRoute(evidencetypes.RouteEquivocation, testEquivocationHandler(evidenceKeeper)) - evidenceKeeper.SetRouter(router) - - authModule := auth.NewAppModule(cdc, accountKeeper, acctsModKeeper, authsims.RandomGenesisAccounts, nil) - bankModule := bank.NewAppModule(cdc, bankKeeper, accountKeeper) - stakingModule := staking.NewAppModule(cdc, stakingKeeper) - slashingModule := slashing.NewAppModule(cdc, slashingKeeper, accountKeeper, bankKeeper, 
stakingKeeper, cdc.InterfaceRegistry(), cometInfoService) - evidenceModule := evidence.NewAppModule(cdc, *evidenceKeeper, cometInfoService) - consensusModule := consensus.NewAppModule(cdc, consensusParamsKeeper) - - integrationApp := integration.NewIntegrationApp(logger, keys, cdc, - encodingCfg.InterfaceRegistry.SigningContext().AddressCodec(), - encodingCfg.InterfaceRegistry.SigningContext().ValidatorAddressCodec(), - map[string]appmodule.AppModule{ - authtypes.ModuleName: authModule, - banktypes.ModuleName: bankModule, - stakingtypes.ModuleName: stakingModule, - slashingtypes.ModuleName: slashingModule, - evidencetypes.ModuleName: evidenceModule, - consensusparamtypes.ModuleName: consensusModule, - }, - msgRouter, - grpcQueryRouter, - ) + var err error + res.app, err = integration.NewApp( + depinject.Configs(configurator.NewAppV2Config(moduleConfigs...), depinject.Supply(log.NewNopLogger())), + startupCfg, + &res.bankKeeper, &res.accountKeeper, &res.stakingKeeper, &res.slashingKeeper, &res.evidenceKeeper, &res.consensusKeeper, &res.cdc) + require.NoError(t, err) - sdkCtx := sdk.UnwrapSDKContext(integrationApp.Context()) + res.ctx = res.app.StateLatestContext(t) - // Register MsgServer and QueryServer - evidencetypes.RegisterMsgServer(integrationApp.MsgServiceRouter(), keeper.NewMsgServerImpl(*evidenceKeeper)) - evidencetypes.RegisterQueryServer(integrationApp.QueryHelper(), keeper.NewQuerier(evidenceKeeper)) - - assert.NilError(tb, slashingKeeper.Params.Set(sdkCtx, testutil.TestParams())) - - // set default staking params - assert.NilError(tb, stakingKeeper.Params.Set(sdkCtx, stakingtypes.DefaultParams())) - - return &fixture{ - app: integrationApp, - sdkCtx: sdkCtx, - cdc: cdc, - accountKeeper: accountKeeper, - bankKeeper: bankKeeper, - evidenceKeeper: evidenceKeeper, - slashingKeeper: slashingKeeper, - stakingKeeper: stakingKeeper, - } + return &res } func TestHandleDoubleSign(t *testing.T) { t.Parallel() f := initFixture(t) - ctx := 
f.sdkCtx.WithIsCheckTx(false).WithBlockHeight(1) + ctx := f.ctx populateValidators(t, f) power := int64(100) @@ -210,11 +128,11 @@ func TestHandleDoubleSign(t *testing.T) { assert.NilError(t, err) operatorAddr, valpubkey := valAddresses[0], pubkeys[0] tstaking := stakingtestutil.NewHelper(t, ctx, f.stakingKeeper) - f.accountKeeper.SetAccount(f.sdkCtx, f.accountKeeper.NewAccountWithAddress(f.sdkCtx, sdk.AccAddress(operatorAddr))) + f.accountKeeper.SetAccount(f.ctx, f.accountKeeper.NewAccountWithAddress(f.ctx, sdk.AccAddress(operatorAddr))) selfDelegation := tstaking.CreateValidatorWithValPower(operatorAddr, valpubkey, power, true) // execute end-blocker and verify validator attributes - _, err = f.stakingKeeper.EndBlocker(f.sdkCtx) + _, err = f.stakingKeeper.EndBlocker(f.ctx) assert.NilError(t, err) assert.DeepEqual(t, f.bankKeeper.GetAllBalances(ctx, sdk.AccAddress(operatorAddr)).String(), @@ -224,12 +142,13 @@ func TestHandleDoubleSign(t *testing.T) { assert.NilError(t, err) assert.DeepEqual(t, selfDelegation, val.GetBondedTokens()) - assert.NilError(t, f.slashingKeeper.AddrPubkeyRelation.Set(f.sdkCtx, valpubkey.Address(), valpubkey)) + assert.NilError(t, f.slashingKeeper.AddrPubkeyRelation.Set(f.ctx, valpubkey.Address(), valpubkey)) consaddrStr, err := f.stakingKeeper.ConsensusAddressCodec().BytesToString(valpubkey.Address()) assert.NilError(t, err) - info := slashingtypes.NewValidatorSigningInfo(consaddrStr, f.sdkCtx.BlockHeight(), time.Unix(0, 0), false, int64(0)) - err = f.slashingKeeper.ValidatorSigningInfo.Set(f.sdkCtx, sdk.ConsAddress(valpubkey.Address()), info) + height := f.app.LastBlockHeight() + info := slashingtypes.NewValidatorSigningInfo(consaddrStr, int64(height), time.Unix(0, 0), false, int64(0)) + err = f.slashingKeeper.ValidatorSigningInfo.Set(f.ctx, sdk.ConsAddress(valpubkey.Address()), info) assert.NilError(t, err) // handle a signature to set signing info err = f.slashingKeeper.HandleValidatorSignature(ctx, valpubkey.Address(), 
selfDelegation.Int64(), comet.BlockIDFlagCommit) @@ -248,8 +167,8 @@ func TestHandleDoubleSign(t *testing.T) { }}, } - ctx = ctx.WithCometInfo(nci) - assert.NilError(t, f.evidenceKeeper.BeginBlocker(ctx.WithCometInfo(nci), cometInfoService)) + ctx = integration.SetCometInfo(ctx, nci) + assert.NilError(t, f.evidenceKeeper.BeginBlocker(ctx, cometInfoService)) // should be jailed and tombstoned val, err = f.stakingKeeper.Validator(ctx, operatorAddr) @@ -270,13 +189,12 @@ func TestHandleDoubleSign(t *testing.T) { assert.Assert(t, val.GetTokens().Equal(newTokens)) // jump to past the unbonding period - ctx = ctx.WithHeaderInfo(header.Info{Time: time.Unix(1, 0).Add(stakingParams.UnbondingTime)}) + ctx = integration.SetHeaderInfo(ctx, header.Info{Time: time.Unix(1, 0).Add(stakingParams.UnbondingTime)}) // require we cannot unjail assert.Error(t, f.slashingKeeper.Unjail(ctx, operatorAddr), slashingtypes.ErrValidatorJailed.Error()) // require we be able to unbond now - ctx = ctx.WithBlockHeight(ctx.BlockHeight() + 1) del, _ := f.stakingKeeper.Delegations.Get(ctx, collections.Join(sdk.AccAddress(operatorAddr), operatorAddr)) validator, _ := f.stakingKeeper.GetValidator(ctx, operatorAddr) totalBond := validator.TokensFromShares(del.GetShares()).TruncateInt() @@ -300,7 +218,7 @@ func TestHandleDoubleSign_TooOld(t *testing.T) { t.Parallel() f := initFixture(t) - ctx := f.sdkCtx.WithIsCheckTx(false).WithHeaderInfo(header.Info{Height: 1, Time: time.Now()}) + ctx := integration.SetHeaderInfo(f.ctx, header.Info{Height: 1, Time: time.Now()}) populateValidators(t, f) power := int64(100) @@ -309,11 +227,11 @@ func TestHandleDoubleSign_TooOld(t *testing.T) { operatorAddr, valpubkey := valAddresses[0], pubkeys[0] tstaking := stakingtestutil.NewHelper(t, ctx, f.stakingKeeper) - f.accountKeeper.SetAccount(f.sdkCtx, f.accountKeeper.NewAccountWithAddress(f.sdkCtx, sdk.AccAddress(operatorAddr))) + f.accountKeeper.SetAccount(f.ctx, f.accountKeeper.NewAccountWithAddress(f.ctx, 
sdk.AccAddress(operatorAddr))) amt := tstaking.CreateValidatorWithValPower(operatorAddr, valpubkey, power, true) // execute end-blocker and verify validator attributes - _, err = f.stakingKeeper.EndBlocker(f.sdkCtx) + _, err = f.stakingKeeper.EndBlocker(f.ctx) assert.NilError(t, err) assert.DeepEqual(t, f.bankKeeper.GetAllBalances(ctx, sdk.AccAddress(operatorAddr)), @@ -326,16 +244,19 @@ func TestHandleDoubleSign_TooOld(t *testing.T) { nci := comet.Info{Evidence: []comet.Evidence{{ Validator: comet.Validator{Address: valpubkey.Address(), Power: power}, Type: comet.DuplicateVote, // - Time: ctx.HeaderInfo().Time, + Time: integration.HeaderInfoFromContext(ctx).Time, Height: 0, }}} - assert.NilError(t, f.app.BaseApp.StoreConsensusParams(ctx, *simtestutil.DefaultConsensusParams)) - cp := f.app.BaseApp.GetConsensusParams(ctx) + require.NotNil(t, f.consensusKeeper.ParamsStore) + require.NoError(t, f.consensusKeeper.ParamsStore.Set(ctx, *simtestutil.DefaultConsensusParams)) + cp, err := f.consensusKeeper.ParamsStore.Get(ctx) - ctx = ctx.WithCometInfo(nci) - ctx = ctx.WithConsensusParams(cp) - ctx = ctx.WithHeaderInfo(header.Info{Height: ctx.BlockHeight() + cp.Evidence.MaxAgeNumBlocks + 1, Time: ctx.HeaderInfo().Time.Add(cp.Evidence.MaxAgeDuration + 1)}) + ctx = integration.SetCometInfo(ctx, nci) + ctx = integration.SetHeaderInfo(ctx, header.Info{ + Height: int64(f.app.LastBlockHeight()) + cp.Evidence.MaxAgeNumBlocks + 1, + Time: integration.HeaderInfoFromContext(ctx).Time.Add(cp.Evidence.MaxAgeDuration + 1), + }) assert.NilError(t, f.evidenceKeeper.BeginBlocker(ctx, cometInfoService)) @@ -349,7 +270,7 @@ func TestHandleDoubleSignAfterRotation(t *testing.T) { t.Parallel() f := initFixture(t) - ctx := f.sdkCtx.WithIsCheckTx(false).WithBlockHeight(1).WithHeaderInfo(header.Info{Time: time.Now()}) + ctx := integration.SetHeaderInfo(f.ctx, header.Info{Time: time.Now()}) populateValidators(t, f) power := int64(100) @@ -358,7 +279,7 @@ func TestHandleDoubleSignAfterRotation(t 
*testing.T) { operatorAddr, valpubkey := valAddresses[0], pubkeys[0] tstaking := stakingtestutil.NewHelper(t, ctx, f.stakingKeeper) - f.accountKeeper.SetAccount(f.sdkCtx, f.accountKeeper.NewAccountWithAddress(f.sdkCtx, sdk.AccAddress(operatorAddr))) + f.accountKeeper.SetAccount(f.ctx, f.accountKeeper.NewAccountWithAddress(f.ctx, sdk.AccAddress(operatorAddr))) selfDelegation := tstaking.CreateValidatorWithValPower(operatorAddr, valpubkey, power, true) // execute end-blocker and verify validator attributes @@ -412,7 +333,9 @@ func TestHandleDoubleSignAfterRotation(t *testing.T) { }}, } - err = f.evidenceKeeper.BeginBlocker(ctx.WithCometInfo(nci), cometInfoService) + ctxWithCometInfo := integration.SetCometInfo(ctx, nci) + + err = f.evidenceKeeper.BeginBlocker(ctxWithCometInfo, cometInfoService) assert.NilError(t, err) // should be jailed and tombstoned @@ -428,7 +351,7 @@ func TestHandleDoubleSignAfterRotation(t *testing.T) { assert.Assert(t, newTokens.LT(oldTokens)) // submit duplicate evidence - err = f.evidenceKeeper.BeginBlocker(ctx.WithCometInfo(nci), cometInfoService) + err = f.evidenceKeeper.BeginBlocker(ctxWithCometInfo, cometInfoService) assert.NilError(t, err) // tokens should be the same (capped slash) @@ -437,13 +360,12 @@ func TestHandleDoubleSignAfterRotation(t *testing.T) { assert.Assert(t, valInfo.GetTokens().Equal(newTokens)) // jump to past the unbonding period - ctx = ctx.WithHeaderInfo(header.Info{Time: time.Unix(1, 0).Add(stakingParams.UnbondingTime)}) + ctx = integration.SetHeaderInfo(ctx, header.Info{Time: time.Unix(1, 0).Add(stakingParams.UnbondingTime)}) // require we cannot unjail assert.Error(t, f.slashingKeeper.Unjail(ctx, operatorAddr), slashingtypes.ErrValidatorJailed.Error()) // require we be able to unbond now - ctx = ctx.WithBlockHeight(ctx.BlockHeight() + 1) del, _ := f.stakingKeeper.Delegations.Get(ctx, collections.Join(sdk.AccAddress(operatorAddr), operatorAddr)) validator, _ := f.stakingKeeper.GetValidator(ctx, operatorAddr) 
totalBond := validator.TokensFromShares(del.GetShares()).TruncateInt() @@ -473,10 +395,10 @@ func populateValidators(t assert.TestingT, f *fixture) { // add accounts and set total supply totalSupplyAmt := initAmt.MulRaw(int64(len(valAddresses))) totalSupply := sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, totalSupplyAmt)) - assert.NilError(t, f.bankKeeper.MintCoins(f.sdkCtx, minttypes.ModuleName, totalSupply)) + assert.NilError(t, f.bankKeeper.MintCoins(f.ctx, minttypes.ModuleName, totalSupply)) for _, addr := range valAddresses { - assert.NilError(t, f.bankKeeper.SendCoinsFromModuleToAccount(f.sdkCtx, minttypes.ModuleName, (sdk.AccAddress)(addr), initCoins)) + assert.NilError(t, f.bankKeeper.SendCoinsFromModuleToAccount(f.ctx, minttypes.ModuleName, (sdk.AccAddress)(addr), initCoins)) } } diff --git a/tests/integration/v2/services.go b/tests/integration/v2/services.go index 0773ecca3900..241a07cdd82f 100644 --- a/tests/integration/v2/services.go +++ b/tests/integration/v2/services.go @@ -9,6 +9,7 @@ import ( "cosmossdk.io/core/branch" "cosmossdk.io/core/comet" + corecontext "cosmossdk.io/core/context" "cosmossdk.io/core/event" "cosmossdk.io/core/gas" "cosmossdk.io/core/header" @@ -73,6 +74,27 @@ type integrationContext struct { header header.Info } +func SetHeaderInfo(ctx context.Context, h header.Info) context.Context { + iCtx, ok := ctx.Value(contextKey).(*integrationContext) + if !ok { + return ctx + } + iCtx.header = h + return context.WithValue(ctx, contextKey, iCtx) +} + +func HeaderInfoFromContext(ctx context.Context) header.Info { + iCtx, ok := ctx.Value(contextKey).(*integrationContext) + if ok { + return iCtx.header + } + return header.Info{} +} + +func SetCometInfo(ctx context.Context, c comet.Info) context.Context { + return context.WithValue(ctx, corecontext.CometInfoKey, c) +} + func GasMeterFromContext(ctx context.Context) gas.Meter { iCtx, ok := ctx.Value(contextKey).(*integrationContext) if !ok { From 538e1d1c7cbb034507f34842d5c9704d731f28a5 Mon 
Sep 17 00:00:00 2001 From: =?UTF-8?q?Juli=C3=A1n=20Toledano?= Date: Tue, 3 Dec 2024 13:34:43 +0100 Subject: [PATCH 13/17] refactor(client/v2)!: remove client.Context (#22493) --- client/v2/CHANGELOG.md | 1 + client/v2/autocli/app.go | 34 ++- client/v2/autocli/builder.go | 6 + client/v2/autocli/common.go | 157 +++++++++-- client/v2/autocli/common_test.go | 17 +- client/v2/autocli/config/config.go | 133 +++++++++ client/v2/autocli/flag/address.go | 41 ++- client/v2/autocli/keyring/keyring.go | 36 ++- client/v2/autocli/msg.go | 78 ++++++ client/v2/autocli/msg_test.go | 9 +- client/v2/autocli/query.go | 32 ++- .../v2/autocli/testdata/help-echo-msg.golden | 1 + client/v2/broadcast/comet/client_conn.go | 146 ++++++++++ client/v2/broadcast/comet/comet.go | 4 +- client/v2/broadcast/comet/comet_test.go | 2 +- client/v2/context/context.go | 55 ++++ client/v2/go.mod | 8 +- client/v2/internal/flags/flags.go | 21 ++ client/v2/internal/print/printer.go | 84 ++++++ client/v2/offchain/cli.go | 67 ++++- client/v2/offchain/common_test.go | 130 +-------- client/v2/offchain/sign.go | 25 +- client/v2/offchain/sign_test.go | 21 +- client/v2/offchain/verify.go | 15 +- client/v2/offchain/verify_test.go | 28 +- client/v2/tx/encoder.go | 7 +- client/v2/tx/factory.go | 31 +-- client/v2/tx/flags.go | 34 +-- client/v2/tx/tx.go | 262 +++++++++++------- client/v2/tx/types.go | 33 +-- simapp/simd/cmd/root.go | 5 +- tests/systemtests/mint_test.go | 2 + 32 files changed, 1134 insertions(+), 391 deletions(-) create mode 100644 client/v2/autocli/config/config.go create mode 100644 client/v2/broadcast/comet/client_conn.go create mode 100644 client/v2/context/context.go create mode 100644 client/v2/internal/print/printer.go diff --git a/client/v2/CHANGELOG.md b/client/v2/CHANGELOG.md index 5cff1928e437..27f312a99925 100644 --- a/client/v2/CHANGELOG.md +++ b/client/v2/CHANGELOG.md @@ -54,6 +54,7 @@ Ref: https://keepachangelog.com/en/1.0.0/ ### API Breaking Changes * 
[#17709](https://github.com/cosmos/cosmos-sdk/pull/17709) Address codecs have been removed from `autocli.AppOptions` and `flag.Builder`. Instead client/v2 uses the address codecs present in the context (introduced in [#17503](https://github.com/cosmos/cosmos-sdk/pull/17503)). +* [#22493](https://github.com/cosmos/cosmos-sdk/pull/22493) Refactored `client/v2` package to remove v1 context dependencies, while introducing new packages for client configuration, context management, and formatted output with improved transaction handling and flag support. ### Bug Fixes diff --git a/client/v2/autocli/app.go b/client/v2/autocli/app.go index 30b5138c1ee3..5e1316b4127d 100644 --- a/client/v2/autocli/app.go +++ b/client/v2/autocli/app.go @@ -3,19 +3,20 @@ package autocli import ( "github.com/cosmos/gogoproto/proto" "github.com/spf13/cobra" - "google.golang.org/grpc" "google.golang.org/protobuf/reflect/protoregistry" autocliv1 "cosmossdk.io/api/cosmos/autocli/v1" "cosmossdk.io/client/v2/autocli/flag" + "cosmossdk.io/core/address" "cosmossdk.io/core/appmodule" "cosmossdk.io/depinject" "cosmossdk.io/log" "cosmossdk.io/x/tx/signing" - "github.com/cosmos/cosmos-sdk/client" sdkflags "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/codec" "github.com/cosmos/cosmos-sdk/codec/types" + authtx "github.com/cosmos/cosmos-sdk/x/auth/tx" ) // AppOptions are input options for an autocli enabled app. These options can be built via depinject based on an app config. @@ -38,8 +39,15 @@ type AppOptions struct { // module or need to be improved. ModuleOptions map[string]*autocliv1.ModuleOptions `optional:"true"` - // ClientCtx contains the necessary information needed to execute the commands. - ClientCtx client.Context + AddressCodec address.Codec // AddressCodec is used to encode/decode account addresses. 
+ ValidatorAddressCodec address.ValidatorAddressCodec // ValidatorAddressCodec is used to encode/decode validator addresses. + ConsensusAddressCodec address.ConsensusAddressCodec // ConsensusAddressCodec is used to encode/decode consensus addresses. + + // Cdc is the codec used for binary encoding/decoding of messages. + Cdc codec.Codec + + // TxConfigOpts contains options for configuring transaction handling. + TxConfigOpts authtx.ConfigOptions skipValidation bool } @@ -63,19 +71,19 @@ func (appOptions AppOptions) EnhanceRootCommand(rootCmd *cobra.Command) error { builder := &Builder{ Builder: flag.Builder{ TypeResolver: protoregistry.GlobalTypes, - FileResolver: appOptions.ClientCtx.InterfaceRegistry, - AddressCodec: appOptions.ClientCtx.AddressCodec, - ValidatorAddressCodec: appOptions.ClientCtx.ValidatorAddressCodec, - ConsensusAddressCodec: appOptions.ClientCtx.ConsensusAddressCodec, - }, - GetClientConn: func(cmd *cobra.Command) (grpc.ClientConnInterface, error) { - return client.GetClientQueryContext(cmd) + FileResolver: appOptions.Cdc.InterfaceRegistry(), + AddressCodec: appOptions.AddressCodec, + ValidatorAddressCodec: appOptions.ValidatorAddressCodec, + ConsensusAddressCodec: appOptions.ConsensusAddressCodec, }, + GetClientConn: getQueryClientConn(appOptions.Cdc), AddQueryConnFlags: func(c *cobra.Command) { sdkflags.AddQueryFlagsToCmd(c) sdkflags.AddKeyringFlags(c.Flags()) }, - AddTxConnFlags: sdkflags.AddTxFlagsToCmd, + AddTxConnFlags: sdkflags.AddTxFlagsToCmd, + Cdc: appOptions.Cdc, + EnabledSignModes: appOptions.TxConfigOpts.EnabledSignModes, } return appOptions.EnhanceRootCommandWithBuilder(rootCmd, builder) @@ -170,9 +178,9 @@ func NewAppOptionsFromConfig( return AppOptions{ Modules: cfg.Modules, - ClientCtx: client.Context{InterfaceRegistry: interfaceRegistry}, ModuleOptions: moduleOptions, skipValidation: true, + Cdc: codec.NewProtoCodec(interfaceRegistry), }, nil } diff --git a/client/v2/autocli/builder.go b/client/v2/autocli/builder.go index 
81604f0d810b..475dac8af6d6 100644 --- a/client/v2/autocli/builder.go +++ b/client/v2/autocli/builder.go @@ -5,6 +5,9 @@ import ( "google.golang.org/grpc" "cosmossdk.io/client/v2/autocli/flag" + + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/types/tx/signing" ) // Builder manages options for building CLI commands. @@ -19,6 +22,9 @@ type Builder struct { // AddQueryConnFlags and AddTxConnFlags are functions that add flags to query and transaction commands AddQueryConnFlags func(*cobra.Command) AddTxConnFlags func(*cobra.Command) + + Cdc codec.Codec + EnabledSignModes []signing.SignMode } // ValidateAndComplete the builder fields. diff --git a/client/v2/autocli/common.go b/client/v2/autocli/common.go index 409198267cfd..ff3b5b184f5a 100644 --- a/client/v2/autocli/common.go +++ b/client/v2/autocli/common.go @@ -1,18 +1,29 @@ package autocli import ( + "context" + "crypto/tls" "fmt" - "strings" + "strconv" "github.com/spf13/cobra" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + grpcinsecure "google.golang.org/grpc/credentials/insecure" "google.golang.org/protobuf/reflect/protoreflect" - "sigs.k8s.io/yaml" autocliv1 "cosmossdk.io/api/cosmos/autocli/v1" + apitxsigning "cosmossdk.io/api/cosmos/tx/signing/v1beta1" + "cosmossdk.io/client/v2/autocli/config" + "cosmossdk.io/client/v2/autocli/keyring" + "cosmossdk.io/client/v2/broadcast/comet" + clientcontext "cosmossdk.io/client/v2/context" "cosmossdk.io/client/v2/internal/flags" + "cosmossdk.io/client/v2/internal/print" "cosmossdk.io/client/v2/internal/util" - "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/types/tx/signing" ) type cmdType int @@ -62,8 +73,13 @@ func (b *Builder) buildMethodCommandCommon(descriptor protoreflect.MethodDescrip } cmd.Args = binder.CobraArgs + cmd.PreRunE = b.preRunE() + cmd.RunE = func(cmd *cobra.Command, args []string) error { - ctx = 
cmd.Context() + ctx, err = b.getContext(cmd) + if err != nil { + return err + } input, err := binder.BuildMessage(args) if err != nil { @@ -237,27 +253,132 @@ func enhanceCustomCmd(builder *Builder, cmd *cobra.Command, cmdType cmdType, mod // outOrStdoutFormat formats the output based on the output flag and writes it to the command's output stream. func (b *Builder) outOrStdoutFormat(cmd *cobra.Command, out []byte) error { - clientCtx := client.Context{} - if v := cmd.Context().Value(client.ClientContextKey); v != nil { - clientCtx = *(v.(*client.Context)) + p, err := print.NewPrinter(cmd) + if err != nil { + return err + } + return p.PrintBytes(out) +} + +// getContext creates and returns a new context.Context with an autocli.Context value. +// It initializes a printer and, if necessary, a keyring based on command flags. +func (b *Builder) getContext(cmd *cobra.Command) (context.Context, error) { + // if the command uses the keyring this must be set + var ( + k keyring.Keyring + err error + ) + if cmd.Flags().Lookup(flags.FlagKeyringDir) != nil && cmd.Flags().Lookup(flags.FlagKeyringBackend) != nil { + k, err = keyring.NewKeyringFromFlags(cmd.Flags(), b.AddressCodec, cmd.InOrStdin(), b.Cdc) + if err != nil { + return nil, err + } + } else { + k = keyring.NoKeyring{} } - flagSet := cmd.Flags() - if clientCtx.OutputFormat == "" || flagSet.Changed(flags.FlagOutput) { - output, _ := flagSet.GetString(flags.FlagOutput) - clientCtx = clientCtx.WithOutputFormat(output) + + clientCtx := clientcontext.Context{ + Flags: cmd.Flags(), + AddressCodec: b.AddressCodec, + ValidatorAddressCodec: b.ValidatorAddressCodec, + ConsensusAddressCodec: b.ConsensusAddressCodec, + Cdc: b.Cdc, + Keyring: k, + EnabledSignModes: signModesToApiSignModes(b.EnabledSignModes), } - var err error - outputType := clientCtx.OutputFormat - // if the output type is text, convert the json to yaml - // if output type is json or nil, default to json - if outputType == flags.OutputFormatText { - out, err = 
yaml.JSONToYAML(out) + return clientcontext.SetInContext(cmd.Context(), clientCtx), nil +} + +// preRunE returns a function that sets flags from the configuration before running a command. +// It is used as a PreRunE hook for cobra commands to ensure flags are properly initialized +// from the configuration before command execution. +func (b *Builder) preRunE() func(cmd *cobra.Command, args []string) error { + return func(cmd *cobra.Command, args []string) error { + err := b.setFlagsFromConfig(cmd) if err != nil { return err } + + return nil + } +} + +// setFlagsFromConfig sets command flags from the provided configuration. +// It only sets flags that haven't been explicitly changed by the user. +func (b *Builder) setFlagsFromConfig(cmd *cobra.Command) error { + conf, err := config.CreateClientConfigFromFlags(cmd.Flags()) + if err != nil { + return err + } + + flagsToSet := map[string]string{ + flags.FlagChainID: conf.ChainID, + flags.FlagKeyringBackend: conf.KeyringBackend, + flags.FlagFrom: conf.KeyringDefaultKeyName, + flags.FlagOutput: conf.Output, + flags.FlagNode: conf.Node, + flags.FlagBroadcastMode: conf.BroadcastMode, + flags.FlagGrpcAddress: conf.GRPC.Address, + flags.FlagGrpcInsecure: strconv.FormatBool(conf.GRPC.Insecure), + } + + for flagName, value := range flagsToSet { + if flag := cmd.Flags().Lookup(flagName); flag != nil && !cmd.Flags().Changed(flagName) { + if err := cmd.Flags().Set(flagName, value); err != nil { + return err + } + } } - cmd.Println(strings.TrimSpace(string(out))) return nil } + +// getQueryClientConn returns a function that creates a gRPC client connection based on command flags. +// It handles the creation of secure or insecure connections and falls back to a CometBFT broadcaster +// if no gRPC address is specified. 
+func getQueryClientConn(cdc codec.Codec) func(cmd *cobra.Command) (grpc.ClientConnInterface, error) { + return func(cmd *cobra.Command) (grpc.ClientConnInterface, error) { + var err error + creds := grpcinsecure.NewCredentials() + + insecure := true + if cmd.Flags().Lookup(flags.FlagGrpcInsecure) != nil { + insecure, err = cmd.Flags().GetBool(flags.FlagGrpcInsecure) + if err != nil { + return nil, err + } + } + if !insecure { + creds = credentials.NewTLS(&tls.Config{MinVersion: tls.VersionTLS12}) + } + + var addr string + if cmd.Flags().Lookup(flags.FlagGrpcAddress) != nil { + addr, err = cmd.Flags().GetString(flags.FlagGrpcAddress) + if err != nil { + return nil, err + } + } + if addr == "" { + // if grpc-addr has not been set, use the default clientConn + // TODO: default is comet + node, err := cmd.Flags().GetString(flags.FlagNode) + if err != nil { + return nil, err + } + return comet.NewCometBFTBroadcaster(node, comet.BroadcastSync, cdc) + } + + return grpc.NewClient(addr, []grpc.DialOption{grpc.WithTransportCredentials(creds)}...) + } +} + +// signModesToApiSignModes converts a slice of signing.SignMode to a slice of apitxsigning.SignMode. 
+func signModesToApiSignModes(modes []signing.SignMode) []apitxsigning.SignMode { + r := make([]apitxsigning.SignMode, len(modes)) + for i, m := range modes { + r[i] = apitxsigning.SignMode(m) + } + return r +} diff --git a/client/v2/autocli/common_test.go b/client/v2/autocli/common_test.go index 30827fb3d278..b40f1a6dbc26 100644 --- a/client/v2/autocli/common_test.go +++ b/client/v2/autocli/common_test.go @@ -32,6 +32,10 @@ type fixture struct { conn *testClientConn b *Builder clientCtx client.Context + + home string + chainID string + kBackend string } func initFixture(t *testing.T) *fixture { @@ -85,7 +89,8 @@ func initFixture(t *testing.T) *fixture { return conn, nil }, AddQueryConnFlags: flags.AddQueryFlagsToCmd, - AddTxConnFlags: flags.AddTxFlagsToCmd, + AddTxConnFlags: addTxAndGlobalFlagsToCmd, + Cdc: encodingConfig.Codec, } assert.NilError(t, b.ValidateAndComplete()) @@ -93,9 +98,19 @@ func initFixture(t *testing.T) *fixture { conn: conn, b: b, clientCtx: clientCtx, + + home: home, + chainID: "autocli-test", + kBackend: sdkkeyring.BackendMemory, } } +func addTxAndGlobalFlagsToCmd(cmd *cobra.Command) { + f := cmd.Flags() + f.String("home", "", "home directory") + flags.AddTxFlagsToCmd(cmd) +} + func runCmd(fixture *fixture, command func(moduleName string, f *fixture) (*cobra.Command, error), args ...string) (*bytes.Buffer, error) { out := &bytes.Buffer{} cmd, err := command("test", fixture) diff --git a/client/v2/autocli/config/config.go b/client/v2/autocli/config/config.go new file mode 100644 index 000000000000..c775acc5ce18 --- /dev/null +++ b/client/v2/autocli/config/config.go @@ -0,0 +1,133 @@ +package config + +import ( + "errors" + "fmt" + "os" + "path" + "path/filepath" + "strings" + + "github.com/pelletier/go-toml/v2" + "github.com/spf13/pflag" + "github.com/spf13/viper" + + "cosmossdk.io/client/v2/internal/flags" +) + +type Config struct { + ChainID string `mapstructure:"chain-id" toml:"chain-id" comment:"The chain ID of the 
blockchain network"` + KeyringBackend string `mapstructure:"keyring-backend" toml:"keyring-backend" comment:"The keyring backend to use (os|file|kwallet|pass|test|memory)"` + KeyringDefaultKeyName string `mapstructure:"keyring-default-keyname" toml:"keyring-default-keyname" comment:"The default key name to use for signing transactions"` + Output string `mapstructure:"output" toml:"output" comment:"The output format for queries (text|json)"` + Node string `mapstructure:"node" toml:"node" comment:"The RPC endpoint URL for the node to connect to"` + BroadcastMode string `mapstructure:"broadcast-mode" toml:"broadcast-mode" comment:"How transactions are broadcast to the network (sync|async|block)"` + GRPC GRPCConfig `mapstructure:",squash" comment:"The gRPC client configuration"` +} + +// GRPCConfig holds the gRPC client configuration. +type GRPCConfig struct { + Address string `mapstructure:"grpc-address" toml:"grpc-address" comment:"The gRPC server address to connect to"` + Insecure bool `mapstructure:"grpc-insecure" toml:"grpc-insecure" comment:"Allow gRPC over insecure connections"` +} + +func DefaultConfig() *Config { + return &Config{ + ChainID: "", + KeyringBackend: "os", + KeyringDefaultKeyName: "", + Output: "text", + Node: "tcp://localhost:26657", + BroadcastMode: "sync", + } +} + +// CreateClientConfig creates a new client configuration or reads an existing one. 
+func CreateClientConfig(homeDir, chainID string, v *viper.Viper) (*Config, error) { + if homeDir == "" { + return nil, errors.New("home dir can't be empty") + } + + configPath := filepath.Join(homeDir, "config") + configFilePath := filepath.Join(configPath, "client.toml") + + // when client.toml does not exist create and init with default values + if _, err := os.Stat(configFilePath); os.IsNotExist(err) { + if err := os.MkdirAll(configPath, os.ModePerm); err != nil { + return nil, fmt.Errorf("couldn't make client config: %w", err) + } + + conf := DefaultConfig() + if chainID != "" { + // chain-id will be written to the client.toml while initiating the chain. + conf.ChainID = chainID + } + + if err := writeConfigFile(configFilePath, conf); err != nil { + return nil, fmt.Errorf("could not write client config to the file: %w", err) + } + } + + conf, err := readConfig(configPath, v) + if err != nil { + return nil, fmt.Errorf("couldn't get client config: %w", err) + } + + return conf, nil +} + +// CreateClientConfigFromFlags creates a client configuration from command-line flags. +func CreateClientConfigFromFlags(set *pflag.FlagSet) (*Config, error) { + homeDir, _ := set.GetString(flags.FlagHome) + if homeDir == "" { + return DefaultConfig(), nil + } + chainID, _ := set.GetString(flags.FlagChainID) + + v := viper.New() + executableName, err := os.Executable() + if err != nil { + return nil, err + } + + v.SetEnvPrefix(path.Base(executableName)) + v.SetEnvKeyReplacer(strings.NewReplacer(".", "_", "-", "_")) + v.AutomaticEnv() + + return CreateClientConfig(homeDir, chainID, v) +} + +// writeConfigFile marshals the config to TOML and writes it to +// configFilePath. 
+func writeConfigFile(configFilePath string, config *Config) error { + b, err := toml.Marshal(config) + if err != nil { + return err + } + + if dir := filepath.Dir(configFilePath); dir != "" { + if err := os.MkdirAll(dir, os.ModePerm); err != nil { + return err + } + } + + return os.WriteFile(configFilePath, b, 0o600) +} + +// readConfig reads values from the client.toml file and unmarshals them into Config +func readConfig(configPath string, v *viper.Viper) (*Config, error) { + v.AddConfigPath(configPath) + v.SetConfigName("client") + v.SetConfigType("toml") + + if err := v.ReadInConfig(); err != nil { + return nil, err + } + + conf := DefaultConfig() + if err := v.Unmarshal(conf); err != nil { + return nil, err + } + + return conf, nil +} diff --git a/client/v2/autocli/flag/address.go b/client/v2/autocli/flag/address.go index 454c30a317dd..f7ba4310675f 100644 --- a/client/v2/autocli/flag/address.go +++ b/client/v2/autocli/flag/address.go @@ -7,13 +7,12 @@ import ( "google.golang.org/protobuf/reflect/protoreflect" "cosmossdk.io/client/v2/autocli/keyring" + clientcontext "cosmossdk.io/client/v2/context" "cosmossdk.io/core/address" - "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/codec" "github.com/cosmos/cosmos-sdk/codec/types" cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" - sdkkeyring "github.com/cosmos/cosmos-sdk/crypto/keyring" cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" ) @@ -44,19 +43,19 @@ type addressValue struct { value string } -func (a addressValue) Get(protoreflect.Value) (protoreflect.Value, error) { +func (a *addressValue) Get(protoreflect.Value) (protoreflect.Value, error) { return protoreflect.ValueOfString(a.value), nil } -func (a addressValue) String() string { +func (a *addressValue) String() string { return a.value } // Set implements the flag.Value interface for addressValue. 
func (a *addressValue) Set(s string) error { // we get the keyring on set, as in NewValue the context is the parent context (before RunE) - keyring := getKeyringFromCtx(a.ctx) - addr, err := keyring.LookupAddressByKeyName(s) + k := getKeyringFromCtx(a.ctx) + addr, err := k.LookupAddressByKeyName(s) if err == nil { addrStr, err := a.addressCodec.BytesToString(addr) if err != nil { @@ -77,7 +76,7 @@ func (a *addressValue) Set(s string) error { return nil } -func (a addressValue) Type() string { +func (a *addressValue) Type() string { return "account address or key name" } @@ -110,8 +109,8 @@ func (a consensusAddressValue) String() string { func (a *consensusAddressValue) Set(s string) error { // we get the keyring on set, as in NewValue the context is the parent context (before RunE) - keyring := getKeyringFromCtx(a.ctx) - addr, err := keyring.LookupAddressByKeyName(s) + k := getKeyringFromCtx(a.ctx) + addr, err := k.LookupAddressByKeyName(s) if err == nil { addrStr, err := a.addressCodec.BytesToString(addr) if err != nil { @@ -147,20 +146,18 @@ func (a *consensusAddressValue) Set(s string) error { return nil } +// getKeyringFromCtx retrieves the keyring from the provided context. +// If the context is nil or does not contain a valid client context, +// it returns a no-op keyring implementation. 
func getKeyringFromCtx(ctx *context.Context) keyring.Keyring { - dctx := *ctx - if dctx != nil { - if clientCtx := dctx.Value(client.ClientContextKey); clientCtx != nil { - k, err := sdkkeyring.NewAutoCLIKeyring(clientCtx.(*client.Context).Keyring, clientCtx.(*client.Context).AddressCodec) - if err != nil { - panic(fmt.Errorf("failed to create keyring: %w", err)) - } - - return k - } else if k := dctx.Value(keyring.KeyringContextKey); k != nil { - return k.(*keyring.KeyringImpl) - } + if *ctx == nil { + return keyring.NoKeyring{} + } + + c, err := clientcontext.ClientContextFromGoContext(*ctx) + if err != nil { + return keyring.NoKeyring{} } - return keyring.NoKeyring{} + return c.Keyring } diff --git a/client/v2/autocli/keyring/keyring.go b/client/v2/autocli/keyring/keyring.go index f5dce25efceb..73c523a6e0f3 100644 --- a/client/v2/autocli/keyring/keyring.go +++ b/client/v2/autocli/keyring/keyring.go @@ -1,10 +1,15 @@ package keyring import ( - "context" + "io" + + "github.com/spf13/pflag" signingv1beta1 "cosmossdk.io/api/cosmos/tx/signing/v1beta1" + "cosmossdk.io/core/address" + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/crypto/keyring" "github.com/cosmos/cosmos-sdk/crypto/types" ) @@ -20,9 +25,32 @@ type KeyringImpl struct { k Keyring } -// NewKeyringInContext returns a new context with the keyring set. -func NewKeyringInContext(ctx context.Context, k Keyring) context.Context { - return context.WithValue(ctx, KeyringContextKey, NewKeyringImpl(k)) +// NewKeyringFromFlags creates a new Keyring instance based on command-line flags. +// It retrieves the keyring backend and directory from flags, creates a new keyring, +// and wraps it with an AutoCLI-compatible interface. 
+func NewKeyringFromFlags(flagSet *pflag.FlagSet, ac address.Codec, input io.Reader, cdc codec.Codec, opts ...keyring.Option) (Keyring, error) { + backEnd, err := flagSet.GetString("keyring-backend") + if err != nil { + return nil, err + } + + keyringDir, err := flagSet.GetString("keyring-dir") + if err != nil { + return nil, err + } + if keyringDir == "" { + keyringDir, err = flagSet.GetString("home") + if err != nil { + return nil, err + } + } + + k, err := keyring.New("autoclikeyring", backEnd, keyringDir, input, cdc, opts...) + if err != nil { + return nil, err + } + + return keyring.NewAutoCLIKeyring(k, ac) } func NewKeyringImpl(k Keyring) *KeyringImpl { diff --git a/client/v2/autocli/msg.go b/client/v2/autocli/msg.go index 9eb4f0444bba..9b30a56fe375 100644 --- a/client/v2/autocli/msg.go +++ b/client/v2/autocli/msg.go @@ -1,6 +1,7 @@ package autocli import ( + "bufio" "context" "fmt" @@ -13,8 +14,11 @@ import ( autocliv1 "cosmossdk.io/api/cosmos/autocli/v1" "cosmossdk.io/client/v2/autocli/flag" "cosmossdk.io/client/v2/internal/flags" + "cosmossdk.io/client/v2/internal/print" "cosmossdk.io/client/v2/internal/util" + v2tx "cosmossdk.io/client/v2/tx" addresscodec "cosmossdk.io/core/address" + "cosmossdk.io/core/transaction" // the following will be extracted to a separate module // https://github.com/cosmos/cosmos-sdk/issues/14403 @@ -23,6 +27,7 @@ import ( authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/input" clienttx "github.com/cosmos/cosmos-sdk/client/tx" ) @@ -228,3 +233,76 @@ func (b *Builder) handleGovProposal( return clienttx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), proposal) } + +// generateOrBroadcastTxWithV2 generates or broadcasts a transaction with the provided messages using v2 transaction handling. +// +//nolint:unused // It'll be used once BuildMsgMethodCommand is updated to use factory v2. 
+func (b *Builder) generateOrBroadcastTxWithV2(cmd *cobra.Command, msgs ...transaction.Msg) error { + ctx, err := b.getContext(cmd) + if err != nil { + return err + } + + cConn, err := b.GetClientConn(cmd) + if err != nil { + return err + } + + var bz []byte + genOnly, _ := cmd.Flags().GetBool(v2tx.FlagGenerateOnly) + isDryRun, _ := cmd.Flags().GetBool(v2tx.FlagDryRun) + if genOnly { + bz, err = v2tx.GenerateOnly(ctx, cConn, msgs...) + } else if isDryRun { + bz, err = v2tx.DryRun(ctx, cConn, msgs...) + } else { + skipConfirm, _ := cmd.Flags().GetBool("yes") + if skipConfirm { + bz, err = v2tx.GenerateAndBroadcastTxCLI(ctx, cConn, msgs...) + } else { + bz, err = v2tx.GenerateAndBroadcastTxCLIWithPrompt(ctx, cConn, b.userConfirmation(cmd), msgs...) + } + } + if err != nil { + return err + } + + output, _ := cmd.Flags().GetString(flags.FlagOutput) + p := print.Printer{ + Output: cmd.OutOrStdout(), + OutputFormat: output, + } + + return p.PrintBytes(bz) +} + +// userConfirmation returns a function that prompts the user for confirmation +// before signing and broadcasting a transaction. +// +//nolint:unused // It is used in generateOrBroadcastTxWithV2 however linting is complaining. 
+func (b *Builder) userConfirmation(cmd *cobra.Command) func([]byte) (bool, error) { + format, _ := cmd.Flags().GetString(flags.FlagOutput) + printer := print.Printer{ + Output: cmd.OutOrStdout(), + OutputFormat: format, + } + + return func(bz []byte) (bool, error) { + err := printer.PrintBytes(bz) + if err != nil { + return false, err + } + buf := bufio.NewReader(cmd.InOrStdin()) + ok, err := input.GetConfirmation("confirm transaction before signing and broadcasting", buf, cmd.ErrOrStderr()) + if err != nil { + _, _ = fmt.Fprintf(cmd.ErrOrStderr(), "error: %v\ncanceled transaction\n", err) + return false, err + } + if !ok { + _, _ = fmt.Fprintln(cmd.ErrOrStderr(), "canceled transaction") + return false, nil + } + + return true, nil + } +} diff --git a/client/v2/autocli/msg_test.go b/client/v2/autocli/msg_test.go index 11e6fd2d2fce..a86fc8ebcca7 100644 --- a/client/v2/autocli/msg_test.go +++ b/client/v2/autocli/msg_test.go @@ -55,6 +55,7 @@ func TestMsg(t *testing.T) { "cosmos1y74p8wyy4enfhfn342njve6cjmj5c8dtl6emdk", "cosmos1y74p8wyy4enfhfn342njve6cjmj5c8dtl6emdk", "1foo", "--generate-only", "--output", "json", + "--chain-id", fixture.chainID, ) assert.NilError(t, err) assertNormalizedJSONEqual(t, out.Bytes(), goldenLoad(t, "msg-output.golden")) @@ -74,6 +75,7 @@ func TestMsg(t *testing.T) { "cosmos1y74p8wyy4enfhfn342njve6cjmj5c8dtl6emdk", "cosmos1y74p8wyy4enfhfn342njve6cjmj5c8dtl6emdk", "1foo", "--generate-only", "--output", "json", + "--chain-id", fixture.chainID, ) assert.NilError(t, err) assertNormalizedJSONEqual(t, out.Bytes(), goldenLoad(t, "msg-output.golden")) @@ -93,8 +95,10 @@ func TestMsg(t *testing.T) { }), "send", "cosmos1y74p8wyy4enfhfn342njve6cjmj5c8dtl6emdk", "1foo", "--from", "cosmos1y74p8wyy4enfhfn342njve6cjmj5c8dtl6emdk", - "--generate-only", "--output", "json", + "--generate-only", + "--chain-id", fixture.chainID, + "--keyring-backend", fixture.kBackend, ) assert.NilError(t, err) assertNormalizedJSONEqual(t, out.Bytes(), goldenLoad(t, 
"msg-output.golden")) @@ -116,8 +120,9 @@ func TestMsg(t *testing.T) { }), "send", "cosmos1y74p8wyy4enfhfn342njve6cjmj5c8dtl6emdk", "1foo", "--sender", "cosmos1y74p8wyy4enfhfn342njve6cjmj5c8dtl6emdk", - "--generate-only", "--output", "json", + "--generate-only", + "--chain-id", fixture.chainID, ) assert.NilError(t, err) assertNormalizedJSONEqual(t, out.Bytes(), goldenLoad(t, "msg-output.golden")) diff --git a/client/v2/autocli/query.go b/client/v2/autocli/query.go index d308bcd7633a..aaf648f3578d 100644 --- a/client/v2/autocli/query.go +++ b/client/v2/autocli/query.go @@ -8,15 +8,15 @@ import ( "strings" "time" - autocliv1 "cosmossdk.io/api/cosmos/autocli/v1" - "cosmossdk.io/math" - "cosmossdk.io/x/tx/signing/aminojson" - "github.com/spf13/cobra" + "google.golang.org/grpc/metadata" "google.golang.org/protobuf/reflect/protoreflect" + autocliv1 "cosmossdk.io/api/cosmos/autocli/v1" "cosmossdk.io/client/v2/internal/flags" "cosmossdk.io/client/v2/internal/util" + "cosmossdk.io/math" + "cosmossdk.io/x/tx/signing/aminojson" sdk "github.com/cosmos/cosmos-sdk/types" ) @@ -116,7 +116,6 @@ func (b *Builder) AddQueryServiceCommands(cmd *cobra.Command, cmdDescriptor *aut // BuildQueryMethodCommand creates a gRPC query command for the given service method. This can be used to auto-generate // just a single command for a single service rpc method. 
func (b *Builder) BuildQueryMethodCommand(ctx context.Context, descriptor protoreflect.MethodDescriptor, options *autocliv1.RpcCommandOptions) (*cobra.Command, error) { - getClientConn := b.GetClientConn serviceDescriptor := descriptor.Parent().(protoreflect.ServiceDescriptor) methodName := fmt.Sprintf("/%s/%s", serviceDescriptor.FullName(), descriptor.Name()) outputType := util.ResolveMessageType(b.TypeResolver, descriptor.Output()) @@ -130,13 +129,13 @@ func (b *Builder) BuildQueryMethodCommand(ctx context.Context, descriptor protor } cmd, err := b.buildMethodCommandCommon(descriptor, options, func(cmd *cobra.Command, input protoreflect.Message) error { - clientConn, err := getClientConn(cmd) + clientConn, err := b.GetClientConn(cmd) if err != nil { return err } output := outputType.New() - if err := clientConn.Invoke(cmd.Context(), methodName, input.Interface(), output.Interface()); err != nil { + if err := clientConn.Invoke(b.queryContext(cmd.Context(), cmd), methodName, input.Interface(), output.Interface()); err != nil { return err } @@ -170,6 +169,25 @@ func (b *Builder) BuildQueryMethodCommand(ctx context.Context, descriptor protor return cmd, nil } +// queryContext returns a new context with metadata for block height if specified. +// If the context already has metadata, it is returned as-is. Otherwise, if a height +// flag is present on the command, it adds an x-cosmos-block-height metadata value +// with the specified height. 
+func (b *Builder) queryContext(ctx context.Context, cmd *cobra.Command) context.Context { + md, _ := metadata.FromOutgoingContext(ctx) + if md != nil { + return ctx + } + + md = map[string][]string{} + if cmd.Flags().Lookup("height") != nil { + h, _ := cmd.Flags().GetInt64("height") + md["x-cosmos-block-height"] = []string{fmt.Sprintf("%d", h)} + } + + return metadata.NewOutgoingContext(ctx, md) +} + func encoder(encoder aminojson.Encoder) aminojson.Encoder { return encoder.DefineTypeEncoding("google.protobuf.Duration", func(_ *aminojson.Encoder, msg protoreflect.Message, w io.Writer) error { var ( diff --git a/client/v2/autocli/testdata/help-echo-msg.golden b/client/v2/autocli/testdata/help-echo-msg.golden index 1307509569c9..0761494efde8 100644 --- a/client/v2/autocli/testdata/help-echo-msg.golden +++ b/client/v2/autocli/testdata/help-echo-msg.golden @@ -18,6 +18,7 @@ Flags: --gas-prices string Determine the transaction fee by multiplying max gas units by gas prices (e.g. 0.1uatom), rounding up to nearest denom unit --generate-only Build an unsigned transaction and write it to STDOUT (when enabled, the local Keybase only accessed when providing a key name) -h, --help help for send + --home string home directory --keyring-backend string Select keyring's backend (os|file|kwallet|pass|test|memory) (default "os") --keyring-dir string The client Keyring directory; if omitted, the default 'home' directory will be used --ledger Use a connected Ledger device diff --git a/client/v2/broadcast/comet/client_conn.go b/client/v2/broadcast/comet/client_conn.go new file mode 100644 index 000000000000..df93b1af86e3 --- /dev/null +++ b/client/v2/broadcast/comet/client_conn.go @@ -0,0 +1,146 @@ +package comet + +import ( + "context" + "errors" + "strconv" + + abci "github.com/cometbft/cometbft/api/cometbft/abci/v1" + rpcclient "github.com/cometbft/cometbft/rpc/client" + gogogrpc "github.com/cosmos/gogoproto/grpc" + "google.golang.org/grpc" + 
"google.golang.org/grpc/codes" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + + errorsmod "cosmossdk.io/errors" + + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/codec/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +const grpcBlockHeightHeader = "x-cosmos-block-height" + +var ( + _ gogogrpc.ClientConn = &CometBFTBroadcaster{} + _ grpc.ClientConnInterface = &CometBFTBroadcaster{} +) + +func (c *CometBFTBroadcaster) NewStream(_ context.Context, _ *grpc.StreamDesc, _ string, _ ...grpc.CallOption) (grpc.ClientStream, error) { + return nil, errors.New("not implemented") +} + +// Invoke implements the gRPC ClientConn interface by forwarding the RPC call to CometBFT's ABCI Query. +// It marshals the request, sends it as an ABCI query, and unmarshals the response. +func (c *CometBFTBroadcaster) Invoke(ctx context.Context, method string, req, reply interface{}, opts ...grpc.CallOption) (err error) { + reqBz, err := c.getRPCCodec().Marshal(req) + if err != nil { + return err + } + + // parse height header + md, _ := metadata.FromOutgoingContext(ctx) + var height int64 + if heights := md.Get(grpcBlockHeightHeader); len(heights) > 0 { + height, err = strconv.ParseInt(heights[0], 10, 64) + if err != nil { + return err + } + if height < 0 { + return errorsmod.Wrapf( + sdkerrors.ErrInvalidRequest, + "client.Context.Invoke: height (%d) from %q must be >= 0", height, grpcBlockHeightHeader) + } + } + + abciR := abci.QueryRequest{ + Path: method, + Data: reqBz, + Height: height, + } + + res, err := c.queryABCI(ctx, abciR) + if err != nil { + return err + } + + err = c.getRPCCodec().Unmarshal(res.Value, reply) + if err != nil { + return err + } + + // Create header metadata. 
For now the headers contain: + // - block height + // We then parse all the call options, if the call option is a + // HeaderCallOption, then we manually set the value of that header to the + // metadata. + md = metadata.Pairs(grpcBlockHeightHeader, strconv.FormatInt(res.Height, 10)) + for _, callOpt := range opts { + header, ok := callOpt.(grpc.HeaderCallOption) + if !ok { + continue + } + + *header.HeaderAddr = md + } + + if c.cdc.InterfaceRegistry() != nil { + return types.UnpackInterfaces(reply, c.cdc.InterfaceRegistry()) + } + + return nil +} + +// queryABCI performs an ABCI query request to the CometBFT RPC client. +// If the RPC query fails or returns a non-OK response, it will return an error. +// The response is converted from ABCI error codes to gRPC status errors. +func (c *CometBFTBroadcaster) queryABCI(ctx context.Context, req abci.QueryRequest) (abci.QueryResponse, error) { + opts := rpcclient.ABCIQueryOptions{ + Height: req.Height, + Prove: req.Prove, + } + + result, err := c.rpcClient.ABCIQueryWithOptions(ctx, req.Path, req.Data, opts) + if err != nil { + return abci.QueryResponse{}, err + } + + if !result.Response.IsOK() { + return abci.QueryResponse{}, sdkErrorToGRPCError(result.Response) + } + + return result.Response, nil +} + +// sdkErrorToGRPCError converts an ABCI query response error code to an appropriate gRPC status error. +// It maps common SDK error codes to their gRPC equivalents: +// - ErrInvalidRequest -> InvalidArgument +// - ErrUnauthorized -> Unauthenticated +// - ErrKeyNotFound -> NotFound +// Any other error codes are mapped to Unknown. 
+func sdkErrorToGRPCError(resp abci.QueryResponse) error { + switch resp.Code { + case sdkerrors.ErrInvalidRequest.ABCICode(): + return status.Error(codes.InvalidArgument, resp.Log) + case sdkerrors.ErrUnauthorized.ABCICode(): + return status.Error(codes.Unauthenticated, resp.Log) + case sdkerrors.ErrKeyNotFound.ABCICode(): + return status.Error(codes.NotFound, resp.Log) + default: + return status.Error(codes.Unknown, resp.Log) + } +} + +// getRPCCodec returns the gRPC codec for the CometBFT broadcaster. +// If the broadcaster's codec implements GRPCCodecProvider, it returns its gRPC codec. +// Otherwise, it creates a new ProtoCodec with the broadcaster's interface registry and returns its gRPC codec. +func (c *CometBFTBroadcaster) getRPCCodec() encoding.Codec { + cdc, ok := c.cdc.(codec.GRPCCodecProvider) + if !ok { + return codec.NewProtoCodec(c.cdc.InterfaceRegistry()).GRPCCodec() + } + + return cdc.GRPCCodec() +} diff --git a/client/v2/broadcast/comet/comet.go b/client/v2/broadcast/comet/comet.go index d6ab7f904477..6fee9fe27e85 100644 --- a/client/v2/broadcast/comet/comet.go +++ b/client/v2/broadcast/comet/comet.go @@ -66,11 +66,11 @@ var _ broadcast.Broadcaster = &CometBFTBroadcaster{} type CometBFTBroadcaster struct { rpcClient CometRPC mode string - cdc codec.JSONCodec + cdc codec.Codec } // NewCometBFTBroadcaster creates a new CometBFTBroadcaster. 
-func NewCometBFTBroadcaster(rpcURL, mode string, cdc codec.JSONCodec) (*CometBFTBroadcaster, error) { +func NewCometBFTBroadcaster(rpcURL, mode string, cdc codec.Codec) (*CometBFTBroadcaster, error) { if cdc == nil { return nil, errors.New("codec can't be nil") } diff --git a/client/v2/broadcast/comet/comet_test.go b/client/v2/broadcast/comet/comet_test.go index 0eb8b81685ed..69c032f2e12d 100644 --- a/client/v2/broadcast/comet/comet_test.go +++ b/client/v2/broadcast/comet/comet_test.go @@ -22,7 +22,7 @@ var cdc = testutil.CodecOptions{}.NewCodec() func TestNewCometBftBroadcaster(t *testing.T) { tests := []struct { name string - cdc codec.JSONCodec + cdc codec.Codec mode string want *CometBFTBroadcaster wantErr bool diff --git a/client/v2/context/context.go b/client/v2/context/context.go new file mode 100644 index 000000000000..fdb65b517498 --- /dev/null +++ b/client/v2/context/context.go @@ -0,0 +1,55 @@ +package context + +import ( + gocontext "context" + "errors" + + "github.com/spf13/pflag" + + apisigning "cosmossdk.io/api/cosmos/tx/signing/v1beta1" + "cosmossdk.io/client/v2/autocli/keyring" + "cosmossdk.io/core/address" + + "github.com/cosmos/cosmos-sdk/codec" +) + +// ContextKey is a key used to store and retrieve Context from a Go context.Context. +var ContextKey contextKey + +// contextKey is an empty struct used as a key type for storing Context in a context.Context. +type contextKey struct{} + +// Context represents the client context used in autocli commands. +// It contains various components needed for command execution. +type Context struct { + Flags *pflag.FlagSet + + AddressCodec address.Codec + ValidatorAddressCodec address.ValidatorAddressCodec + ConsensusAddressCodec address.ConsensusAddressCodec + + Cdc codec.Codec + + Keyring keyring.Keyring + + EnabledSignModes []apisigning.SignMode +} + +// SetInContext stores the provided autocli.Context in the given Go context.Context. 
+// It returns a new context.Context containing the autocli.Context value. +func SetInContext(goCtx gocontext.Context, cliCtx Context) gocontext.Context { + return gocontext.WithValue(goCtx, ContextKey, cliCtx) +} + +// ClientContextFromGoContext returns the autocli.Context from a given Go context. +// It checks if the context contains a valid autocli.Context and returns it. +func ClientContextFromGoContext(ctx gocontext.Context) (*Context, error) { + if c := ctx.Value(ContextKey); c != nil { + cliCtx, ok := c.(Context) + if !ok { + return nil, errors.New("context value is not of type autocli.Context") + } + return &cliCtx, nil + } + return nil, errors.New("context does not contain autocli.Context value") +} diff --git a/client/v2/go.mod b/client/v2/go.mod index b39ea3a3eaf8..ced3d482adfc 100644 --- a/client/v2/go.mod +++ b/client/v2/go.mod @@ -36,7 +36,7 @@ require ( buf.build/gen/go/cosmos/gogo-proto/protocolbuffers/go v1.35.2-20240130113600-88ef6483f90f.1 // indirect cosmossdk.io/collections v0.4.1-0.20241128094659-bd76b47e1d8b // indirect cosmossdk.io/core/testing v0.0.0-20241108153815-606544c7be7e // indirect - cosmossdk.io/errors v1.0.1 // indirect + cosmossdk.io/errors v1.0.1 cosmossdk.io/log v1.5.0 cosmossdk.io/math v1.4.0 cosmossdk.io/schema v0.3.1-0.20241128094659-bd76b47e1d8b // indirect @@ -60,7 +60,7 @@ require ( github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect github.com/cometbft/cometbft v1.0.0-rc2.0.20241127125717-4ce33b646ac9 github.com/cometbft/cometbft-db v1.0.1 // indirect - github.com/cometbft/cometbft/api v1.0.0-rc2 // indirect + github.com/cometbft/cometbft/api v1.0.0-rc2 github.com/cosmos/btcutil v1.0.5 // indirect github.com/cosmos/cosmos-db v1.1.0 // indirect github.com/cosmos/go-bip39 v1.0.0 @@ -127,7 +127,7 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 
github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a // indirect github.com/oklog/run v1.1.0 // indirect - github.com/pelletier/go-toml/v2 v2.2.3 // indirect + github.com/pelletier/go-toml/v2 v2.2.3 github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect @@ -145,7 +145,7 @@ require ( github.com/sourcegraph/conc v0.3.0 // indirect github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.7.0 // indirect - github.com/spf13/viper v1.19.0 // indirect + github.com/spf13/viper v1.19.0 github.com/stretchr/testify v1.10.0 github.com/subosito/gotenv v1.6.0 // indirect github.com/supranational/blst v0.3.13 // indirect diff --git a/client/v2/internal/flags/flags.go b/client/v2/internal/flags/flags.go index d06cef708cad..7f684ac43544 100644 --- a/client/v2/internal/flags/flags.go +++ b/client/v2/internal/flags/flags.go @@ -2,6 +2,12 @@ package flags // This defines flag names that can be used in autocli. const ( + // FlagHome is the flag to specify the home dir of the app. + FlagHome = "home" + + // FlagChainID is the flag to specify the chain ID of the network. + FlagChainID = "chain-id" + // FlagFrom is the flag to set the from address with which to sign the transaction. FlagFrom = "from" @@ -14,9 +20,24 @@ const ( // FlagNoPrompt is the flag to not use a prompt for commands. FlagNoPrompt = "no-prompt" + // FlagKeyringDir is the flag to specify the directory where the keyring is stored. + FlagKeyringDir = "keyring-dir" + // FlagKeyringBackend is the flag to specify which backend to use for the keyring (e.g. os, file, test). + FlagKeyringBackend = "keyring-backend" + // FlagNoProposal is the flag convert a gov proposal command into a normal command. 
// This is used to allow user of chains with custom authority to not use gov submit proposals for usual proposal commands. FlagNoProposal = "no-proposal" + + // FlagNode is the flag to specify the node address to connect to. + FlagNode = "node" + // FlagBroadcastMode is the flag to specify the broadcast mode for transactions. + FlagBroadcastMode = "broadcast-mode" + + // FlagGrpcAddress is the flag to specify the gRPC server address to connect to. + FlagGrpcAddress = "grpc-addr" + // FlagGrpcInsecure is the flag to allow insecure gRPC connections. + FlagGrpcInsecure = "grpc-insecure" ) // List of supported output formats diff --git a/client/v2/internal/print/printer.go b/client/v2/internal/print/printer.go new file mode 100644 index 000000000000..631281bcb0a2 --- /dev/null +++ b/client/v2/internal/print/printer.go @@ -0,0 +1,84 @@ +package print + +import ( + "encoding/json" + "fmt" + "io" + "os" + + "github.com/spf13/cobra" + "sigs.k8s.io/yaml" + + "cosmossdk.io/client/v2/internal/flags" +) + +const ( + jsonOutput = flags.OutputFormatJSON + textOutput = flags.OutputFormatText +) + +// Printer handles formatted output of different types of data +type Printer struct { + Output io.Writer + OutputFormat string +} + +// NewPrinter creates a new Printer instance with default stdout +func NewPrinter(cmd *cobra.Command) (*Printer, error) { + outputFormat, err := cmd.Flags().GetString("output") + if err != nil { + return nil, err + } + + if outputFormat != jsonOutput && outputFormat != textOutput { + return nil, fmt.Errorf("unsupported output format: %s", outputFormat) + } + + return &Printer{ + Output: cmd.OutOrStdout(), + OutputFormat: outputFormat, + }, nil +} + +// PrintString prints the raw string +func (p *Printer) PrintString(str string) error { + return p.PrintBytes([]byte(str)) +} + +// PrintRaw prints raw JSON message without marshaling +func (p *Printer) PrintRaw(toPrint json.RawMessage) error { + return p.PrintBytes(toPrint) +} + +// PrintBytes prints 
and formats bytes +func (p *Printer) PrintBytes(out []byte) error { + var err error + if p.OutputFormat == textOutput { + if !json.Valid(out) { + return fmt.Errorf("invalid JSON") + } + out, err = yaml.JSONToYAML(out) + if err != nil { + return err + } + } + + writer := p.Output + if writer == nil { + writer = os.Stdout + } + + _, err = writer.Write(out) + if err != nil { + return err + } + + if p.OutputFormat != textOutput { + _, err = writer.Write([]byte("\n")) + if err != nil { + return err + } + } + + return nil +} diff --git a/client/v2/offchain/cli.go b/client/v2/offchain/cli.go index 7738a6204451..024df5663912 100644 --- a/client/v2/offchain/cli.go +++ b/client/v2/offchain/cli.go @@ -6,15 +6,24 @@ import ( "github.com/spf13/cobra" + "cosmossdk.io/client/v2/autocli/config" + "cosmossdk.io/client/v2/autocli/keyring" + "cosmossdk.io/client/v2/broadcast/comet" + clientcontext "cosmossdk.io/client/v2/context" v2flags "cosmossdk.io/client/v2/internal/flags" - "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/codec/address" + "github.com/cosmos/cosmos-sdk/codec/types" + cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" + sdk "github.com/cosmos/cosmos-sdk/types" ) const ( flagEncoding = "encoding" flagFileFormat = "file-format" + flagBech32 = "bech32" ) // OffChain off-chain utilities. 
@@ -31,6 +40,7 @@ func OffChain() *cobra.Command { ) flags.AddKeyringFlags(cmd.PersistentFlags()) + cmd.PersistentFlags().String(flagBech32, "cosmos", "address bech32 prefix") return cmd } @@ -42,7 +52,19 @@ func SignFile() *cobra.Command { Long: "Sign a file using a given key.", Args: cobra.ExactArgs(2), RunE: func(cmd *cobra.Command, args []string) error { - clientCtx := client.GetClientContextFromCmd(cmd) + ir := types.NewInterfaceRegistry() + cryptocodec.RegisterInterfaces(ir) + cdc := codec.NewProtoCodec(ir) + + c, err := config.CreateClientConfigFromFlags(cmd.Flags()) + if err != nil { + return err + } + + keyringBackend := c.KeyringBackend + if !cmd.Flags().Changed(v2flags.FlagKeyringBackend) { + _ = cmd.Flags().Set(v2flags.FlagKeyringBackend, keyringBackend) + } bz, err := os.ReadFile(args[1]) if err != nil { @@ -53,8 +75,29 @@ func SignFile() *cobra.Command { outputFormat, _ := cmd.Flags().GetString(v2flags.FlagOutput) outputFile, _ := cmd.Flags().GetString(flags.FlagOutputDocument) signMode, _ := cmd.Flags().GetString(flags.FlagSignMode) + bech32Prefix, _ := cmd.Flags().GetString(flagBech32) + + ac := address.NewBech32Codec(bech32Prefix) + k, err := keyring.NewKeyringFromFlags(cmd.Flags(), ac, cmd.InOrStdin(), cdc) + if err != nil { + return err + } + + // off-chain does not need to query any information + conn, err := comet.NewCometBFTBroadcaster("", comet.BroadcastSync, cdc) + if err != nil { + return err + } - signedTx, err := Sign(clientCtx, bz, args[0], encoding, signMode, outputFormat) + ctx := clientcontext.Context{ + Flags: cmd.Flags(), + AddressCodec: ac, + ValidatorAddressCodec: address.NewBech32Codec(sdk.GetBech32PrefixValAddr(bech32Prefix)), + Cdc: cdc, + Keyring: k, + } + + signedTx, err := Sign(ctx, bz, conn, args[0], encoding, signMode, outputFormat) if err != nil { return err } @@ -87,10 +130,8 @@ func VerifyFile() *cobra.Command { Long: "Verify a previously signed file with the given key.", Args: cobra.ExactArgs(1), RunE: func(cmd 
*cobra.Command, args []string) error { - clientCtx, err := client.GetClientQueryContext(cmd) - if err != nil { - return err - } + ir := types.NewInterfaceRegistry() + cdc := codec.NewProtoCodec(ir) bz, err := os.ReadFile(args[0]) if err != nil { @@ -98,8 +139,18 @@ func VerifyFile() *cobra.Command { } fileFormat, _ := cmd.Flags().GetString(flagFileFormat) + bech32Prefix, _ := cmd.Flags().GetString(flagBech32) + + ac := address.NewBech32Codec(bech32Prefix) + + ctx := clientcontext.Context{ + Flags: cmd.Flags(), + AddressCodec: ac, + ValidatorAddressCodec: address.NewBech32Codec(sdk.GetBech32PrefixValAddr(bech32Prefix)), + Cdc: cdc, + } - err = Verify(clientCtx, bz, fileFormat) + err = Verify(ctx, bz, fileFormat) if err == nil { cmd.Println("Verification OK!") } diff --git a/client/v2/offchain/common_test.go b/client/v2/offchain/common_test.go index 5b862fcb20bb..d455fa74d102 100644 --- a/client/v2/offchain/common_test.go +++ b/client/v2/offchain/common_test.go @@ -2,32 +2,17 @@ package offchain import ( "context" - "testing" + "errors" - "github.com/stretchr/testify/require" + gogogrpc "github.com/cosmos/gogoproto/grpc" "google.golang.org/grpc" - bankv1beta1 "cosmossdk.io/api/cosmos/bank/v1beta1" - "cosmossdk.io/x/tx/signing" - "cosmossdk.io/x/tx/signing/aminojson" - "cosmossdk.io/x/tx/signing/direct" - "cosmossdk.io/x/tx/signing/directaux" - "cosmossdk.io/x/tx/signing/textual" - - "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/codec" - "github.com/cosmos/cosmos-sdk/codec/address" "github.com/cosmos/cosmos-sdk/codec/testutil" cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" - sdk "github.com/cosmos/cosmos-sdk/types" - signingtypes "github.com/cosmos/cosmos-sdk/types/tx/signing" ) -const ( - addressCodecPrefix = "cosmos" - validatorAddressCodecPrefix = "cosmosvaloper" - mnemonic = "have embark stumble card pistol fun gauge obtain forget oil awesome lottery unfold corn 
sure original exist siren pudding spread uphold dwarf goddess card" -) +const mnemonic = "have embark stumble card pistol fun gauge obtain forget oil awesome lottery unfold corn sure original exist siren pudding spread uphold dwarf goddess card" func getCodec() codec.Codec { registry := testutil.CodecOptions{}.NewInterfaceRegistry() @@ -36,111 +21,14 @@ func getCodec() codec.Codec { return codec.NewProtoCodec(registry) } -func newGRPCCoinMetadataQueryFn(grpcConn grpc.ClientConnInterface) textual.CoinMetadataQueryFn { - return func(ctx context.Context, denom string) (*bankv1beta1.Metadata, error) { - bankQueryClient := bankv1beta1.NewQueryClient(grpcConn) - res, err := bankQueryClient.DenomMetadata(ctx, &bankv1beta1.QueryDenomMetadataRequest{ - Denom: denom, - }) - if err != nil { - return nil, err - } - - return res.Metadata, nil - } -} - -// testConfig fulfills client.TxConfig although SignModeHandler is the only method implemented. -type testConfig struct { - handler *signing.HandlerMap -} - -func (t testConfig) SignModeHandler() *signing.HandlerMap { - return t.handler -} - -func (t testConfig) TxEncoder() sdk.TxEncoder { - return nil -} +var _ gogogrpc.ClientConn = mockClientConn{} -func (t testConfig) TxDecoder() sdk.TxDecoder { - return nil -} +type mockClientConn struct{} -func (t testConfig) TxJSONEncoder() sdk.TxEncoder { - return nil +func (c mockClientConn) Invoke(_ context.Context, _ string, _, _ interface{}, _ ...grpc.CallOption) error { + return errors.New("not implemented") } -func (t testConfig) TxJSONDecoder() sdk.TxDecoder { - return nil -} - -func (t testConfig) MarshalSignatureJSON(v2s []signingtypes.SignatureV2) ([]byte, error) { - return nil, nil -} - -func (t testConfig) UnmarshalSignatureJSON(bytes []byte) ([]signingtypes.SignatureV2, error) { - return nil, nil -} - -func (t testConfig) NewTxBuilder() client.TxBuilder { - return nil -} - -func (t testConfig) WrapTxBuilder(s sdk.Tx) (client.TxBuilder, error) { - return nil, nil -} - -func (t 
testConfig) SigningContext() *signing.Context { - return nil -} - -func newTestConfig(t *testing.T) *testConfig { - t.Helper() - - enabledSignModes := []signingtypes.SignMode{ - signingtypes.SignMode_SIGN_MODE_DIRECT, - signingtypes.SignMode_SIGN_MODE_DIRECT_AUX, - signingtypes.SignMode_SIGN_MODE_LEGACY_AMINO_JSON, - signingtypes.SignMode_SIGN_MODE_TEXTUAL, - } - - var err error - signingOptions := signing.Options{ - AddressCodec: address.NewBech32Codec(addressCodecPrefix), - ValidatorAddressCodec: address.NewBech32Codec(validatorAddressCodecPrefix), - } - signingContext, err := signing.NewContext(signingOptions) - require.NoError(t, err) - - lenSignModes := len(enabledSignModes) - handlers := make([]signing.SignModeHandler, lenSignModes) - for i, m := range enabledSignModes { - var err error - switch m { - case signingtypes.SignMode_SIGN_MODE_DIRECT: - handlers[i] = &direct.SignModeHandler{} - case signingtypes.SignMode_SIGN_MODE_DIRECT_AUX: - handlers[i], err = directaux.NewSignModeHandler(directaux.SignModeHandlerOptions{ - TypeResolver: signingOptions.TypeResolver, - SignersContext: signingContext, - }) - require.NoError(t, err) - case signingtypes.SignMode_SIGN_MODE_LEGACY_AMINO_JSON: - handlers[i] = aminojson.NewSignModeHandler(aminojson.SignModeHandlerOptions{ - FileResolver: signingOptions.FileResolver, - TypeResolver: signingOptions.TypeResolver, - }) - case signingtypes.SignMode_SIGN_MODE_TEXTUAL: - handlers[i], err = textual.NewSignModeHandler(textual.SignModeOptions{ - CoinMetadataQuerier: newGRPCCoinMetadataQueryFn(client.Context{}), - FileResolver: signingOptions.FileResolver, - TypeResolver: signingOptions.TypeResolver, - }) - require.NoError(t, err) - } - } - - handler := signing.NewHandlerMap(handlers...) 
- return &testConfig{handler: handler} +func (c mockClientConn) NewStream(_ context.Context, _ *grpc.StreamDesc, _ string, _ ...grpc.CallOption) (grpc.ClientStream, error) { + return nil, errors.New("not implemented") } diff --git a/client/v2/offchain/sign.go b/client/v2/offchain/sign.go index 8dfcb907c089..1ce41de31857 100644 --- a/client/v2/offchain/sign.go +++ b/client/v2/offchain/sign.go @@ -4,13 +4,14 @@ import ( "context" "fmt" + gogogrpc "github.com/cosmos/gogoproto/grpc" + apisigning "cosmossdk.io/api/cosmos/tx/signing/v1beta1" + clientcontext "cosmossdk.io/client/v2/context" "cosmossdk.io/client/v2/internal/account" "cosmossdk.io/client/v2/internal/offchain" clitx "cosmossdk.io/client/v2/tx" - "github.com/cosmos/cosmos-sdk/client" - "github.com/cosmos/cosmos-sdk/crypto/keyring" "github.com/cosmos/cosmos-sdk/version" ) @@ -29,20 +30,20 @@ var enabledSignModes = []apisigning.SignMode{ } // Sign signs given bytes using the specified encoder and SignMode. 
-func Sign(ctx client.Context, rawBytes []byte, fromName, encoding, signMode, output string) (string, error) { +func Sign( + ctx clientcontext.Context, + rawBytes []byte, + conn gogogrpc.ClientConn, + fromName, encoding, signMode, output string, +) (string, error) { digest, err := encodeDigest(encoding, rawBytes) if err != nil { return "", err } - keybase, err := keyring.NewAutoCLIKeyring(ctx.Keyring, ctx.AddressCodec) - if err != nil { - return "", err - } - txConfig, err := clitx.NewTxConfig(clitx.ConfigOptions{ AddressCodec: ctx.AddressCodec, - Cdc: ctx.Codec, + Cdc: ctx.Cdc, ValidatorAddressCodec: ctx.ValidatorAddressCodec, EnabledSignModes: enabledSignModes, }) @@ -50,7 +51,7 @@ func Sign(ctx client.Context, rawBytes []byte, fromName, encoding, signMode, out return "", err } - accRetriever := account.NewAccountRetriever(ctx.AddressCodec, ctx, ctx.InterfaceRegistry) + accRetriever := account.NewAccountRetriever(ctx.AddressCodec, conn, ctx.Cdc.InterfaceRegistry()) sm, err := getSignMode(signMode) if err != nil { @@ -66,12 +67,12 @@ func Sign(ctx client.Context, rawBytes []byte, fromName, encoding, signMode, out }, } - txf, err := clitx.NewFactory(keybase, ctx.Codec, accRetriever, txConfig, ctx.AddressCodec, ctx, params) + txf, err := clitx.NewFactory(ctx.Keyring, ctx.Cdc, accRetriever, txConfig, ctx.AddressCodec, conn, params) if err != nil { return "", err } - pubKey, err := keybase.GetPubKey(fromName) + pubKey, err := ctx.Keyring.GetPubKey(fromName) if err != nil { return "", err } diff --git a/client/v2/offchain/sign_test.go b/client/v2/offchain/sign_test.go index 839872866629..cb95b0485c39 100644 --- a/client/v2/offchain/sign_test.go +++ b/client/v2/offchain/sign_test.go @@ -5,23 +5,28 @@ import ( "github.com/stretchr/testify/require" - "github.com/cosmos/cosmos-sdk/client" + clientcontext "cosmossdk.io/client/v2/context" + "github.com/cosmos/cosmos-sdk/codec/address" "github.com/cosmos/cosmos-sdk/crypto/hd" 
"github.com/cosmos/cosmos-sdk/crypto/keyring" ) func TestSign(t *testing.T) { + ac := address.NewBech32Codec("cosmos") + vc := address.NewBech32Codec("cosmosvaloper") k := keyring.NewInMemory(getCodec()) _, err := k.NewAccount("signVerify", mnemonic, "", "m/44'/118'/0'/0/0", hd.Secp256k1) require.NoError(t, err) - ctx := client.Context{ - TxConfig: newTestConfig(t), - Codec: getCodec(), - AddressCodec: address.NewBech32Codec("cosmos"), - ValidatorAddressCodec: address.NewBech32Codec("cosmosvaloper"), - Keyring: k, + autoKeyring, err := keyring.NewAutoCLIKeyring(k, ac) + require.NoError(t, err) + + ctx := clientcontext.Context{ + AddressCodec: ac, + ValidatorAddressCodec: vc, + Cdc: getCodec(), + Keyring: autoKeyring, } tests := []struct { name string @@ -52,7 +57,7 @@ func TestSign(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := Sign(ctx, tt.rawBytes, "signVerify", tt.encoding, tt.signMode, "json") + got, err := Sign(ctx, tt.rawBytes, mockClientConn{}, "signVerify", tt.encoding, tt.signMode, "json") if tt.wantErr { require.Error(t, err) } else { diff --git a/client/v2/offchain/verify.go b/client/v2/offchain/verify.go index 2c064faccc71..5e87cb90129b 100644 --- a/client/v2/offchain/verify.go +++ b/client/v2/offchain/verify.go @@ -8,19 +8,20 @@ import ( "google.golang.org/protobuf/types/known/anypb" + clientcontext "cosmossdk.io/client/v2/context" clitx "cosmossdk.io/client/v2/tx" + "cosmossdk.io/core/address" txsigning "cosmossdk.io/x/tx/signing" - "github.com/cosmos/cosmos-sdk/client" codectypes "github.com/cosmos/cosmos-sdk/codec/types" cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" ) // Verify verifies a digest after unmarshalling it. 
-func Verify(ctx client.Context, digest []byte, fileFormat string) error { +func Verify(ctx clientcontext.Context, digest []byte, fileFormat string) error { txConfig, err := clitx.NewTxConfig(clitx.ConfigOptions{ AddressCodec: ctx.AddressCodec, - Cdc: ctx.Codec, + Cdc: ctx.Cdc, ValidatorAddressCodec: ctx.ValidatorAddressCodec, EnabledSignModes: enabledSignModes, }) @@ -33,12 +34,12 @@ func Verify(ctx client.Context, digest []byte, fileFormat string) error { return err } - return verify(ctx, dTx) + return verify(ctx.AddressCodec, txConfig, dTx) } // verify verifies given Tx. -func verify(ctx client.Context, dTx clitx.Tx) error { - signModeHandler := ctx.TxConfig.SignModeHandler() +func verify(addressCodec address.Codec, txConfig clitx.TxConfig, dTx clitx.Tx) error { + signModeHandler := txConfig.SignModeHandler() signers, err := dTx.GetSigners() if err != nil { @@ -60,7 +61,7 @@ func verify(ctx client.Context, dTx clitx.Tx) error { return errors.New("signature does not match its respective signer") } - addr, err := ctx.AddressCodec.BytesToString(pubKey.Address()) + addr, err := addressCodec.BytesToString(pubKey.Address()) if err != nil { return err } diff --git a/client/v2/offchain/verify_test.go b/client/v2/offchain/verify_test.go index 56345504d80e..d45648bd07c6 100644 --- a/client/v2/offchain/verify_test.go +++ b/client/v2/offchain/verify_test.go @@ -6,59 +6,53 @@ import ( "github.com/stretchr/testify/require" _ "cosmossdk.io/api/cosmos/crypto/secp256k1" + clientcontext "cosmossdk.io/client/v2/context" clitx "cosmossdk.io/client/v2/tx" - "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/codec/address" "github.com/cosmos/cosmos-sdk/crypto/hd" "github.com/cosmos/cosmos-sdk/crypto/keyring" ) func Test_Verify(t *testing.T) { - ctx := client.Context{ - TxConfig: newTestConfig(t), - Codec: getCodec(), + ctx := clientcontext.Context{ AddressCodec: address.NewBech32Codec("cosmos"), ValidatorAddressCodec: 
address.NewBech32Codec("cosmosvaloper"), + Cdc: getCodec(), } tests := []struct { name string digest []byte fileFormat string - ctx client.Context wantErr bool }{ { name: "verify json", digest: []byte("{\"body\":{\"messages\":[{\"@type\":\"/offchain.MsgSignArbitraryData\", \"app_domain\":\"\", \"signer\":\"cosmos16877zjk85kwlap3wclpmx34e0xllg2erc7u7m4\", \"data\":\"{\\n\\t\\\"name\\\": \\\"Sarah\\\",\\n\\t\\\"surname\\\": \\\"Connor\\\",\\n\\t\\\"age\\\": 29\\n}\\n\"}], \"timeout_timestamp\":\"0001-01-01T00:00:00Z\"}, \"auth_info\":{\"signer_infos\":[{\"public_key\":{\"@type\":\"/cosmos.crypto.secp256k1.PubKey\", \"key\":\"Ahhu3idSSUAQXtDBvBjUlCPWH3od4rXyWgb7L4scSj4m\"}, \"mode_info\":{\"single\":{\"mode\":\"SIGN_MODE_DIRECT\"}}}], \"fee\":{}}, \"signatures\":[\"tdXsO5uNqIBFSBKEA1e3Wrcb6ejriP9HwlcBTkU7EUJzuezjg6Rvr1a+Kp6umCAN7MWoBHRT2cmqzDfg6RjaYA==\"]}"), fileFormat: "json", - ctx: ctx, }, { name: "wrong signer json", digest: []byte("{\"body\":{\"messages\":[{\"@type\":\"/offchain.MsgSignArbitraryData\", \"app_domain\":\"\", \"signer\":\"cosmos1xv9e39mkhhyg5aneu2myj82t7029sv48qu3pgj\", \"data\":\"{\\n\\t\\\"name\\\": \\\"Sarah\\\",\\n\\t\\\"surname\\\": \\\"Connor\\\",\\n\\t\\\"age\\\": 29\\n}\\n\"}], \"timeout_timestamp\":\"0001-01-01T00:00:00Z\"}, \"auth_info\":{\"signer_infos\":[{\"public_key\":{\"@type\":\"/cosmos.crypto.secp256k1.PubKey\", \"key\":\"Ahhu3idSSUAQXtDBvBjUlCPWH3od4rXyWgb7L4scSj4m\"}, \"mode_info\":{\"single\":{\"mode\":\"SIGN_MODE_DIRECT\"}}}], \"fee\":{}}, \"signatures\":[\"tdXsO5uNqIBFSBKEA1e3Wrcb6ejriP9HwlcBTkU7EUJzuezjg6Rvr1a+Kp6umCAN7MWoBHRT2cmqzDfg6RjaYA==\"]}"), fileFormat: "json", - ctx: ctx, wantErr: true, }, { name: "verify text", digest: []byte("body:{messages:{[/offchain.MsgSignArbitraryData]:{app_domain:\"\" signer:\"cosmos16877zjk85kwlap3wclpmx34e0xllg2erc7u7m4\" data:\"{\\n\\t\\\"name\\\": \\\"Sarah\\\",\\n\\t\\\"surname\\\": \\\"Connor\\\",\\n\\t\\\"age\\\": 29\\n}\\n\"}} timeout_timestamp:{seconds:-62135596800}} 
auth_info:{signer_infos:{public_key:{[/cosmos.crypto.secp256k1.PubKey]:{key:\"\\x02\\x18n\\xde'RI@\\x10^\\xd0\\xc1\\xbc\\x18Ԕ#\\xd6\\x1fz\\x1d\\xe2\\xb5\\xf2Z\\x06\\xfb/\\x8b\\x1cJ>&\"}} mode_info:{single:{mode:SIGN_MODE_DIRECT}}} fee:{}} signatures:\"\\xb5\\xd5\\xec;\\x9b\\x8d\\xa8\\x80EH\\x12\\x84\\x03W\\xb7Z\\xb7\\x1b\\xe9\\xe8\\xeb\\x88\\xffG\\xc2W\\x01NE;\\x11Bs\\xb9\\xecヤo\\xafV\\xbe*\\x9e\\xae\\x98 \\r\\xecŨ\\x04tS\\xd9ɪ\\xcc7\\xe0\\xe9\\x18\\xda`\"\n"), fileFormat: "text", - ctx: ctx, }, { name: "wrong signer text", digest: []byte("body:{messages:{[/offchain.MsgSignArbitraryData]:{app_domain:\"\" signer:\"cosmos1xv9e39mkhhyg5aneu2myj82t7029sv48qu3pgj\" data:\"{\\n\\t\\\"name\\\": \\\"Sarah\\\",\\n\\t\\\"surname\\\": \\\"Connor\\\",\\n\\t\\\"age\\\": 29\\n}\\n\"}} timeout_timestamp:{seconds:-62135596800}} auth_info:{signer_infos:{public_key:{[/cosmos.crypto.secp256k1.PubKey]:{key:\"\\x02\\x18n\\xde'RI@\\x10^\\xd0\\xc1\\xbc\\x18Ԕ#\\xd6\\x1fz\\x1d\\xe2\\xb5\\xf2Z\\x06\\xfb/\\x8b\\x1cJ>&\"}} mode_info:{single:{mode:SIGN_MODE_DIRECT}}} fee:{}} signatures:\"\\xb5\\xd5\\xec;\\x9b\\x8d\\xa8\\x80EH\\x12\\x84\\x03W\\xb7Z\\xb7\\x1b\\xe9\\xe8\\xeb\\x88\\xffG\\xc2W\\x01NE;\\x11Bs\\xb9\\xecヤo\\xafV\\xbe*\\x9e\\xae\\x98 \\r\\xecŨ\\x04tS\\xd9ɪ\\xcc7\\xe0\\xe9\\x18\\xda`\"\n"), fileFormat: "text", - ctx: ctx, wantErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := Verify(tt.ctx, tt.digest, tt.fileFormat) + err := Verify(ctx, tt.digest, tt.fileFormat) if tt.wantErr { require.Error(t, err) } else { @@ -69,19 +63,23 @@ func Test_Verify(t *testing.T) { } func Test_SignVerify(t *testing.T) { + ac := address.NewBech32Codec("cosmos") + k := keyring.NewInMemory(getCodec()) _, err := k.NewAccount("signVerify", mnemonic, "", "m/44'/118'/0'/0/0", hd.Secp256k1) require.NoError(t, err) - ctx := client.Context{ - TxConfig: newTestConfig(t), - Codec: getCodec(), + autoKeyring, err := keyring.NewAutoCLIKeyring(k, ac) + require.NoError(t, err) + + ctx := 
clientcontext.Context{ AddressCodec: address.NewBech32Codec("cosmos"), ValidatorAddressCodec: address.NewBech32Codec("cosmosvaloper"), - Keyring: k, + Cdc: getCodec(), + Keyring: autoKeyring, } - tx, err := Sign(ctx, []byte("Hello World!"), "signVerify", "no-encoding", "direct", "json") + tx, err := Sign(ctx, []byte("Hello World!"), mockClientConn{}, "signVerify", "no-encoding", "direct", "json") require.NoError(t, err) err = Verify(ctx, []byte(tx), "json") diff --git a/client/v2/tx/encoder.go b/client/v2/tx/encoder.go index 3e917b34b4c3..09011c8315e4 100644 --- a/client/v2/tx/encoder.go +++ b/client/v2/tx/encoder.go @@ -19,9 +19,10 @@ var ( // jsonMarshalOptions configures JSON marshaling for protobuf messages. jsonMarshalOptions = protojson.MarshalOptions{ - Indent: "", - UseProtoNames: true, - UseEnumNumbers: false, + Indent: "", + UseProtoNames: true, + UseEnumNumbers: false, + EmitUnpopulated: true, } // textMarshalOptions diff --git a/client/v2/tx/factory.go b/client/v2/tx/factory.go index 8007caaee4f8..6b18f492426c 100644 --- a/client/v2/tx/factory.go +++ b/client/v2/tx/factory.go @@ -44,7 +44,7 @@ type Factory struct { txConfig TxConfig txParams TxParameters - tx txState + tx *txState } func NewFactoryFromFlagSet(flags *pflag.FlagSet, keybase keyring.Keyring, cdc codec.BinaryCodec, accRetriever account.AccountRetriever, @@ -81,38 +81,37 @@ func NewFactory(keybase keyring.Keyring, cdc codec.BinaryCodec, accRetriever acc txConfig: txConfig, txParams: parameters, - tx: txState{}, + tx: &txState{}, }, nil } // validateFlagSet checks the provided flags for consistency and requirements based on the operation mode. 
func validateFlagSet(flags *pflag.FlagSet, offline bool) error { + dryRun, _ := flags.GetBool(flags2.FlagDryRun) + if offline && dryRun { + return errors.New("dry-run: cannot use offline mode") + } + + generateOnly, _ := flags.GetBool(flags2.FlagGenerateOnly) + chainID, _ := flags.GetString(flags2.FlagChainID) if offline { - if !flags.Changed(flags2.FlagAccountNumber) || !flags.Changed(flags2.FlagSequence) { + if !generateOnly && (!flags.Changed(flags2.FlagAccountNumber) || !flags.Changed(flags2.FlagSequence)) { return errors.New("account-number and sequence must be set in offline mode") } + if generateOnly && chainID != "" { + return errors.New("chain ID cannot be used when offline and generate-only flags are set") + } + gas, _ := flags.GetString(flags2.FlagGas) gasSetting, _ := flags2.ParseGasSetting(gas) if gasSetting.Simulate { return errors.New("simulate and offline flags cannot be set at the same time") } - } - - generateOnly, _ := flags.GetBool(flags2.FlagGenerateOnly) - chainID, _ := flags.GetString(flags2.FlagChainID) - if offline && generateOnly && chainID != "" { - return errors.New("chain ID cannot be used when offline and generate-only flags are set") - } - if chainID == "" { + } else if chainID == "" { return errors.New("chain ID required but not specified") } - dryRun, _ := flags.GetBool(flags2.FlagDryRun) - if offline && dryRun { - return errors.New("dry-run: cannot use offline mode") - } - return nil } diff --git a/client/v2/tx/flags.go b/client/v2/tx/flags.go index 6ef8584042f7..9d0a7c4a74d9 100644 --- a/client/v2/tx/flags.go +++ b/client/v2/tx/flags.go @@ -10,23 +10,23 @@ const ( defaultGasLimit = 200000 gasFlagAuto = "auto" - flagTimeoutTimestamp = "timeout-timestamp" - flagChainID = "chain-id" - flagNote = "note" - flagSignMode = "sign-mode" - flagAccountNumber = "account-number" - flagSequence = "sequence" - flagFrom = "from" - flagDryRun = "dry-run" - flagGas = "gas" - flagGasAdjustment = "gas-adjustment" - flagGasPrices = "gas-prices" - 
flagFees = "fees" - flagFeePayer = "fee-payer" - flagFeeGranter = "fee-granter" - flagUnordered = "unordered" - flagOffline = "offline" - flagGenerateOnly = "generate-only" + FlagTimeoutTimestamp = "timeout-timestamp" + FlagChainID = "chain-id" + FlagNote = "note" + FlagSignMode = "sign-mode" + FlagAccountNumber = "account-number" + FlagSequence = "sequence" + FlagFrom = "from" + FlagDryRun = "dry-run" + FlagGas = "gas" + FlagGasAdjustment = "gas-adjustment" + FlagGasPrices = "gas-prices" + FlagFees = "fees" + FlagFeePayer = "fee-payer" + FlagFeeGranter = "fee-granter" + FlagUnordered = "unordered" + FlagOffline = "offline" + FlagGenerateOnly = "generate-only" ) // parseGasSetting parses a string gas value. The value may either be 'auto', diff --git a/client/v2/tx/tx.go b/client/v2/tx/tx.go index c6bb5a548f92..34278e48a024 100644 --- a/client/v2/tx/tx.go +++ b/client/v2/tx/tx.go @@ -1,91 +1,165 @@ package tx import ( - "bufio" "context" "errors" "fmt" - "os" + "github.com/cosmos/gogoproto/grpc" "github.com/cosmos/gogoproto/proto" "github.com/spf13/pflag" apitxsigning "cosmossdk.io/api/cosmos/tx/signing/v1beta1" "cosmossdk.io/client/v2/broadcast" "cosmossdk.io/client/v2/broadcast/comet" + clientcontext "cosmossdk.io/client/v2/context" "cosmossdk.io/client/v2/internal/account" + "cosmossdk.io/client/v2/internal/flags" "cosmossdk.io/core/transaction" - "github.com/cosmos/cosmos-sdk/client" - "github.com/cosmos/cosmos-sdk/client/input" - "github.com/cosmos/cosmos-sdk/crypto/keyring" + "github.com/cosmos/cosmos-sdk/codec" ) -// GenerateOrBroadcastTxCLIWithBroadcaster will either generate and print an unsigned transaction +// GenerateAndBroadcastTxCLIWithBroadcaster will either generate and print an unsigned transaction // or sign it and broadcast it with the specified broadcaster returning an error upon failure. 
-func GenerateOrBroadcastTxCLIWithBroadcaster(ctx client.Context, flagSet *pflag.FlagSet, broadcaster broadcast.Broadcaster, msgs ...transaction.Msg) error { - if err := validateMessages(msgs...); err != nil { - return err +func GenerateAndBroadcastTxCLIWithBroadcaster( + ctx context.Context, + conn grpc.ClientConn, + broadcaster broadcast.Broadcaster, + msgs ...transaction.Msg, +) ([]byte, error) { + txf, err := initFactory(ctx, conn, msgs...) + if err != nil { + return nil, err + } + + err = generateTx(txf, msgs...) + if err != nil { + return nil, err + } + + return BroadcastTx(ctx, txf, broadcaster) +} + +// GenerateAndBroadcastTxCLI will either generate and print an unsigned transaction +// or sign it and broadcast it using default CometBFT broadcaster, returning an error upon failure. +func GenerateAndBroadcastTxCLI(ctx context.Context, conn grpc.ClientConn, msgs ...transaction.Msg) ([]byte, error) { + cBroadcaster, err := cometBroadcaster(ctx) + if err != nil { + return nil, err } - txf, err := newFactory(ctx, flagSet) + return GenerateAndBroadcastTxCLIWithBroadcaster(ctx, conn, cBroadcaster, msgs...) +} + +// GenerateAndBroadcastTxCLIWithPrompt generates, signs and broadcasts a transaction after prompting the user for confirmation. +// It takes a context, gRPC client connection, prompt function for user confirmation, and transaction messages. +// The prompt function receives the unsigned transaction bytes and returns a boolean indicating user confirmation and any error. +// Returns the broadcast response bytes and any error encountered. +func GenerateAndBroadcastTxCLIWithPrompt( + ctx context.Context, + conn grpc.ClientConn, + prompt func([]byte) (bool, error), + msgs ...transaction.Msg, +) ([]byte, error) { + txf, err := initFactory(ctx, conn, msgs...) if err != nil { - return err + return nil, err } - genOnly, _ := flagSet.GetBool(flagGenerateOnly) - if genOnly { - return generateOnly(ctx, txf, msgs...) + err = generateTx(txf, msgs...) 
+ if err != nil { + return nil, err } - isDryRun, _ := flagSet.GetBool(flagDryRun) - if isDryRun { - return dryRun(txf, msgs...) + confirmed, err := askConfirmation(txf, prompt) + if err != nil { + return nil, err + } + if !confirmed { + return nil, nil } - return BroadcastTx(ctx, txf, broadcaster, msgs...) + cBroadcaster, err := cometBroadcaster(ctx) + if err != nil { + return nil, err + } + + return BroadcastTx(ctx, txf, cBroadcaster) } -// GenerateOrBroadcastTxCLI will either generate and print an unsigned transaction -// or sign it and broadcast it using default CometBFT broadcaster, returning an error upon failure. -func GenerateOrBroadcastTxCLI(ctx client.Context, flagSet *pflag.FlagSet, msgs ...transaction.Msg) error { - cometBroadcaster, err := getCometBroadcaster(ctx, flagSet) +// GenerateOnly generates an unsigned transaction without broadcasting it. +// It initializes a transaction factory using the provided context, connection and messages, +// then generates an unsigned transaction. +// Returns the unsigned transaction bytes and any error encountered. +func GenerateOnly(ctx context.Context, conn grpc.ClientConn, msgs ...transaction.Msg) ([]byte, error) { + txf, err := initFactory(ctx, conn) if err != nil { - return err + return nil, err } - return GenerateOrBroadcastTxCLIWithBroadcaster(ctx, flagSet, cometBroadcaster, msgs...) + return generateOnly(txf, msgs...) } -// getCometBroadcaster returns a new CometBFT broadcaster based on the provided context and flag set. -func getCometBroadcaster(ctx client.Context, flagSet *pflag.FlagSet) (broadcast.Broadcaster, error) { - url, _ := flagSet.GetString("node") - mode, _ := flagSet.GetString("broadcast-mode") - return comet.NewCometBFTBroadcaster(url, mode, ctx.Codec) +// DryRun simulates a transaction without broadcasting it to the network. +// It initializes a transaction factory using the provided context, connection and messages, +// then performs a dry run simulation of the transaction. 
+// Returns the simulation response bytes and any error encountered. +func DryRun(ctx context.Context, conn grpc.ClientConn, msgs ...transaction.Msg) ([]byte, error) { + txf, err := initFactory(ctx, conn, msgs...) + if err != nil { + return nil, err + } + + return dryRun(txf, msgs...) } -// newFactory creates a new transaction Factory based on the provided context and flag set. -// It initializes a new CLI keyring, extracts transaction parameters from the flag set, -// configures transaction settings, and sets up an account retriever for the transaction Factory. -func newFactory(ctx client.Context, flagSet *pflag.FlagSet) (Factory, error) { - k, err := keyring.NewAutoCLIKeyring(ctx.Keyring, ctx.AddressCodec) +// initFactory initializes a new transaction Factory and validates the provided messages. +// It retrieves the client v2 context from the provided context, validates all messages, +// and creates a new transaction Factory using the client context and connection. +// Returns the initialized Factory and any error encountered. +func initFactory(ctx context.Context, conn grpc.ClientConn, msgs ...transaction.Msg) (Factory, error) { + clientCtx, err := clientcontext.ClientContextFromGoContext(ctx) if err != nil { return Factory{}, err } + if err := validateMessages(msgs...); err != nil { + return Factory{}, err + } + + txf, err := newFactory(*clientCtx, conn) + if err != nil { + return Factory{}, err + } + + return txf, nil +} + +// getCometBroadcaster returns a new CometBFT broadcaster based on the provided context and flag set. +func getCometBroadcaster(cdc codec.Codec, flagSet *pflag.FlagSet) (broadcast.Broadcaster, error) { + url, _ := flagSet.GetString(flags.FlagNode) + mode, _ := flagSet.GetString(flags.FlagBroadcastMode) + return comet.NewCometBFTBroadcaster(url, mode, cdc) +} + +// newFactory creates a new transaction Factory based on the provided context and flag set. 
+// It initializes a new CLI keyring, extracts transaction parameters from the flag set, +// configures transaction settings, and sets up an account retriever for the transaction Factory. +func newFactory(ctx clientcontext.Context, conn grpc.ClientConn) (Factory, error) { txConfig, err := NewTxConfig(ConfigOptions{ AddressCodec: ctx.AddressCodec, - Cdc: ctx.Codec, + Cdc: ctx.Cdc, ValidatorAddressCodec: ctx.ValidatorAddressCodec, - EnabledSignModes: ctx.TxConfig.SignModeHandler().SupportedModes(), + EnabledSignModes: ctx.EnabledSignModes, }) if err != nil { return Factory{}, err } - accRetriever := account.NewAccountRetriever(ctx.AddressCodec, ctx, ctx.InterfaceRegistry) + accRetriever := account.NewAccountRetriever(ctx.AddressCodec, conn, ctx.Cdc.InterfaceRegistry()) - txf, err := NewFactoryFromFlagSet(flagSet, k, ctx.Codec, accRetriever, txConfig, ctx.AddressCodec, ctx) + txf, err := NewFactoryFromFlagSet(ctx.Flags, ctx.Keyring, ctx.Cdc, accRetriever, txConfig, ctx.AddressCodec, conn) if err != nil { return Factory{}, err } @@ -115,30 +189,29 @@ func validateMessages(msgs ...transaction.Msg) error { // generateOnly prepares the transaction and prints the unsigned transaction string. // It first calls Prepare on the transaction factory to set up any necessary pre-conditions. // If preparation is successful, it generates an unsigned transaction string using the provided messages. -func generateOnly(ctx client.Context, txf Factory, msgs ...transaction.Msg) error { +func generateOnly(txf Factory, msgs ...transaction.Msg) ([]byte, error) { uTx, err := txf.UnsignedTxString(msgs...) if err != nil { - return err + return nil, err } - return ctx.PrintString(uTx) + return []byte(uTx), nil } // dryRun performs a dry run of the transaction to estimate the gas required. // It prepares the transaction factory and simulates the transaction with the provided messages. 
-func dryRun(txf Factory, msgs ...transaction.Msg) error { +func dryRun(txf Factory, msgs ...transaction.Msg) ([]byte, error) { _, gas, err := txf.Simulate(msgs...) if err != nil { - return err + return nil, err } - _, err = fmt.Fprintf(os.Stderr, "%s\n", GasEstimateResponse{GasEstimate: gas}) - return err + return []byte(fmt.Sprintf(`{"gas_estimate": %d}`, gas)), nil } // SimulateTx simulates a tx and returns the simulation response obtained by the query. -func SimulateTx(ctx client.Context, flagSet *pflag.FlagSet, msgs ...transaction.Msg) (proto.Message, error) { - txf, err := newFactory(ctx, flagSet) +func SimulateTx(ctx clientcontext.Context, conn grpc.ClientConn, msgs ...transaction.Msg) (proto.Message, error) { + txf, err := newFactory(ctx, conn) if err != nil { return nil, err } @@ -147,10 +220,10 @@ func SimulateTx(ctx client.Context, flagSet *pflag.FlagSet, msgs ...transaction. return simulation, err } -// BroadcastTx attempts to generate, sign and broadcast a transaction with the -// given set of messages. It will also simulate gas requirements if necessary. -// It will return an error upon failure. -func BroadcastTx(clientCtx client.Context, txf Factory, broadcaster broadcast.Broadcaster, msgs ...transaction.Msg) error { +// generateTx generates an unsigned transaction using the provided transaction factory and messages. +// If simulation and execution are enabled, it first calculates the gas requirements. +// It then builds the unsigned transaction with the provided messages. +func generateTx(txf Factory, msgs ...transaction.Msg) error { if txf.simulateAndExecute() { err := txf.calculateGas(msgs...) if err != nil { @@ -158,58 +231,29 @@ func BroadcastTx(clientCtx client.Context, txf Factory, broadcaster broadcast.Br } } - err := txf.BuildUnsignedTx(msgs...) 
- if err != nil { - return err - } - - if !clientCtx.SkipConfirm { - encoder := txf.txConfig.TxJSONEncoder() - if encoder == nil { - return errors.New("failed to encode transaction: tx json encoder is nil") - } - - unsigTx, err := txf.getTx() - if err != nil { - return err - } - txBytes, err := encoder(unsigTx) - if err != nil { - return fmt.Errorf("failed to encode transaction: %w", err) - } - - if err := clientCtx.PrintRaw(txBytes); err != nil { - _, _ = fmt.Fprintf(os.Stderr, "error: %v\n%s\n", err, txBytes) - } + return txf.BuildUnsignedTx(msgs...) +} - buf := bufio.NewReader(os.Stdin) - ok, err := input.GetConfirmation("confirm transaction before signing and broadcasting", buf, os.Stderr) - if err != nil { - _, _ = fmt.Fprintf(os.Stderr, "error: %v\ncanceled transaction\n", err) - return err - } - if !ok { - _, _ = fmt.Fprintln(os.Stderr, "canceled transaction") - return nil - } +// BroadcastTx attempts to sign and broadcast a transaction using the provided factory and broadcaster. +// GenerateTx must be called first to prepare the transaction for signing. +// This function then signs the transaction using the factory's signing capabilities, encodes it, +// and finally broadcasts it using the provided broadcaster. +func BroadcastTx(ctx context.Context, txf Factory, broadcaster broadcast.Broadcaster) ([]byte, error) { + if len(txf.tx.msgs) == 0 { + return nil, errors.New("no messages to broadcast") } - signedTx, err := txf.sign(clientCtx.CmdContext, true) + signedTx, err := txf.sign(ctx, true) if err != nil { - return err + return nil, err } txBytes, err := txf.txConfig.TxEncoder()(signedTx) if err != nil { - return err - } - - res, err := broadcaster.Broadcast(context.Background(), txBytes) - if err != nil { - return err + return nil, err } - return clientCtx.PrintString(string(res)) + return broadcaster.Broadcast(ctx, txBytes) } // countDirectSigners counts the number of DIRECT signers in a signature data. 
@@ -233,6 +277,38 @@ func countDirectSigners(sigData SignatureData) int { } } +// cometBroadcaster returns a broadcast.Broadcaster implementation that uses the CometBFT RPC client. +// It extracts the client context from the provided context and uses it to create a CometBFT broadcaster. +func cometBroadcaster(ctx context.Context) (broadcast.Broadcaster, error) { + c, err := clientcontext.ClientContextFromGoContext(ctx) + if err != nil { + return nil, err + } + + return getCometBroadcaster(c.Cdc, c.Flags) +} + +// askConfirmation encodes the transaction as JSON and prompts the user for confirmation using the provided prompter function. +// It returns the user's confirmation response and any error that occurred during the process. +func askConfirmation(txf Factory, prompter func([]byte) (bool, error)) (bool, error) { + encoder := txf.txConfig.TxJSONEncoder() + if encoder == nil { + return false, errors.New("failed to encode transaction: tx json encoder is nil") + } + + tx, err := txf.getTx() + if err != nil { + return false, err + } + + txBytes, err := encoder(tx) + if err != nil { + return false, fmt.Errorf("failed to encode transaction: %w", err) + } + + return prompter(txBytes) +} + // getSignMode returns the corresponding apitxsigning.SignMode based on the provided mode string. func getSignMode(mode string) apitxsigning.SignMode { switch mode { diff --git a/client/v2/tx/types.go b/client/v2/tx/types.go index a50b0b996b1d..801e246acae5 100644 --- a/client/v2/tx/types.go +++ b/client/v2/tx/types.go @@ -148,20 +148,21 @@ type Tx interface { // txParamsFromFlagSet extracts the transaction parameters from the provided FlagSet. 
func txParamsFromFlagSet(flags *pflag.FlagSet, keybase keyring2.Keyring, ac address.Codec) (params TxParameters, err error) { - timestampUnix, _ := flags.GetInt64(flagTimeoutTimestamp) + timestampUnix, _ := flags.GetInt64(FlagTimeoutTimestamp) timeoutTimestamp := time.Unix(timestampUnix, 0) - chainID, _ := flags.GetString(flagChainID) - memo, _ := flags.GetString(flagNote) - signMode, _ := flags.GetString(flagSignMode) + chainID, _ := flags.GetString(FlagChainID) + memo, _ := flags.GetString(FlagNote) + signMode, _ := flags.GetString(FlagSignMode) - accNumber, _ := flags.GetUint64(flagAccountNumber) - sequence, _ := flags.GetUint64(flagSequence) - from, _ := flags.GetString(flagFrom) + accNumber, _ := flags.GetUint64(FlagAccountNumber) + sequence, _ := flags.GetUint64(FlagSequence) + from, _ := flags.GetString(FlagFrom) var fromName, fromAddress string var addr []byte - isDryRun, _ := flags.GetBool(flagDryRun) - if isDryRun { + isDryRun, _ := flags.GetBool(FlagDryRun) + generateOnly, _ := flags.GetBool(FlagGenerateOnly) + if isDryRun || generateOnly { addr, err = ac.StringToBytes(from) } else { fromName, fromAddress, _, err = keybase.KeyInfo(from) @@ -173,16 +174,16 @@ func txParamsFromFlagSet(flags *pflag.FlagSet, keybase keyring2.Keyring, ac addr return params, err } - gas, _ := flags.GetString(flagGas) + gas, _ := flags.GetString(FlagGas) simulate, gasValue, _ := parseGasSetting(gas) - gasAdjustment, _ := flags.GetFloat64(flagGasAdjustment) - gasPrices, _ := flags.GetString(flagGasPrices) + gasAdjustment, _ := flags.GetFloat64(FlagGasAdjustment) + gasPrices, _ := flags.GetString(FlagGasPrices) - fees, _ := flags.GetString(flagFees) - feePayer, _ := flags.GetString(flagFeePayer) - feeGrater, _ := flags.GetString(flagFeeGranter) + fees, _ := flags.GetString(FlagFees) + feePayer, _ := flags.GetString(FlagFeePayer) + feeGrater, _ := flags.GetString(FlagFeeGranter) - unordered, _ := flags.GetBool(flagUnordered) + unordered, _ := flags.GetBool(FlagUnordered) 
gasConfig, err := NewGasConfig(gasValue, gasAdjustment, gasPrices) if err != nil { diff --git a/simapp/simd/cmd/root.go b/simapp/simd/cmd/root.go index cc7cf47742c1..7851629e7716 100644 --- a/simapp/simd/cmd/root.go +++ b/simapp/simd/cmd/root.go @@ -121,7 +121,10 @@ func NewRootCmd() *cobra.Command { } autoCliOpts := tempApp.AutoCliOpts() - autoCliOpts.ClientCtx = initClientCtx + autoCliOpts.AddressCodec = initClientCtx.AddressCodec + autoCliOpts.ValidatorAddressCodec = initClientCtx.ValidatorAddressCodec + autoCliOpts.ConsensusAddressCodec = initClientCtx.ConsensusAddressCodec + autoCliOpts.Cdc = initClientCtx.Codec nodeCmds := nodeservice.NewNodeCommands() autoCliOpts.ModuleOptions[nodeCmds.Name()] = nodeCmds.AutoCLIOptions() diff --git a/tests/systemtests/mint_test.go b/tests/systemtests/mint_test.go index 1be1a713743b..d1c43bd9d863 100644 --- a/tests/systemtests/mint_test.go +++ b/tests/systemtests/mint_test.go @@ -1,3 +1,5 @@ +//go:build system_test + package systemtests import ( From 5426cd8f345d337bd30b1e8bd2ea6eb44cf6abd0 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Tue, 3 Dec 2024 15:39:55 +0100 Subject: [PATCH 14/17] docs: rewrite building module section (1/n) - mm (#22724) --- UPGRADING.md | 2 + docs/build/building-apps/00-app-go.md | 14 -- docs/build/building-apps/00-runtime.md | 9 + docs/build/building-modules/00-intro.md | 7 +- .../building-modules/01-module-manager.md | 186 ++++++++---------- .../02-messages-and-queries.md | 39 ++-- .../build/building-modules/03-msg-services.md | 71 +++---- .../building-modules/04-query-services.md | 12 +- docs/build/building-modules/06-keeper.md | 2 + docs/learn/beginner/00-app-anatomy.md | 6 +- 10 files changed, 152 insertions(+), 196 deletions(-) delete mode 100644 docs/build/building-apps/00-app-go.md create mode 100644 docs/build/building-apps/00-runtime.md diff --git a/UPGRADING.md b/UPGRADING.md index 86d7b41d71b4..1f9ae2647d02 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -415,6 +415,8 @@ been added 
to avoid the use of the Accounts.String() method. +type MsgSimulatorFn func(r *rand.Rand, accs []Account, cdc address.Codec) (sdk.Msg, error) ``` +The interface `HasProposalMsgs` has been renamed to `HasLegacyProposalMsgs`, as we've introduced a new simulation framework, simpler and easier to use, named [simsx](https://github.com/cosmos/cosmos-sdk/blob/main/simsx/README.md). + ##### Depinject Previously `cosmossdk.io/core` held functions `Invoke`, `Provide` and `Register` were moved to `cosmossdk.io/depinject/appconfig`. diff --git a/docs/build/building-apps/00-app-go.md b/docs/build/building-apps/00-app-go.md deleted file mode 100644 index 5a0524f3bbf7..000000000000 --- a/docs/build/building-apps/00-app-go.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -sidebar_position: 1 ---- - -# Overview of `app.go` - -This section is intended to provide an overview of the `SimApp` `app.go` file and is still a work in progress. -For now please instead read the [tutorials](https://tutorials.cosmos.network) for a deep dive on how to build a chain. - -## Complete `app.go` - -```go reference -https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/simapp/app.go -``` diff --git a/docs/build/building-apps/00-runtime.md b/docs/build/building-apps/00-runtime.md new file mode 100644 index 000000000000..a962019e2993 --- /dev/null +++ b/docs/build/building-apps/00-runtime.md @@ -0,0 +1,9 @@ +--- +sidebar_position: 1 +--- + +# What is `runtime`? + +The `runtime` package is the Cosmos SDK package that combines the building blocks of your blockchain together. It wires together the modules, the applications, the codecs, and the stores. + +A user only needs to import `runtime` in their `app.go` and instantiate a `runtime.App`. 
diff --git a/docs/build/building-modules/00-intro.md b/docs/build/building-modules/00-intro.md index 4eda49125a7b..6618a44caa61 100644 --- a/docs/build/building-modules/00-intro.md +++ b/docs/build/building-modules/00-intro.md @@ -17,9 +17,9 @@ Modules define most of the logic of Cosmos SDK applications. Developers compose ## Role of Modules in a Cosmos SDK Application -The Cosmos SDK can be thought of as the Ruby-on-Rails of blockchain development. It comes with a core that provides the basic functionalities every blockchain application needs, like a [boilerplate implementation of the ABCI](../../learn/advanced/00-baseapp.md) to communicate with the underlying consensus engine, a [`multistore`](../../learn/advanced/04-store.md#multistore) to persist state, a [server](../../learn/advanced/03-node.md) to form a full-node and [interfaces](./09-module-interfaces.md) to handle queries. +The Cosmos SDK can be thought of as the Next.js or Ruby-on-Rails of blockchain development. It comes with a core that provides the basic functionalities every blockchain application needs, like a [boilerplate implementation of the ABCI](../../learn/advanced/00-baseapp.md) to communicate with the underlying consensus engine, a [`multistore`](../../learn/advanced/04-store.md#multistore) to persist state, a [server](../../learn/advanced/03-node.md) to form a full-node and [interfaces](./09-module-interfaces.md) to handle queries. -On top of this core, the Cosmos SDK enables developers to build modules that implement the business logic of their application. In other words, SDK modules implement the bulk of the logic of applications, while the core does the wiring and enables modules to be composed together. The end goal is to build a robust ecosystem of open-source Cosmos SDK modules, making it increasingly easier to build complex blockchain applications. +On top of this core, the Cosmos SDK enables developers to build modules that implement the business logic of their application. 
In other words, SDK modules implement the bulk of the logic of applications, while the core does the wiring (via [runtime](../building-apps/00-runtime.md)) and enables modules to be composed together. The end goal is to build a robust ecosystem of open-source Cosmos SDK modules, making it increasingly easier to build complex blockchain applications. Cosmos SDK modules can be seen as little state-machines within the state-machine. They generally define a subset of the state using one or more `KVStore`s in the [main multistore](../../learn/advanced/04-store.md), as well as a subset of [message types](./02-messages-and-queries.md#messages). These messages are routed by one of the main components of Cosmos SDK core, [`BaseApp`](../../learn/advanced/00-baseapp.md), to a module Protobuf [`Msg` service](./03-msg-services.md) that defines them. @@ -42,8 +42,7 @@ flowchart TD As a result of this architecture, building a Cosmos SDK application usually revolves around writing modules to implement the specialized logic of the application and composing them with existing modules to complete the application. Developers will generally work on modules that implement logic needed for their specific use case that do not exist yet, and will use existing modules for more generic functionalities like staking, accounts, or token management. - -### Modules as Sudo +### Modules as super-users Modules have the ability to perform actions that are not available to regular users. This is because modules are given sudo permissions by the state machine. Modules can reject another modules desire to execute a function but this logic must be explicit. 
Examples of this can be seen when modules create functions to modify parameters: diff --git a/docs/build/building-modules/01-module-manager.md b/docs/build/building-modules/01-module-manager.md index 9742454320ec..1e3bf38aa95e 100644 --- a/docs/build/building-modules/01-module-manager.md +++ b/docs/build/building-modules/01-module-manager.md @@ -5,7 +5,7 @@ sidebar_position: 1 # Module Manager :::note Synopsis -Cosmos SDK modules need to implement the [`AppModule` interfaces](#application-module-interfaces), in order to be managed by the application's [module manager](#module-manager). The module manager plays an important role in [`message` and `query` routing](../../learn/advanced/00-baseapp.md#routing), and allows application developers to set the order of execution of a variety of functions like [`PreBlocker`](https://docs.cosmos.network/main/learn/beginner/app-anatomy) and [`BeginBlocker` and `EndBlocker`](https://docs.cosmos.network/main/learn/beginner/app-anatomy). +Cosmos SDK modules need to implement the [`AppModule` interfaces](#application-module-interfaces), in order to be managed by the application's [module manager](#module-manager). The module manager plays an important role in [`message` and `query` routing](../../learn/advanced/00-baseapp.md#routing), and allows application developers to set the order of execution of a variety of functions like [`PreBlocker`, `BeginBlocker` and `EndBlocker`](https://docs.cosmos.network/main/learn/beginner/app-anatomy). ::: :::note Pre-requisite Readings @@ -18,203 +18,189 @@ Cosmos SDK modules need to implement the [`AppModule` interfaces](#application-m Application module interfaces exist to facilitate the composition of modules together to form a functional Cosmos SDK application. -:::note +Those interface are defined in the `cosmossdk.io/core/appmodule` and `cosmossdk.io/core/appmodule/v2` packages. 
-It is recommended to implement interfaces from the [Core API](https://docs.cosmos.network/main/architecture/adr-063-core-module-api) `appmodule` package. This makes modules less dependent on the SDK. -For legacy reason modules can still implement interfaces from the SDK `module` package. +:::note +The difference between appmodule and appmodule v2 is mainly the introduction of handlers from Cosmos SDK (server) v2. The rest of the API remains the same, and are simply aliases between the two packages. ::: -There are 2 main application module interfaces: - -* [`appmodule.AppModule` / `module.AppModule`](#appmodule) for inter-dependent module functionalities (except genesis-related functionalities). - -The above interfaces are mostly embedding smaller interfaces (extension interfaces), that defines specific functionalities: - - +Following a list of all interfaces a module can implement: -* (legacy) [`module.HasGenesisBasics`](#modulehasgenesisbasics): The legacy interface for stateless genesis methods. -* (legacy) [`module.HasGenesis`](#modulehasgenesis) for inter-dependent genesis-related module functionalities. -* (legacy) [`module.HasABCIGenesis`](#modulehasabcigenesis) for inter-dependent genesis-related module functionalities. +* [`appmodule.AppModule`](#appmodule) is the main interface that defines a module. By default, a module does nothing. To add functionalities, a module can implement extension interfaces. * [`appmodule.HasPreBlocker`](#haspreblocker): The extension interface that contains information about the `AppModule` and `PreBlock`. * [`appmodule.HasBeginBlocker`](#hasbeginblocker): The extension interface that contains information about the `AppModule` and `BeginBlock`. * [`appmodule.HasEndBlocker`](#hasendblocker): The extension interface that contains information about the `AppModule` and `EndBlock`. -* [`appmodule.HasService` / `module.HasServices`](#hasservices): The extension interface for modules to register services. 
-* [`module.HasABCIEndBlock`](#hasabciendblock): The extension interface that contains information about the `AppModule`, `EndBlock` and returns an updated validator set. -* (legacy) [`module.HasConsensusVersion`](#hasconsensusversion): The extension interface for declaring a module consensus version. +* [`module.HasABCIEndBlock`](#hasendblocker): The extension interface that contains information about the `AppModule`, `EndBlock` and returns an updated validator set (Usually only needed by staking). +* [`appmodule.HasRegisterInterfaces`](#hasregisterinterfaces): The extension interface for modules to register their message types. +* [`appmodule.HasService`](#hasservices): The extension interface for modules to register services. Note, this interface is not exposed in core to avoid a gRPC dependency. However it is usable in an application. +* [`appmodule.HasAminoCodec`](#hasaminocodec): The extension interface for modules to support JSON encoding and decoding via `amino`. +* [`appmodule.HasMigrations`](#hasmigrations): The extension interface for registering module migrations. + * [`appmodule.HasConsensusVersion`](#hasconsensusversion): The extension interface for declaring a module consensus version. It is usually not used alone, but in conjunction with `HasMigrations`. +* [`appmodule.HasGenesis`](#hasgenesis) for inter-dependent genesis-related module functionalities. +* [`appmodule.HasABCIGenesis`](#hasabcigenesis) for inter-dependent genesis-related module functionalities, with validator set updates (Usually only needed by staking). The `AppModule` interface exists to define inter-dependent module methods. Many modules need to interact with other modules, typically through [`keeper`s](./06-keeper.md), which means there is a need for an interface where modules list their `keeper`s and other methods that require a reference to another module's object. 
`AppModule` interface extension, such as `HasBeginBlocker` and `HasEndBlocker`, also enables the module manager to set the order of execution between module's methods like `BeginBlock` and `EndBlock`, which is important in cases where the order of execution between modules matters in the context of the application. The usage of extension interfaces allows modules to define only the functionalities they need. For example, a module that does not need an `EndBlock` does not need to define the `HasEndBlocker` interface and thus the `EndBlock` method. `AppModule` and `AppModuleGenesis` are voluntarily small interfaces, that can take advantage of the `Module` patterns without having to define many placeholder functions. -### `HasAminoCodec` +:::note legacy +Prior to the introduction of the `cosmossdk.io/core` package the interfaces were defined in the `types/module` package of the Cosmos SDK. Not all interfaces have been migrated to core. Those legacy interfaces are still supported for backward compatibility, but aren't described in this document and should not be used in new modules. +::: -```go reference -https://github.com/cosmos/cosmos-sdk/blob/eee5e21e1c8d0995b6d4f83b7f55ec0b58d27ba7/core/appmodule/module.go#L74-L78 -``` -* `RegisterLegacyAminoCodec(registry.AminoRegistrar)`: Registers the `amino` codec for the module, which is used to marshal and unmarshal structs to/from `[]byte` in order to persist them in the module's `KVStore`. +### `AppModule` -### `HasRegisterInterfaces` +The `AppModule` interface defines a module. Modules can declare their functionalities by implementing extension interfaces. +`AppModule`s are managed by the [module manager](#manager), which checks which extension interfaces are implemented by the module.
```go reference -https://github.com/cosmos/cosmos-sdk/blob/eee5e21e1c8d0995b6d4f83b7f55ec0b58d27ba7/core/appmodule/v2/module.go#L103-L106 +https://github.com/cosmos/cosmos-sdk/blob/core/v1.0.0-alpha.6/core/appmodule/v2/module.go#L10-L20 ``` -* `RegisterInterfaces(codectypes.InterfaceRegistry)`: Registers a module's interface types and their concrete implementations as `proto.Message`. +### `HasPreBlocker` -### `HasGRPCGateway` +The `HasPreBlocker` is an extension interface from `appmodule.AppModule`. All modules that have a `PreBlock` method implement this interface. ```go reference -https://github.com/cosmos/cosmos-sdk/blob/eee5e21e1c8d0995b6d4f83b7f55ec0b58d27ba7/types/module/module.go#L84-L87 +https://github.com/cosmos/cosmos-sdk/blob/core/v1.0.0-alpha.6/core/appmodule/v2/module.go#L22-L28 ``` -* `RegisterGRPCGatewayRoutes(client.Context, *runtime.ServeMux)`: Registers gRPC routes for the module. - -### Genesis - -:::tip -For easily creating an `AppModule` that only has genesis functionalities, implement `module.HasGenesis/HasABCIGenesis`. -::: +### `HasBeginBlocker` -#### `module.HasGenesisBasics` +The `HasBeginBlocker` is an extension interface from `appmodule.AppModule`. All modules that have a `BeginBlock` method implement this interface. +It gives module developers the option to implement logic that is automatically triggered at the beginning of each block. ```go reference -https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/types/module/module.go#L76-L79 +https://github.com/cosmos/cosmos-sdk/blob/core/v1.0.0-alpha.6/core/appmodule/v2/module.go#L30-L38 ``` -Let us go through the methods: - -* `DefaultGenesis(codec.JSONCodec)`: Returns a default [`GenesisState`](./08-genesis.md#genesisstate) for the module, marshalled to `json.RawMessage`. The default `GenesisState` need to be defined by the module developer and is primarily used for testing.
-* `ValidateGenesis(codec.JSONCodec, client.TxEncodingConfig, json.RawMessage)`: Used to validate the `GenesisState` defined by a module, given in its `json.RawMessage` form. It will usually unmarshall the `json` before running a custom [`ValidateGenesis`](./08-genesis.md#validategenesis) function defined by the module developer. - -#### `module.HasGenesis` +### `HasEndBlocker` -`HasGenesis` is an extension interface for allowing modules to implement genesis functionalities. +The `HasEndBlocker` is an extension interface from `appmodule.AppModule`. All modules that have an `EndBlock` method implement this interface. It gives module developers the option to implement logic that is automatically triggered at the end of each block. ```go reference -https://github.com/cosmos/cosmos-sdk/blob/28fa3b8/types/module/module.go#L184-L189 +https://github.com/cosmos/cosmos-sdk/blob/core/v1.0.0-alpha.6/core/appmodule/v2/module.go#L40-L48 ``` -#### `module.HasABCIGenesis` - -`HasABCIGenesis` is an extension interface for allowing modules to implement genesis functionalities and returns validator set updates. +If a module needs to return validator set updates (staking), they can use `HasABCIEndBlock` (in v1). ```go reference -https://github.com/cosmos/cosmos-sdk/blob/28fa3b8/types/module/module.go#L94-L98 +https://github.com/cosmos/cosmos-sdk/blob/v0.52.0-beta.2/types/module/module.go#L115-L119 ``` -### `AppModule` - -The `AppModule` interface defines a module. Modules can declare their functionalities by implementing extensions interfaces. -`AppModule`s are managed by the [module manager](#manager), which checks which extension interfaces are implemented by the module. 
+Or, alternatively, `HasUpdateValidators` in v2: -#### `appmodule.AppModule` ```go reference -https://github.com/cosmos/cosmos-sdk/blob/28fa3b8/core/appmodule/module.go#L11-L20 +https://github.com/cosmos/cosmos-sdk/blob/core/v1.0.0-alpha.6/core/appmodule/v2/module.go#L87-L94 ``` -#### `module.AppModule` +### `HasRegisterInterfaces` -:::note -Previously the `module.AppModule` interface was containing all the methods that are defined in the extensions interfaces. This was leading to much boilerplate for modules that did not need all the functionalities. -::: +The `HasRegisterInterfaces` is an extension interface from `appmodule.AppModule`. All modules that have a `RegisterInterfaces` method implement this interface. It allows modules to register their message types with their concrete implementations as `proto.Message`. ```go reference -https://github.com/cosmos/cosmos-sdk/blob/28fa3b8/core/appmodule/v2/module.go#L14-L20 +https://github.com/cosmos/cosmos-sdk/blob/core/v1.0.0-alpha.6/core/appmodule/v2/module.go#L103-L106 ``` ### `HasServices` -This interface defines one method. It allows to checks if a module can register invariants. - -#### `appmodule.HasService` +This interface defines one method. It allows modules to register and expose gRPC services. +This interface is not part of the `core` package to avoid a gRPC dependency, but is recognized by the module manager and [runtime](../building-apps/00-runtime.md). ```go reference -https://github.com/cosmos/cosmos-sdk/blob/28fa3b8/core/appmodule/module.go#L22-L40 +https://github.com/cosmos/cosmos-sdk/blob/core/v1.0.0-alpha.6/core/appmodule/module.go#L34-L53 ``` -#### `module.HasServices` +### `HasAminoCodec` + +The `HasAminoCodec` interface allows registering the `amino` codec for the module, which is used to marshal and unmarshal structs to/from `[]byte` in order to persist them in the module's `KVStore`.
```go reference -https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/types/module/module.go#L208-L211 +https://github.com/cosmos/cosmos-sdk/blob/core/v1.0.0-alpha.6/core/appmodule/module.go#L68-L72 ``` -* `RegisterServices(Configurator)`: Allows a module to register services. - -### `HasConsensusVersion` +### `module.HasGRPCGateway` -This interface defines one method for checking a module consensus version. +This interface is not part of the `core` package to avoid a gRPC dependency. It is used to register gRPC gateway routes for the module. [In v2, this will be done differently, and totally abstracted from modules and module manager](https://github.com/cosmos/cosmos-sdk/issues/22715) ```go reference -https://github.com/cosmos/cosmos-sdk/blob/28fa3b8/core/appmodule/v2/migrations.go#L6-L12 +https://github.com/cosmos/cosmos-sdk/blob/v0.52.0-beta.2/types/module/module.go#L74-L77 ``` -* `ConsensusVersion() uint64`: Returns the consensus version of the module. +### `HasMigrations` -### `HasPreBlocker` +The `HasMigrations` interface is used to register module migrations. Learn more about [module migrations](./13-upgrade.md). -The `HasPreBlocker` is an extension interface from `appmodule.AppModule`. All modules that have an `PreBlock` method implement this interface. +```go reference +https://github.com/cosmos/cosmos-sdk/blob/core/v1.0.0-alpha.6/core/appmodule/v2/migrations.go#L14-L21 +``` -### `HasBeginBlocker` +### `HasConsensusVersion` -The `HasBeginBlocker` is an extension interface from `appmodule.AppModule`. All modules that have an `BeginBlock` method implement this interface. +This interface defines one method for checking a module consensus version. It is mainly used in conjunction with `HasMigrations`.
```go reference -https://github.com/cosmos/cosmos-sdk/blob/28fa3b8dfcb3208d3b1cfbae08eda519e4cc1560/core/appmodule/v2/module.go#L30-L38 +https://github.com/cosmos/cosmos-sdk/blob/core/v1.0.0-alpha.6/core/appmodule/v2/migrations.go#L5-L12 ``` -* `BeginBlock(context.Context) error`: This method gives module developers the option to implement logic that is automatically triggered at the beginning of each block. +### Genesis -### `HasEndBlocker` +#### `HasGenesis` -The `HasEndBlocker` is an extension interface from `appmodule.AppModule`. All modules that have an `EndBlock` method implement this interface. If a module needs to return validator set updates (staking), they can use `HasABCIEndBlock` +`HasGenesis` is an extension interface for allowing modules to implement genesis functionalities. ```go reference -https://github.com/cosmos/cosmos-sdk/blob/28fa3b8dfcb3208d3b1cfbae08eda519e4cc1560/core/appmodule/v2/module.go#L40-L48 +https://github.com/cosmos/cosmos-sdk/blob/core/v1.0.0-alpha.6/core/appmodule/v2/genesis.go#L8-L19 ``` -* `EndBlock(context.Context) error`: This method gives module developers the option to implement logic that is automatically triggered at the end of each block. +Let us go through some of the methods: -### `HasABCIEndBlock` +* `DefaultGenesis()`: Returns a default [`GenesisState`](./08-genesis.md#genesisstate) for the module, marshalled to `json.RawMessage`. The default `GenesisState` need to be defined by the module developer and is primarily used for testing. +* `ValidateGenesis(data json.RawMessage) error`: Used to validate the `GenesisState` defined by a module, given in its `json.RawMessage` form. It will usually unmarshall the `json` before running a custom [`ValidateGenesis`](./08-genesis.md#validategenesis) function defined by the module developer. -The `HasUpdateValidators` is an extension interface from `module.AppModule`. 
All modules that have an `EndBlock` which return validator set updates implement this interface. +In the same vein as `HasABCIEndBlock`, `HasABCIGenesis` is used to return validator set updates. -```go reference -https://github.com/cosmos/cosmos-sdk/blob/28fa3b8dfcb3208d3b1cfbae08eda519e4cc1560/core/appmodule/v2/module.go#L87-L94 -``` #### `HasABCIGenesis` -* `UpdateValidators(context.Context) ([]abci.ValidatorUpdate, error)`: This method gives module developers the option to inform the underlying consensus engine of validator set changes (e.g. the `staking` module). +`HasABCIGenesis` is an extension interface for allowing modules to implement genesis functionalities and returns validator set updates. +```go reference +https://github.com/cosmos/cosmos-sdk/blob/core/v1.0.0-alpha.6/core/appmodule/v2/genesis.go#L21-L31 +``` ### Implementing the Application Module Interfaces - Typically, the various application module interfaces are implemented in a file called `module.go`, located in the module's folder (e.g. `./x/module/module.go`). -Almost every module needs to implement the `AppModule` interfaces. If the module is only used for genesis, it will implement `AppModuleGenesis` instead of `AppModule`. The concrete type that implements the interface can add parameters that are required for the implementation of the various methods of the interface. +Every module must implement the `AppModule` interface. If the module is only used for genesis, it will implement `HasGenesis` in addition to `AppModule`. The concrete type that implements the interface can add parameters that are required for the implementation of the various methods of the interface. ```go // example type AppModule struct { keeper Keeper } -``` -In the example above, you can see that the `AppModule` concrete type references an `AppModuleBasic`, and not an `AppModuleGenesis`.
That is because `AppModuleGenesis` only needs to be implemented in modules that focus on genesis-related functionalities. In most modules, the concrete `AppModule` type will have a reference to an `AppModuleBasic` and implement the two added methods of `AppModuleGenesis` directly in the `AppModule` type. +func (AppModule) IsAppModule() {} +func (AppModule) IsOnePerModuleType() {} +``` ## Module Manager -The module manager is used to manage collections of `appmodule.AppModule` and `AppModule` and all the extensions interfaces. +The module manager is used to manage collections of `AppModule` and all the extensions interfaces. ### `Manager` The `Manager` is a structure that holds all the `AppModule` of an application, and defines the order of execution between several key components of these modules: ```go reference -https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/types/module/module.go#L267-L276 +https://github.com/cosmos/cosmos-sdk/blob/v0.52.0-beta.2/types/module/module.go#L121-L133 ``` +:::tip +Thanks to `runtime`, a user does not need to interact directly with the `Manager`. The `Manager` is used internally by the `runtime` to manage the modules of the application. +::: + The module manager is used throughout the application whenever an action on a collection of modules is required. It implements the following methods: * `NewManager(modules ...AppModule)`: Constructor function. It takes a list of the application's `AppModule`s and builds a new `Manager`. It is generally called from the application's main [constructor function](../../learn/beginner/00-app-anatomy.md#constructor-function). @@ -242,18 +228,8 @@ The module manager is used throughout the application whenever an action on a co * `DefaultGenesis(cdc codec.JSONCodec)`: Provides default genesis information for modules in the application by calling the [`DefaultGenesis(cdc codec.JSONCodec)`](./08-genesis.md#defaultgenesis) function of each module. 
It only calls the modules that implements the `HasGenesisBasics` interfaces. * `ValidateGenesis(cdc codec.JSONCodec, txEncCfg client.TxEncodingConfig, genesis map[string]json.RawMessage)`: Validates the genesis information modules by calling the [`ValidateGenesis(codec.JSONCodec, client.TxEncodingConfig, json.RawMessage)`](./08-genesis.md#validategenesis) function of modules implementing the `HasGenesisBasics` interface. -Here's an example of a concrete integration within an `simapp`: - -```go reference -https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/simapp/app.go#L411-L434 -``` - -This is the same example from `runtime` (the package that powers app di): - -```go reference -https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/runtime/module.go#L61 -``` +Here's an example of a concrete integration within [`runtime`](../building-apps/00-runtime.md) ```go reference -https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/runtime/module.go#L82 +https://github.com/cosmos/cosmos-sdk/blob/v0.52.0-beta.2/runtime/module.go#L242-L244 ``` diff --git a/docs/build/building-modules/02-messages-and-queries.md b/docs/build/building-modules/02-messages-and-queries.md index 5ec1c7a707e9..fc7c6e29a126 100644 --- a/docs/build/building-modules/02-messages-and-queries.md +++ b/docs/build/building-modules/02-messages-and-queries.md @@ -24,11 +24,10 @@ When a transaction is relayed from the underlying consensus engine to the Cosmos Defining Protobuf `Msg` services is the recommended way to handle messages. A Protobuf `Msg` service should be created for each module, typically in `tx.proto` (see more info about [conventions and naming](../../learn/advanced/05-encoding.md#faq)). It must have an RPC service method defined for each message in the module. - -Each `Msg` service method must have exactly one argument, which must implement the `transaction.Msg` interface, and a Protobuf response. 
The naming convention is to call the RPC argument `Msg` and the RPC response `MsgResponse`. For example: +Each `Msg` service method must have exactly one argument, which must implement the [`transaction.Msg`](https://pkg.go.dev/cosmossdk.io/core@v1.0.0-alpha.6/transaction#Msg) interface, and a Protobuf response. The naming convention is to call the RPC argument `Msg` and the RPC response `MsgResponse`. For example: ```protobuf - rpc Send(MsgSend) returns (MsgSendResponse); +rpc Send(MsgSend) returns (MsgSendResponse); ``` See an example of a `Msg` service definition from `x/bank` module: @@ -45,37 +44,29 @@ https://github.com/cosmos/cosmos-sdk/blob/28fa3b8/x/bank/proto/cosmos/bank/v1bet https://github.com/cosmos/cosmos-sdk/blob/main/core/transaction/transaction.go#L8 ``` -To attach a `ValidateBasic()` method to a message, then you must add methods to the type adhereing to the `HasValidateBasic`. - -```go reference -https://github.com/cosmos/cosmos-sdk/blob/9c1e8b247cd47b5d3decda6e86fbc3bc996ee5d7/types/tx_msg.go#L84-L88 -``` - -In 0.50+ signers from the `GetSigners()` call is automated via a protobuf annotation. - +Signers from the `GetSigners()` call is automated via a protobuf annotation. Read more about the signer field [here](./05-protobuf-annotations.md). ```protobuf reference -https://github.com/cosmos/cosmos-sdk/blob/e6848d99b55a65d014375b295bdd7f9641aac95e/proto/cosmos/bank/v1beta1/tx.proto#L40 +https://github.com/cosmos/cosmos-sdk/blob/v0.52.0-beta.2/x/bank/proto/cosmos/bank/v1beta1/tx.proto#L45 ``` If there is a need for custom signers then there is an alternative path which can be taken. A function which returns `signing.CustomGetSigner` for a specific message can be defined. ```go func ProvideBankSendTransactionGetSigners() signing.CustomGetSigner { - - // Extract the signer from the signature. 
- signer, err := coretypes.LatestSigner(Tx).Sender(ethTx) - if err != nil { - return nil, err - } - - // Return the signer in the required format. - return [][]byte{signer.Bytes()}, nil + // Extract the signer from the signature. + signer, err := coretypes.LatestSigner(Tx).Sender(ethTx) + if err != nil { + return nil, err + } + + // Return the signer in the required format. + return [][]byte{signer.Bytes()}, nil } ``` -When using dependency injection (depinject) this can be provided to the application via the provide method. +This can be provided to the application using depinject's `Provide` method in the application's `app.go`: ```go depinject.Provide(banktypes.ProvideBankSendTransactionGetSigners) @@ -88,7 +79,7 @@ The Cosmos SDK uses Protobuf definitions to generate client and server code: A `RegisterMsgServer` method is also generated and should be used to register the module's `MsgServer` implementation in `RegisterServices` method from the [`AppModule` interface](./01-module-manager.md#appmodule). -In order for clients (CLI and grpc-gateway) to have these URLs registered, the Cosmos SDK provides the function `RegisterMsgServiceDesc(registry codectypes.InterfaceRegistry, sd *grpc.ServiceDesc)` that should be called inside module's [`RegisterInterfaces`](01-module-manager.md#hasregisterinterfaces) method, using the proto-generated `&_Msg_serviceDesc` as `*grpc.ServiceDesc` argument. +In order for clients (CLI and gRPC-gateway) to have these URLs registered, the Cosmos SDK provides the function `RegisterMsgServiceDesc(registry codectypes.InterfaceRegistry, sd *grpc.ServiceDesc)` that should be called inside module's [`RegisterInterfaces`](01-module-manager.md#hasregisterinterfaces) method, using the proto-generated `&_Msg_serviceDesc` as `*grpc.ServiceDesc` argument. 
## Queries @@ -102,7 +93,7 @@ Queries should be defined using [Protobuf services](https://protobuf.dev/program Here's an example of such a `Query` service definition: ```protobuf reference -https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/proto/cosmos/auth/v1beta1/query.proto#L14-L89 +https://github.com/cosmos/cosmos-sdk/blob/v0.52.0-beta.2/proto/cosmos/auth/v1beta1/query.proto#L15-L81 ``` As `proto.Message`s, generated `Response` types implement by default `String()` method of [`fmt.Stringer`](https://pkg.go.dev/fmt#Stringer). diff --git a/docs/build/building-modules/03-msg-services.md b/docs/build/building-modules/03-msg-services.md index 14f906119015..1a06925b70e4 100644 --- a/docs/build/building-modules/03-msg-services.md +++ b/docs/build/building-modules/03-msg-services.md @@ -17,7 +17,7 @@ A Protobuf `Msg` service processes [messages](./02-messages-and-queries.md#messa ## Implementation of a module `Msg` service -Each module should define a Protobuf `Msg` service, which will be responsible for processing requests (implementing `sdk.Msg`) and returning responses. +Each module should define a Protobuf `Msg` service, which will be responsible for processing requests (implementing `transaction.Msg`) and returning responses. As further described in [ADR 031](../architecture/adr-031-msg-service.md), this approach has the advantage of clearly specifying return types and generating server and client code. 
@@ -30,24 +30,12 @@ https://github.com/cosmos/cosmos-sdk/blob/28fa3b8/x/bank/types/tx.pb.go#L564-L57 When possible, the existing module's [`Keeper`](./06-keeper.md) should implement `MsgServer`, otherwise a `msgServer` struct that embeds the `Keeper` can be created, typically in `./keeper/msg_server.go`: ```go reference -https://github.com/cosmos/cosmos-sdk/blob/28fa3b8/x/bank/keeper/msg_server.go#L16-L19 +https://github.com/cosmos/cosmos-sdk/blob/v0.52.0-beta.2/x/bank/keeper/msg_server.go#L13-L15 ``` -`msgServer` methods can retrieve the auxiliary information or services using the environment variable, it is always located in the keeper: +`msgServer` methods can retrieve the auxiliary information or services using the environment variable, it should always be located in the [keeper](./06-keeper.md). -Environment: - -```go reference -https://github.com/cosmos/cosmos-sdk/blob/07151304e2ec6a185243d083f59a2d543253cb15/core/appmodule/v2/environment.go#L14-L29 -``` - -Keeper Example: - -```go reference -https://github.com/cosmos/cosmos-sdk/blob/07151304e2ec6a185243d083f59a2d543253cb15/x/bank/keeper/keeper.go#L56-L58 -``` - -`transaction.Msg` processing usually follows these 3 steps: +A `transaction.Msg` processing usually follows these 3 steps: ### Validation @@ -79,31 +67,48 @@ After the validation is successful, the `msgServer` method uses the [`keeper`](. ### Events -Before returning, `msgServer` methods generally emit one or more [events](../../learn/advanced/08-events.md) by using the `EventManager` held in `environment`. +Before returning, `msgServer` methods generally emit one or more [events](../../learn/advanced/08-events.md) by using the `EventService` held in `environment`. There are two ways to emit events, typed events using protobuf or arbitrary key & values.
-Typed Events: +For typed events: ```go -ctx.EventManager().EmitTypedEvent( - &group.EventABC{Key1: Value1, Key2, Value2}) +environment.EventService.EventManager(ctx).Emit(&group.EventABC{Key1: Value1, Key2, Value2}) ``` -Arbitrary Events: +Or using simple KV events: ```go -ctx.EventManager().EmitEvent( - sdk.NewEvent( +environment.EventService.EventManager(ctx).EmitKV( eventType, // e.g. sdk.EventTypeMessage for a message, types.CustomEventType for a custom event defined in the module - sdk.NewAttribute(key1, value1), - sdk.NewAttribute(key2, value2), - ), + event.Attribute{Key: key1, Value: value1}, + event.Attribute{Key: key2, Value: value2}, ) ``` These events are relayed back to the underlying consensus engine and can be used by service providers to implement services around the application. Click [here](../../learn/advanced/08-events.md) to learn more about events. +### Telemetry + +:::warning +Telemetry adds a performance overhead to the chain. It is recommended to only use this in critical paths. +::: + +New [telemetry metrics](../../learn/advanced/09-telemetry.md) can be created from `msgServer` methods when handling messages. + +This is an example from the `x/auth/vesting` module: + +```go reference +https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/x/auth/vesting/msg_server.go#L76-L88 +``` + +## How it works + +:::warning +This flow concerns only a Cosmos SDK *baseapp*, and not Cosmos SDK v2. +::: + The invoked `msgServer` method returns a `proto.Message` response and an `error`. These return values are then wrapped into an `*sdk.Result` or an `error`: ```go reference
- -This is an example from the `x/auth/vesting` module: - -```go reference -https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/x/auth/vesting/msg_server.go#L76-L88 -``` - -:::Warning -Telemetry adds a performance overhead to the chain. It is recommended to only use this in critical paths -::: diff --git a/docs/build/building-modules/04-query-services.md b/docs/build/building-modules/04-query-services.md index a787a0c22b6f..9486482ea6cd 100644 --- a/docs/build/building-modules/04-query-services.md +++ b/docs/build/building-modules/04-query-services.md @@ -5,7 +5,7 @@ sidebar_position: 1 # Query Services :::note Synopsis -A Protobuf Query service processes [`queries`](./02-messages-and-queries.md#queries). Query services are specific to the module in which they are defined, and only process `queries` defined within said module. They are called from `BaseApp`'s [`Query` method](../../learn/advanced/00-baseapp.md#query). +A Protobuf Query service processes [`queries`](./02-messages-and-queries.md#queries). Query services are specific to the module in which they are defined, and only process `queries` defined within said module. ::: :::note Pre-requisite Readings @@ -28,18 +28,17 @@ type QueryServer interface { } ``` -These custom queries methods should be implemented by a module's keeper, typically in `./keeper/grpc_query.go`. The first parameter of these methods is a generic `context.Context`. Therefore, the Cosmos SDK provides a function `sdk.UnwrapSDKContext` to retrieve the `context.Context` from the provided -`context.Context`. +These custom queries methods should be implemented by a module's keeper, typically in `./keeper/grpc_query.go`. 
Here's an example implementation for the bank module: ```go reference -https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/x/bank/keeper/grpc_query.go +https://github.com/cosmos/cosmos-sdk/blob/v0.52.0-beta.2/x/bank/keeper/grpc_query.go#L20-L48 ``` ### Calling queries from the State Machine -The Cosmos SDK v0.47 introduces a new `cosmos.query.v1.module_query_safe` Protobuf annotation which is used to state that a query that is safe to be called from within the state machine, for example: +The `cosmos.query.v1.module_query_safe` protobuf annotation is used to state that a query that is safe to be called from within the state machine, for example: * a Keeper's query function can be called from another module's Keeper, * ADR-033 intermodule query calls, @@ -53,5 +52,4 @@ If the `module_query_safe` annotation set to `true`, it means: If you are a module developer and want to use `module_query_safe` annotation for your own query, you have to ensure the following things: * the query is deterministic and won't introduce state-machine-breaking changes without coordinated upgrades -* it has its gas tracked, to avoid the attack vector where no gas is accounted for - on potentially high-computation queries. +* it has its gas tracked, to avoid the attack vector where no gas is accounted for on potentially high-computation queries. diff --git a/docs/build/building-modules/06-keeper.md b/docs/build/building-modules/06-keeper.md index 0bd776ff9b9d..59180170cfd7 100644 --- a/docs/build/building-modules/06-keeper.md +++ b/docs/build/building-modules/06-keeper.md @@ -53,6 +53,8 @@ Let us go through the different parameters: Of course, it is possible to define different types of internal `keeper`s for the same module (e.g. a read-only `keeper`). Each type of `keeper` comes with its own constructor function, which is called from the [application's constructor function](../../learn/beginner/00-app-anatomy.md). 
This is where `keeper`s are instantiated, and where developers make sure to pass correct instances of modules' `keeper`s to other modules that require them. +## Environment + ## Implementing Methods `Keeper`s primarily expose methods for business logic, as validity checks should have already been performed by the [`Msg` server](./03-msg-services.md) when `keeper`s' methods are called. diff --git a/docs/learn/beginner/00-app-anatomy.md b/docs/learn/beginner/00-app-anatomy.md index edd8611c3e46..88e54fbf7975 100644 --- a/docs/learn/beginner/00-app-anatomy.md +++ b/docs/learn/beginner/00-app-anatomy.md @@ -57,7 +57,9 @@ In general, the core of the state-machine is defined in a file called `app.go`. ### Type Definition of the Application -The first thing defined in `app.go` is the `type` of the application. It is generally comprised of the following parts: + + + ### Constructor Function From 94cfcc11aaf543179bc91caaa601e9d80c91ecd4 Mon Sep 17 00:00:00 2001 From: Marko Date: Tue, 3 Dec 2024 17:18:19 +0100 Subject: [PATCH 15/17] refactor(store/v2)!: simplify storage (#22683) --- runtime/v2/builder.go | 1 + server/v2/cometbft/abci_test.go | 6 +- .../v2/cometbft/internal/mock/mock_reader.go | 8 +- .../v2/cometbft/internal/mock/mock_store.go | 24 +- server/v2/cometbft/server.go | 2 - server/v2/stf/branch/bench_test.go | 1 + server/v2/store/snapshot.go | 7 +- store/iavl/store_test.go | 2 - store/v2/commitment/iavl/tree.go | 30 + store/v2/commitment/store.go | 32 +- store/v2/commitment/store_test_suite.go | 69 +- store/v2/database.go | 15 +- store/v2/migration/README.md | 2 +- store/v2/migration/manager.go | 85 +- store/v2/migration/manager_test.go | 271 ++--- store/v2/mock/db_mock.go | 240 +--- store/v2/mock/types.go | 8 +- store/v2/pruning/manager.go | 20 +- store/v2/pruning/manager_test.go | 36 +- store/v2/root/factory.go | 50 +- store/v2/root/migrate_test.go | 17 +- store/v2/root/store.go | 80 +- store/v2/root/store_mock_test.go | 39 +- store/v2/root/store_test.go | 55 +- 
store/v2/root/upgrade_test.go | 16 +- store/v2/snapshots/helpers_test.go | 39 +- store/v2/snapshots/manager.go | 36 +- store/v2/snapshots/manager_test.go | 32 +- store/v2/snapshots/snapshotter.go | 9 +- store/v2/storage/README.md | 107 -- store/v2/storage/database.go | 27 - store/v2/storage/pebbledb/batch.go | 98 -- store/v2/storage/pebbledb/comparator.go | 242 ---- store/v2/storage/pebbledb/comparator_test.go | 58 - store/v2/storage/pebbledb/db.go | 528 --------- store/v2/storage/pebbledb/db_test.go | 28 - store/v2/storage/pebbledb/iterator.go | 437 ------- store/v2/storage/rocksdb/batch.go | 67 -- store/v2/storage/rocksdb/comparator.go | 76 -- store/v2/storage/rocksdb/db.go | 251 ---- store/v2/storage/rocksdb/db_noflag.go | 70 -- store/v2/storage/rocksdb/db_test.go | 90 -- store/v2/storage/rocksdb/iterator.go | 159 --- store/v2/storage/rocksdb/opts.go | 125 -- store/v2/storage/storage_bench_test.go | 182 --- store/v2/storage/storage_test_suite.go | 1056 ----------------- store/v2/storage/store.go | 162 --- store/v2/storage/util/iterator.go | 53 - store/v2/store.go | 3 - .../integration/accounts/base_account_test.go | 4 + tests/integration/accounts/bundler_test.go | 1 + tests/integration/v2/auth/app_test.go | 2 - 52 files changed, 366 insertions(+), 4692 deletions(-) delete mode 100644 store/v2/storage/README.md delete mode 100644 store/v2/storage/database.go delete mode 100644 store/v2/storage/pebbledb/batch.go delete mode 100644 store/v2/storage/pebbledb/comparator.go delete mode 100644 store/v2/storage/pebbledb/comparator_test.go delete mode 100644 store/v2/storage/pebbledb/db.go delete mode 100644 store/v2/storage/pebbledb/db_test.go delete mode 100644 store/v2/storage/pebbledb/iterator.go delete mode 100644 store/v2/storage/rocksdb/batch.go delete mode 100644 store/v2/storage/rocksdb/comparator.go delete mode 100644 store/v2/storage/rocksdb/db.go delete mode 100644 store/v2/storage/rocksdb/db_noflag.go delete mode 100644 store/v2/storage/rocksdb/db_test.go 
delete mode 100644 store/v2/storage/rocksdb/iterator.go delete mode 100644 store/v2/storage/rocksdb/opts.go delete mode 100644 store/v2/storage/storage_bench_test.go delete mode 100644 store/v2/storage/storage_test_suite.go delete mode 100644 store/v2/storage/store.go delete mode 100644 store/v2/storage/util/iterator.go diff --git a/runtime/v2/builder.go b/runtime/v2/builder.go index e6e8cb7c4ea5..b851955943b0 100644 --- a/runtime/v2/builder.go +++ b/runtime/v2/builder.go @@ -134,6 +134,7 @@ func (a *AppBuilder[T]) initGenesis(ctx context.Context, src io.Reader, txHandle if err != nil { return nil, fmt.Errorf("failed to read import state: %w", err) } + var genesisJSON map[string]json.RawMessage if err = json.Unmarshal(bz, &genesisJSON); err != nil { return nil, err diff --git a/server/v2/cometbft/abci_test.go b/server/v2/cometbft/abci_test.go index ab1fdc722879..c2bd81d65f23 100644 --- a/server/v2/cometbft/abci_test.go +++ b/server/v2/cometbft/abci_test.go @@ -591,7 +591,7 @@ func TestConsensus_Query(t *testing.T) { c := setUpConsensus(t, 100_000, cometmock.MockMempool[mock.Tx]{}) // Write data to state storage - err := c.store.GetStateStorage().ApplyChangeset(&store.Changeset{ + err := c.store.GetStateCommitment().WriteChangeset(&store.Changeset{ Version: 1, Changes: []store.StateChanges{ { @@ -691,9 +691,8 @@ func setUpConsensus(t *testing.T, gasLimit uint64, mempool mempool.Mempool[mock. 
) require.NoError(t, err) - ss := cometmock.NewMockStorage(log.NewNopLogger(), t.TempDir()) sc := cometmock.NewMockCommiter(log.NewNopLogger(), string(actorName), "stf") - mockStore := cometmock.NewMockStore(ss, sc) + mockStore := cometmock.NewMockStore(sc) am := appmanager.New(appmanager.Config{ ValidateTxGasLimit: gasLimit, @@ -786,6 +785,7 @@ func TestOptimisticExecution(t *testing.T) { Txs: ppReq.Txs, } fbResp, err := c.FinalizeBlock(context.Background(), fbReq) + require.Nil(t, fbResp) require.Error(t, err) require.ErrorContains(t, err, "test error") // from optimisticMockFunc require.Equal(t, 1, calledTimes) diff --git a/server/v2/cometbft/internal/mock/mock_reader.go b/server/v2/cometbft/internal/mock/mock_reader.go index 9911ee55eb81..46c1d422c648 100644 --- a/server/v2/cometbft/internal/mock/mock_reader.go +++ b/server/v2/cometbft/internal/mock/mock_reader.go @@ -39,7 +39,7 @@ func NewMockReader(v uint64, rs *MockStore, actor []byte) *MockReader { } func (roa *MockReader) Has(key []byte) (bool, error) { - val, err := roa.store.GetStateStorage().Has(roa.actor, roa.version, key) + val, err := roa.store.GetStateCommitment().Has(roa.actor, roa.version, key) if err != nil { return false, err } @@ -48,7 +48,7 @@ func (roa *MockReader) Has(key []byte) (bool, error) { } func (roa *MockReader) Get(key []byte) ([]byte, error) { - result, err := roa.store.GetStateStorage().Get(roa.actor, roa.version, key) + result, err := roa.store.GetStateCommitment().Get(roa.actor, roa.version, key) if err != nil { return nil, err } @@ -57,9 +57,9 @@ func (roa *MockReader) Get(key []byte) ([]byte, error) { } func (roa *MockReader) Iterator(start, end []byte) (corestore.Iterator, error) { - return roa.store.GetStateStorage().Iterator(roa.actor, roa.version, start, end) + return roa.store.GetStateCommitment().Iterator(roa.actor, roa.version, start, end) } func (roa *MockReader) ReverseIterator(start, end []byte) (corestore.Iterator, error) { - return 
roa.store.GetStateStorage().ReverseIterator(roa.actor, roa.version, start, end) + return roa.store.GetStateCommitment().ReverseIterator(roa.actor, roa.version, start, end) } diff --git a/server/v2/cometbft/internal/mock/mock_store.go b/server/v2/cometbft/internal/mock/mock_store.go index b485a75d876b..8cb4542ac41e 100644 --- a/server/v2/cometbft/internal/mock/mock_store.go +++ b/server/v2/cometbft/internal/mock/mock_store.go @@ -11,21 +11,12 @@ import ( "cosmossdk.io/store/v2/commitment/iavl" dbm "cosmossdk.io/store/v2/db" "cosmossdk.io/store/v2/proof" - "cosmossdk.io/store/v2/storage" - "cosmossdk.io/store/v2/storage/pebbledb" ) type MockStore struct { - Storage storev2.VersionedWriter Committer storev2.Committer } -func NewMockStorage(logger log.Logger, dir string) storev2.VersionedWriter { - storageDB, _ := pebbledb.New(dir) - ss := storage.NewStorageStore(storageDB, logger) - return ss -} - func NewMockCommiter(logger log.Logger, actors ...string) storev2.Committer { treeMap := make(map[string]commitment.Tree) for _, actor := range actors { @@ -36,8 +27,8 @@ func NewMockCommiter(logger log.Logger, actors ...string) storev2.Committer { return sc } -func NewMockStore(ss storev2.VersionedWriter, sc storev2.Committer) *MockStore { - return &MockStore{Storage: ss, Committer: sc} +func NewMockStore(sc storev2.Committer) *MockStore { + return &MockStore{Committer: sc} } func (s *MockStore) GetLatestVersion() (uint64, error) { @@ -59,12 +50,7 @@ func (s *MockStore) StateLatest() (uint64, corestore.ReaderMap, error) { } func (s *MockStore) Commit(changeset *corestore.Changeset) (corestore.Hash, error) { - err := s.Storage.ApplyChangeset(changeset) - if err != nil { - return []byte{}, err - } - - err = s.Committer.WriteChangeset(changeset) + err := s.Committer.WriteChangeset(changeset) if err != nil { return []byte{}, err } @@ -81,10 +67,6 @@ func (s *MockStore) StateAt(version uint64) (corestore.ReaderMap, error) { return NewMockReaderMap(version, s), nil } -func (s 
*MockStore) GetStateStorage() storev2.VersionedWriter { - return s.Storage -} - func (s *MockStore) GetStateCommitment() storev2.Committer { return s.Committer } diff --git a/server/v2/cometbft/server.go b/server/v2/cometbft/server.go index 55a38b5e9646..ed0c4fba8702 100644 --- a/server/v2/cometbft/server.go +++ b/server/v2/cometbft/server.go @@ -127,7 +127,6 @@ func New[T transaction.Tx]( indexEvents[e] = struct{}{} } - ss := store.GetStateStorage().(snapshots.StorageSnapshotter) sc := store.GetStateCommitment().(snapshots.CommitSnapshotter) snapshotStore, err := GetSnapshotStore(srv.config.ConfigTomlConfig.RootDir) @@ -155,7 +154,6 @@ func New[T transaction.Tx]( snapshotStore, srv.serverOptions.SnapshotOptions(cfg), sc, - ss, nil, // extensions snapshotter registered below logger, ) diff --git a/server/v2/stf/branch/bench_test.go b/server/v2/stf/branch/bench_test.go index f275e8d4352f..67122b59b66f 100644 --- a/server/v2/stf/branch/bench_test.go +++ b/server/v2/stf/branch/bench_test.go @@ -105,6 +105,7 @@ func Benchmark_Iterate(b *testing.B) { // makeBranchStack creates a branch stack of the given size and initializes it with unique key-value pairs. 
func makeBranchStack(b *testing.B, stackSize int) Store[store.KVStore] { + b.Helper() parent := coretesting.NewMemKV() branch := NewStore[store.KVStore](parent) for i := 1; i < stackSize; i++ { diff --git a/server/v2/store/snapshot.go b/server/v2/store/snapshot.go index c858d47757a9..bf9e5ddb3827 100644 --- a/server/v2/store/snapshot.go +++ b/server/v2/store/snapshot.go @@ -375,10 +375,11 @@ func createSnapshotsManager( } sm := snapshots.NewManager( - snapshotStore, snapshots.NewSnapshotOptions(interval, uint32(keepRecent)), + snapshotStore, + snapshots.NewSnapshotOptions(interval, uint32(keepRecent)), store.GetStateCommitment().(snapshots.CommitSnapshotter), - store.GetStateStorage().(snapshots.StorageSnapshotter), - nil, logger) + nil, + logger) return sm, nil } diff --git a/store/iavl/store_test.go b/store/iavl/store_test.go index d0339c59a107..3a6050e2453e 100644 --- a/store/iavl/store_test.go +++ b/store/iavl/store_test.go @@ -79,8 +79,6 @@ func TestLoadStore(t *testing.T) { cIDHp := types.CommitID{Version: verHp, Hash: hash} require.Nil(t, err) - // TODO: Prune this height - // Create current height Hc updated, err = tree.Set([]byte("hello"), []byte("ciao")) require.NoError(t, err) diff --git a/store/v2/commitment/iavl/tree.go b/store/v2/commitment/iavl/tree.go index 5047e8ef6ed4..4aaac08ab8bf 100644 --- a/store/v2/commitment/iavl/tree.go +++ b/store/v2/commitment/iavl/tree.go @@ -83,6 +83,16 @@ func (t *IavlTree) Commit() ([]byte, uint64, error) { // GetProof returns a proof for the given key and version. 
func (t *IavlTree) GetProof(version uint64, key []byte) (*ics23.CommitmentProof, error) { + // the mutable tree is empty at genesis & when the storekey is removed, but the immutable tree is not empty when the storekey is removed + // by checking the latest version we can determine if we are in genesis or have a key that has been removed + lv, err := t.tree.GetLatestVersion() + if err != nil { + return nil, err + } + if lv == 0 { + return t.tree.GetProof(key) + } + immutableTree, err := t.tree.GetImmutable(int64(version)) if err != nil { return nil, fmt.Errorf("failed to get immutable tree at version %d: %w", version, err) @@ -93,6 +103,16 @@ func (t *IavlTree) GetProof(version uint64, key []byte) (*ics23.CommitmentProof, // Get implements the Reader interface. func (t *IavlTree) Get(version uint64, key []byte) ([]byte, error) { + // the mutable tree is empty at genesis & when the storekey is removed, but the immutable tree is not empty when the storekey is removed + // by checking the latest version we can determine if we are in genesis or have a key that has been removed + lv, err := t.tree.GetLatestVersion() + if err != nil { + return nil, err + } + if lv == 0 { + return t.tree.Get(key) + } + immutableTree, err := t.tree.GetImmutable(int64(version)) if err != nil { return nil, fmt.Errorf("failed to get immutable tree at version %d: %w", version, err) @@ -103,6 +123,16 @@ func (t *IavlTree) Get(version uint64, key []byte) ([]byte, error) { // Iterator implements the Reader interface. 
func (t *IavlTree) Iterator(version uint64, start, end []byte, ascending bool) (corestore.Iterator, error) { + // the mutable tree is empty at genesis & when the storekey is removed, but the immutable tree is not empty when the storekey is removed + // by checking the latest version we can determine if we are in genesis or have a key that has been removed + lv, err := t.tree.GetLatestVersion() + if err != nil { + return nil, err + } + if lv == 0 { + return t.tree.Iterator(start, end, ascending) + } + immutableTree, err := t.tree.GetImmutable(int64(version)) if err != nil { return nil, fmt.Errorf("failed to get immutable tree at version %d: %w", version, err) diff --git a/store/v2/commitment/store.go b/store/v2/commitment/store.go index 5219255f95ca..aa383b57ae56 100644 --- a/store/v2/commitment/store.go +++ b/store/v2/commitment/store.go @@ -233,6 +233,7 @@ func (c *CommitStore) SetInitialVersion(version uint64) error { return nil } +// GetProof returns a proof for the given key and version. func (c *CommitStore) GetProof(storeKey []byte, version uint64, key []byte) ([]proof.CommitmentOp, error) { rawStoreKey := conv.UnsafeBytesToStr(storeKey) tree, ok := c.multiTrees[rawStoreKey] @@ -268,8 +269,12 @@ func (c *CommitStore) GetProof(storeKey []byte, version uint64, key []byte) ([]p // WARNING: This function is only used during the migration process. The SC layer // generally does not provide a reader for the CommitStore. func (c *CommitStore) getReader(storeKey string) (Reader, error) { - tree, ok := c.multiTrees[storeKey] - if !ok { + var tree Tree + if storeTree, ok := c.oldTrees[storeKey]; ok { + tree = storeTree + } else if storeTree, ok := c.multiTrees[storeKey]; ok { + tree = storeTree + } else { return nil, fmt.Errorf("store %s not found", storeKey) } @@ -283,6 +288,14 @@ func (c *CommitStore) getReader(storeKey string) (Reader, error) { // VersionExists implements store.VersionedReader. 
func (c *CommitStore) VersionExists(version uint64) (bool, error) { + latestVersion, err := c.metadata.GetLatestVersion() + if err != nil { + return false, err + } + if latestVersion == 0 { + return version == 0, nil + } + ci, err := c.metadata.GetCommitInfo(version) return ci != nil, err } @@ -435,12 +448,10 @@ func (c *CommitStore) Restore( version uint64, format uint32, protoReader protoio.Reader, - chStorage chan<- *corestore.StateChanges, ) (snapshotstypes.SnapshotItem, error) { var ( importer Importer snapshotItem snapshotstypes.SnapshotItem - storeKey []byte ) loop: @@ -463,8 +474,6 @@ loop: return snapshotstypes.SnapshotItem{}, fmt.Errorf("failed to close importer: %w", err) } } - - storeKey = []byte(item.Store.Name) tree := c.multiTrees[item.Store.Name] if tree == nil { return snapshotstypes.SnapshotItem{}, fmt.Errorf("store %s not found", item.Store.Name) @@ -493,17 +502,6 @@ loop: if node.Value == nil { node.Value = []byte{} } - - // If the node is a leaf node, it will be written to the storage. - chStorage <- &corestore.StateChanges{ - Actor: storeKey, - StateChanges: []corestore.KVPair{ - { - Key: node.Key, - Value: node.Value, - }, - }, - } } err := importer.Add(node) if err != nil { diff --git a/store/v2/commitment/store_test_suite.go b/store/v2/commitment/store_test_suite.go index c41a2540a070..b91119301c1e 100644 --- a/store/v2/commitment/store_test_suite.go +++ b/store/v2/commitment/store_test_suite.go @@ -4,7 +4,6 @@ import ( "bytes" "fmt" "io" - "sync" "github.com/stretchr/testify/suite" @@ -32,6 +31,40 @@ type CommitStoreTestSuite struct { TreeType string } +// TestStore_Snapshotter tests the snapshot functionality of the CommitStore. +// This test verifies that the store can correctly create snapshots and restore from them. +// The test follows these steps: +// +// 1. 
Setup & Data Population: +// - Creates a new CommitStore with two stores (store1 and store2) +// - Writes 10 versions of data (version 1-10) +// - For each version, writes 10 key-value pairs to each store +// - Total data: 2 stores * 10 versions * 10 pairs = 200 key-value pairs +// - Keys are formatted as "key-{version}-{index}" +// - Values are formatted as "value-{version}-{index}" +// - Each version is committed to get a CommitInfo +// +// 2. Snapshot Creation: +// - Creates a dummy extension item for metadata testing +// - Sets up a new target store for restoration +// - Creates a channel for snapshot chunks +// - Launches a goroutine to: +// - Create a snapshot writer +// - Take a snapshot at version 10 +// - Write extension metadata +// +// 3. Snapshot Restoration: +// - Creates a snapshot reader from the chunks +// - Sets up a channel for state changes during restoration +// - Launches a goroutine to collect restored key-value pairs +// - Restores the snapshot into the target store +// - Verifies the extension metadata was preserved +// +// 4. 
Verification: +// - Confirms all 200 key-value pairs were restored correctly +// - Verifies the format: "{storeKey}_key-{version}-{index}" -> "value-{version}-{index}" +// - Checks that the restored store's Merkle tree hashes match the original +// - Ensures store integrity by comparing CommitInfo hashes func (s *CommitStoreTestSuite) TestStore_Snapshotter() { if s.TreeType == "iavlv2" { s.T().Skip("FIXME: iavlv2 does not yet support snapshots") @@ -40,21 +73,26 @@ func (s *CommitStoreTestSuite) TestStore_Snapshotter() { commitStore, err := s.NewStore(dbm.NewMemDB(), s.T().TempDir(), storeKeys, nil, coretesting.NewNopLogger()) s.Require().NoError(err) + // We'll create 10 versions of data latestVersion := uint64(10) kvCount := 10 var cInfo *proof.CommitInfo + + // For each version 1-10 for i := uint64(1); i <= latestVersion; i++ { + // Create KV pairs for each store kvPairs := make(map[string]corestore.KVPairs) for _, storeKey := range storeKeys { kvPairs[storeKey] = corestore.KVPairs{} + // Create 10 KV pairs for this store for j := 0; j < kvCount; j++ { key := []byte(fmt.Sprintf("key-%d-%d", i, j)) value := []byte(fmt.Sprintf("value-%d-%d", i, j)) kvPairs[storeKey] = append(kvPairs[storeKey], corestore.KVPair{Key: key, Value: value}) } } + // Write and commit the changes for this version s.Require().NoError(commitStore.WriteChangeset(corestore.NewChangesetWithPairs(i, kvPairs))) - cInfo, err = commitStore.Commit(i) s.Require().NoError(err) } @@ -88,34 +126,11 @@ func (s *CommitStoreTestSuite) TestStore_Snapshotter() { streamReader, err := snapshots.NewStreamReader(chunks) s.Require().NoError(err) - chStorage := make(chan *corestore.StateChanges, 100) - leaves := make(map[string]string) - wg := sync.WaitGroup{} - wg.Add(1) - go func() { - for kv := range chStorage { - for _, actor := range kv.StateChanges { - leaves[fmt.Sprintf("%s_%s", kv.Actor, actor.Key)] = string(actor.Value) - } - } - wg.Done() - }() - nextItem, err := targetStore.Restore(latestVersion, 
snapshotstypes.CurrentFormat, streamReader, chStorage) + + nextItem, err := targetStore.Restore(latestVersion, snapshotstypes.CurrentFormat, streamReader) s.Require().NoError(err) s.Require().Equal(*dummyExtensionItem.GetExtension(), *nextItem.GetExtension()) - close(chStorage) - wg.Wait() - s.Require().Equal(len(storeKeys)*kvCount*int(latestVersion), len(leaves)) - for _, storeKey := range storeKeys { - for i := 1; i <= int(latestVersion); i++ { - for j := 0; j < kvCount; j++ { - key := fmt.Sprintf("%s_key-%d-%d", storeKey, i, j) - s.Require().Equal(leaves[key], fmt.Sprintf("value-%d-%d", i, j)) - } - } - } - // check the restored tree hash targetCommitInfo, err := targetStore.GetCommitInfo(latestVersion) s.Require().NoError(err) diff --git a/store/v2/database.go b/store/v2/database.go index e3361d731024..0e0697de57bb 100644 --- a/store/v2/database.go +++ b/store/v2/database.go @@ -7,19 +7,6 @@ import ( "cosmossdk.io/store/v2/proof" ) -// VersionedWriter defines an API for a versioned database that allows reads, -// writes, iteration and commitment over a series of versions. -type VersionedWriter interface { - VersionedReader - - SetLatestVersion(version uint64) error - ApplyChangeset(cs *corestore.Changeset) error - - // Closer releases associated resources. It should NOT be idempotent. It must - // only be called once and any call after may panic. - io.Closer -} - type VersionedReader interface { Has(storeKey []byte, version uint64, key []byte) (bool, error) Get(storeKey []byte, version uint64, key []byte) ([]byte, error) @@ -41,6 +28,8 @@ type UpgradableDatabase interface { // Committer defines an API for committing state. type Committer interface { + UpgradeableStore + VersionedReader // WriteChangeset writes the changeset to the commitment state. 
WriteChangeset(cs *corestore.Changeset) error diff --git a/store/v2/migration/README.md b/store/v2/migration/README.md index 9db8c9874a8c..88b395f63f75 100644 --- a/store/v2/migration/README.md +++ b/store/v2/migration/README.md @@ -108,4 +108,4 @@ This limitation should be clearly understood before starting the migration proce especially if the node relies on historical data for any operations. If historical queries are required, users must fully migrate all historical data to `store/v2`. -Alternatively, keeping store/v1 accessible for historical queries could be an option. \ No newline at end of file +Alternatively, keeping store/v1 accessible for historical queries could be an option. diff --git a/store/v2/migration/manager.go b/store/v2/migration/manager.go index d5118a6313e8..5365e8eb6a11 100644 --- a/store/v2/migration/manager.go +++ b/store/v2/migration/manager.go @@ -4,8 +4,7 @@ import ( "encoding/binary" "errors" "fmt" - "io" - "sync" + "sync/atomic" "time" "golang.org/x/sync/errgroup" @@ -15,15 +14,11 @@ import ( "cosmossdk.io/store/v2/commitment" "cosmossdk.io/store/v2/internal/encoding" "cosmossdk.io/store/v2/snapshots" - snapshotstypes "cosmossdk.io/store/v2/snapshots/types" - "cosmossdk.io/store/v2/storage" ) const ( // defaultChannelBufferSize is the default buffer size for the migration stream. defaultChannelBufferSize = 1024 - // defaultStorageBufferSize is the default buffer size for the storage snapshotter. 
- defaultStorageBufferSize = 1024 migrateChangesetKeyFmt = "m/cs_%x" // m/cs_ ) @@ -39,12 +34,11 @@ type Manager struct { logger log.Logger snapshotsManager *snapshots.Manager - stateStorage *storage.StorageStore stateCommitment *commitment.CommitStore - db corestore.KVStoreWithBatch - mtx sync.Mutex // mutex for migratedVersion - migratedVersion uint64 + db corestore.KVStoreWithBatch + + migratedVersion atomic.Uint64 chChangeset <-chan *VersionedChangeset chDone <-chan struct{} @@ -53,11 +47,10 @@ type Manager struct { // NewManager returns a new Manager. // // NOTE: `sc` can be `nil` if don't want to migrate the commitment. -func NewManager(db corestore.KVStoreWithBatch, sm *snapshots.Manager, ss *storage.StorageStore, sc *commitment.CommitStore, logger log.Logger) *Manager { +func NewManager(db corestore.KVStoreWithBatch, sm *snapshots.Manager, sc *commitment.CommitStore, logger log.Logger) *Manager { return &Manager{ logger: logger, snapshotsManager: sm, - stateStorage: ss, stateCommitment: sc, db: db, } @@ -96,63 +89,14 @@ func (m *Manager) Migrate(height uint64) error { // create the migration stream and snapshot, // which acts as protoio.Reader and snapshots.WriteCloser. 
ms := NewMigrationStream(defaultChannelBufferSize) - if err := m.snapshotsManager.CreateMigration(height, ms); err != nil { return err } - // restore the snapshot - chStorage := make(chan *corestore.StateChanges, defaultStorageBufferSize) - eg := new(errgroup.Group) eg.Go(func() error { - return m.stateStorage.Restore(height, chStorage) - }) - eg.Go(func() error { - defer close(chStorage) - if m.stateCommitment != nil { - if _, err := m.stateCommitment.Restore(height, 0, ms, chStorage); err != nil { - return err - } - } else { // there is no commitment migration, just consume the stream to restore the state storage - var storeKey []byte - loop: - for { - snapshotItem := snapshotstypes.SnapshotItem{} - err := ms.ReadMsg(&snapshotItem) - if errors.Is(err, io.EOF) { - break - } - if err != nil { - return fmt.Errorf("failed to read snapshot item: %w", err) - } - switch item := snapshotItem.Item.(type) { - case *snapshotstypes.SnapshotItem_Store: - storeKey = []byte(item.Store.Name) - case *snapshotstypes.SnapshotItem_IAVL: - if item.IAVL.Height == 0 { // only restore the leaf nodes - key := item.IAVL.Key - if key == nil { - key = []byte{} - } - value := item.IAVL.Value - if value == nil { - value = []byte{} - } - chStorage <- &corestore.StateChanges{ - Actor: storeKey, - StateChanges: []corestore.KVPair{ - { - Key: key, - Value: value, - }, - }, - } - } - default: - break loop - } - } + if _, err := m.stateCommitment.Restore(height, 0, ms); err != nil { + return err } return nil }) @@ -161,9 +105,7 @@ func (m *Manager) Migrate(height uint64) error { return err } - m.mtx.Lock() - m.migratedVersion = height - m.mtx.Unlock() + m.migratedVersion.Store(height) return nil } @@ -207,9 +149,7 @@ func (m *Manager) writeChangeset() error { // GetMigratedVersion returns the migrated version. // It is used to check the migrated version in the RootStore. 
func (m *Manager) GetMigratedVersion() uint64 { - m.mtx.Lock() - defer m.mtx.Unlock() - return m.migratedVersion + return m.migratedVersion.Load() } // Sync catches up the Changesets which are committed while the migration is in progress. @@ -251,13 +191,8 @@ func (m *Manager) Sync() error { return fmt.Errorf("failed to commit changeset to commitment: %w", err) } } - if err := m.stateStorage.ApplyChangeset(cs); err != nil { - return fmt.Errorf("failed to write changeset to storage: %w", err) - } - m.mtx.Lock() - m.migratedVersion = version - m.mtx.Unlock() + m.migratedVersion.Store(version) version += 1 } diff --git a/store/v2/migration/manager_test.go b/store/v2/migration/manager_test.go index 07a5b15b8350..103b3244b650 100644 --- a/store/v2/migration/manager_test.go +++ b/store/v2/migration/manager_test.go @@ -13,13 +13,11 @@ import ( "cosmossdk.io/store/v2/commitment/iavl" dbm "cosmossdk.io/store/v2/db" "cosmossdk.io/store/v2/snapshots" - "cosmossdk.io/store/v2/storage" - "cosmossdk.io/store/v2/storage/pebbledb" ) var storeKeys = []string{"store1", "store2"} -func setupMigrationManager(t *testing.T, noCommitStore bool) (*Manager, *commitment.CommitStore) { +func setupMigrationManager(t *testing.T) (*Manager, *commitment.CommitStore) { t.Helper() db := dbm.NewMemDB() @@ -28,18 +26,13 @@ func setupMigrationManager(t *testing.T, noCommitStore bool) (*Manager, *commitm prefixDB := dbm.NewPrefixDB(db, []byte(storeKey)) multiTrees[storeKey] = iavl.NewIavlTree(prefixDB, coretesting.NewNopLogger(), iavl.DefaultConfig()) } - commitStore, err := commitment.NewCommitStore(multiTrees, nil, db, coretesting.NewNopLogger()) require.NoError(t, err) snapshotsStore, err := snapshots.NewStore(t.TempDir()) require.NoError(t, err) - snapshotsManager := snapshots.NewManager(snapshotsStore, snapshots.NewSnapshotOptions(1500, 2), commitStore, nil, nil, coretesting.NewNopLogger()) - - storageDB, err := pebbledb.New(t.TempDir()) - require.NoError(t, err) - newStorageStore := 
storage.NewStorageStore(storageDB, coretesting.NewNopLogger()) // for store/v2 + snapshotsManager := snapshots.NewManager(snapshotsStore, snapshots.NewSnapshotOptions(1500, 2), commitStore, nil, coretesting.NewNopLogger()) db1 := dbm.NewMemDB() multiTrees1 := make(map[string]commitment.Tree) @@ -50,171 +43,137 @@ func setupMigrationManager(t *testing.T, noCommitStore bool) (*Manager, *commitm newCommitStore, err := commitment.NewCommitStore(multiTrees1, nil, db1, coretesting.NewNopLogger()) // for store/v2 require.NoError(t, err) - if noCommitStore { - newCommitStore = nil - } - return NewManager(db, snapshotsManager, newStorageStore, newCommitStore, coretesting.NewNopLogger()), commitStore + return NewManager(db, snapshotsManager, newCommitStore, coretesting.NewNopLogger()), commitStore } func TestMigrateState(t *testing.T) { - for _, noCommitStore := range []bool{false, true} { - t.Run(fmt.Sprintf("Migrate noCommitStore=%v", noCommitStore), func(t *testing.T) { - m, orgCommitStore := setupMigrationManager(t, noCommitStore) - - // apply changeset - toVersion := uint64(100) - keyCount := 10 - for version := uint64(1); version <= toVersion; version++ { - cs := corestore.NewChangeset(version) - for _, storeKey := range storeKeys { - for i := 0; i < keyCount; i++ { - cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false) - } - } - require.NoError(t, orgCommitStore.WriteChangeset(cs)) - _, err := orgCommitStore.Commit(version) - require.NoError(t, err) + m, orgCommitStore := setupMigrationManager(t) + // apply changeset + toVersion := uint64(100) + keyCount := 10 + for version := uint64(1); version <= toVersion; version++ { + cs := corestore.NewChangeset(version) + for _, storeKey := range storeKeys { + for i := 0; i < keyCount; i++ { + cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false) } + } + require.NoError(t, 
orgCommitStore.WriteChangeset(cs)) + _, err := orgCommitStore.Commit(version) + require.NoError(t, err) + } - err := m.Migrate(toVersion - 1) - require.NoError(t, err) - - // expecting error for conflicting process, since Migrate trigger snapshotter create migration, - // which start a snapshot process already. - _, err = m.snapshotsManager.Create(toVersion - 1) - require.Error(t, err) - - if m.stateCommitment != nil { - // check the migrated state - for version := uint64(1); version < toVersion; version++ { - for _, storeKey := range storeKeys { - for i := 0; i < keyCount; i++ { - val, err := m.stateCommitment.Get([]byte(storeKey), toVersion-1, []byte(fmt.Sprintf("key-%d-%d", version, i))) - require.NoError(t, err) - require.Equal(t, []byte(fmt.Sprintf("value-%d-%d", version, i)), val) - } - } - } - // check the latest state - val, err := m.stateCommitment.Get([]byte("store1"), toVersion-1, []byte("key-100-1")) - require.NoError(t, err) - require.Nil(t, val) - val, err = m.stateCommitment.Get([]byte("store2"), toVersion-1, []byte("key-100-0")) - require.NoError(t, err) - require.Nil(t, val) - } + err := m.Migrate(toVersion - 1) + require.NoError(t, err) - // check the storage - for version := uint64(1); version < toVersion; version++ { - for _, storeKey := range storeKeys { - for i := 0; i < keyCount; i++ { - val, err := m.stateStorage.Get([]byte(storeKey), toVersion-1, []byte(fmt.Sprintf("key-%d-%d", version, i))) - require.NoError(t, err) - require.Equal(t, []byte(fmt.Sprintf("value-%d-%d", version, i)), val) - } - } + // expecting error for conflicting process, since Migrate trigger snapshotter create migration, + // which start a snapshot process already. 
+ _, err = m.snapshotsManager.Create(toVersion - 1) + fmt.Println(1) + require.Error(t, err) + + // check the migrated state + for version := uint64(1); version < toVersion; version++ { + for _, storeKey := range storeKeys { + for i := 0; i < keyCount; i++ { + val, err := m.stateCommitment.Get([]byte(storeKey), toVersion-1, []byte(fmt.Sprintf("key-%d-%d", version, i))) + require.NoError(t, err) + require.Equal(t, []byte(fmt.Sprintf("value-%d-%d", version, i)), val) } - }) + } + + // check the latest state + val, err := m.stateCommitment.Get([]byte("store1"), toVersion-1, []byte("key-100-1")) + require.NoError(t, err) + require.Nil(t, val) + val, err = m.stateCommitment.Get([]byte("store2"), toVersion-1, []byte("key-100-0")) + require.NoError(t, err) + require.Nil(t, val) } } func TestStartMigrateState(t *testing.T) { - for _, noCommitStore := range []bool{false, true} { - t.Run(fmt.Sprintf("Migrate noCommitStore=%v", noCommitStore), func(t *testing.T) { - m, orgCommitStore := setupMigrationManager(t, noCommitStore) - - chDone := make(chan struct{}) - chChangeset := make(chan *VersionedChangeset, 1) - - // apply changeset - toVersion := uint64(10) - keyCount := 5 - changesets := []corestore.Changeset{} - - for version := uint64(1); version <= toVersion; version++ { - cs := corestore.NewChangeset(version) - for _, storeKey := range storeKeys { - for i := 0; i < keyCount; i++ { - cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false) - } - } - changesets = append(changesets, *cs) - require.NoError(t, orgCommitStore.WriteChangeset(cs)) - _, err := orgCommitStore.Commit(version) - require.NoError(t, err) - } + m, orgCommitStore := setupMigrationManager(t) - // feed changesets to channel - go func() { - for version := uint64(1); version <= toVersion; version++ { - chChangeset <- &VersionedChangeset{ - Version: version, - Changeset: &changesets[version-1], - } - } - }() - - // check if migrate process 
complete - go func() { - for { - migrateVersion := m.GetMigratedVersion() - if migrateVersion == toVersion-1 { - break - } - } + chDone := make(chan struct{}) + chChangeset := make(chan *VersionedChangeset, 1) - chDone <- struct{}{} - }() - - err := m.Start(toVersion-1, chChangeset, chDone) - require.NoError(t, err) - - // expecting error for conflicting process, since Migrate trigger snapshotter create migration, - // which start a snapshot process already. - _, err = m.snapshotsManager.Create(toVersion - 1) - require.Error(t, err) - - if m.stateCommitment != nil { - // check the migrated state - for version := uint64(1); version < toVersion; version++ { - for _, storeKey := range storeKeys { - for i := 0; i < keyCount; i++ { - val, err := m.stateCommitment.Get([]byte(storeKey), toVersion-1, []byte(fmt.Sprintf("key-%d-%d", version, i))) - require.NoError(t, err) - require.Equal(t, []byte(fmt.Sprintf("value-%d-%d", version, i)), val) - } - } - } - // check the latest state - val, err := m.stateCommitment.Get([]byte("store1"), toVersion-1, []byte("key-100-1")) - require.NoError(t, err) - require.Nil(t, val) - val, err = m.stateCommitment.Get([]byte("store2"), toVersion-1, []byte("key-100-0")) - require.NoError(t, err) - require.Nil(t, val) + // apply changeset + toVersion := uint64(10) + keyCount := 5 + changesets := []corestore.Changeset{} + + for version := uint64(1); version <= toVersion; version++ { + cs := corestore.NewChangeset(version) + for _, storeKey := range storeKeys { + for i := 0; i < keyCount; i++ { + cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false) } + } + changesets = append(changesets, *cs) + require.NoError(t, orgCommitStore.WriteChangeset(cs)) + _, err := orgCommitStore.Commit(version) + require.NoError(t, err) + } - // check the storage - for version := uint64(1); version < toVersion; version++ { - for _, storeKey := range storeKeys { - for i := 0; i < keyCount; i++ { - 
val, err := m.stateStorage.Get([]byte(storeKey), toVersion-1, []byte(fmt.Sprintf("key-%d-%d", version, i))) - require.NoError(t, err) - require.Equal(t, []byte(fmt.Sprintf("value-%d-%d", version, i)), val) - } - } + // feed changesets to channel + go func() { + for version := uint64(1); version <= toVersion; version++ { + chChangeset <- &VersionedChangeset{ + Version: version, + Changeset: &changesets[version-1], } + } + }() + + // check if migrate process complete + go func() { + for { + migrateVersion := m.GetMigratedVersion() + if migrateVersion == toVersion-1 { + break + } + } - // check if migration db write change set to storage - for version := uint64(1); version < toVersion; version++ { - buf := make([]byte, 8) - binary.BigEndian.PutUint64(buf, version) - csKey := []byte(fmt.Sprintf(migrateChangesetKeyFmt, buf)) - csVal, err := m.db.Get(csKey) - require.NoError(t, err) - require.NotEmpty(t, csVal) + chDone <- struct{}{} + }() + + err := m.Start(toVersion-1, chChangeset, chDone) + require.NoError(t, err) + + // expecting error for conflicting process, since Migrate trigger snapshotter create migration, + // which start a snapshot process already. 
+ _, err = m.snapshotsManager.Create(toVersion - 1) + require.Error(t, err) + + if m.stateCommitment != nil { + // check the migrated state + for version := uint64(1); version < toVersion; version++ { + for _, storeKey := range storeKeys { + for i := 0; i < keyCount; i++ { + val, err := m.stateCommitment.Get([]byte(storeKey), toVersion-1, []byte(fmt.Sprintf("key-%d-%d", version, i))) + require.NoError(t, err) + require.Equal(t, []byte(fmt.Sprintf("value-%d-%d", version, i)), val) + } } - }) + } + // check the latest state + val, err := m.stateCommitment.Get([]byte("store1"), toVersion-1, []byte("key-100-1")) + require.NoError(t, err) + require.Nil(t, val) + val, err = m.stateCommitment.Get([]byte("store2"), toVersion-1, []byte("key-100-0")) + require.NoError(t, err) + require.Nil(t, val) + } + + // check if migration db write change set to storage + for version := uint64(1); version < toVersion; version++ { + buf := make([]byte, 8) + binary.BigEndian.PutUint64(buf, version) + csKey := []byte(fmt.Sprintf(migrateChangesetKeyFmt, buf)) + csVal, err := m.db.Get(csKey) + require.NoError(t, err) + require.NotEmpty(t, csVal) } } diff --git a/store/v2/mock/db_mock.go b/store/v2/mock/db_mock.go index ba65f2baf243..31541c998f3b 100644 --- a/store/v2/mock/db_mock.go +++ b/store/v2/mock/db_mock.go @@ -130,6 +130,36 @@ func (mr *MockStateCommitterMockRecorder) GetProof(storeKey, version, key any) * return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProof", reflect.TypeOf((*MockStateCommitter)(nil).GetProof), storeKey, version, key) } +// Has mocks base method. +func (m *MockStateCommitter) Has(storeKey []byte, version uint64, key []byte) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Has", storeKey, version, key) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Has indicates an expected call of Has. 
+func (mr *MockStateCommitterMockRecorder) Has(storeKey, version, key any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Has", reflect.TypeOf((*MockStateCommitter)(nil).Has), storeKey, version, key) +} + +// Iterator mocks base method. +func (m *MockStateCommitter) Iterator(storeKey []byte, version uint64, start, end []byte) (store.Iterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Iterator", storeKey, version, start, end) + ret0, _ := ret[0].(store.Iterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Iterator indicates an expected call of Iterator. +func (mr *MockStateCommitterMockRecorder) Iterator(storeKey, version, start, end any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Iterator", reflect.TypeOf((*MockStateCommitter)(nil).Iterator), storeKey, version, start, end) +} + // LoadVersion mocks base method. func (m *MockStateCommitter) LoadVersion(targetVersion uint64) error { m.ctrl.T.Helper() @@ -198,174 +228,8 @@ func (mr *MockStateCommitterMockRecorder) Prune(version any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Prune", reflect.TypeOf((*MockStateCommitter)(nil).Prune), version) } -// SetInitialVersion mocks base method. -func (m *MockStateCommitter) SetInitialVersion(version uint64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetInitialVersion", version) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetInitialVersion indicates an expected call of SetInitialVersion. -func (mr *MockStateCommitterMockRecorder) SetInitialVersion(version any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetInitialVersion", reflect.TypeOf((*MockStateCommitter)(nil).SetInitialVersion), version) -} - -// WriteChangeset mocks base method. 
-func (m *MockStateCommitter) WriteChangeset(cs *store.Changeset) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WriteChangeset", cs) - ret0, _ := ret[0].(error) - return ret0 -} - -// WriteChangeset indicates an expected call of WriteChangeset. -func (mr *MockStateCommitterMockRecorder) WriteChangeset(cs any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteChangeset", reflect.TypeOf((*MockStateCommitter)(nil).WriteChangeset), cs) -} - -// MockStateStorage is a mock of StateStorage interface. -type MockStateStorage struct { - ctrl *gomock.Controller - recorder *MockStateStorageMockRecorder - isgomock struct{} -} - -// MockStateStorageMockRecorder is the mock recorder for MockStateStorage. -type MockStateStorageMockRecorder struct { - mock *MockStateStorage -} - -// NewMockStateStorage creates a new mock instance. -func NewMockStateStorage(ctrl *gomock.Controller) *MockStateStorage { - mock := &MockStateStorage{ctrl: ctrl} - mock.recorder = &MockStateStorageMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockStateStorage) EXPECT() *MockStateStorageMockRecorder { - return m.recorder -} - -// ApplyChangeset mocks base method. -func (m *MockStateStorage) ApplyChangeset(cs *store.Changeset) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ApplyChangeset", cs) - ret0, _ := ret[0].(error) - return ret0 -} - -// ApplyChangeset indicates an expected call of ApplyChangeset. -func (mr *MockStateStorageMockRecorder) ApplyChangeset(cs any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyChangeset", reflect.TypeOf((*MockStateStorage)(nil).ApplyChangeset), cs) -} - -// Close mocks base method. -func (m *MockStateStorage) Close() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Close") - ret0, _ := ret[0].(error) - return ret0 -} - -// Close indicates an expected call of Close. 
-func (mr *MockStateStorageMockRecorder) Close() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockStateStorage)(nil).Close)) -} - -// Get mocks base method. -func (m *MockStateStorage) Get(storeKey []byte, version uint64, key []byte) ([]byte, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Get", storeKey, version, key) - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Get indicates an expected call of Get. -func (mr *MockStateStorageMockRecorder) Get(storeKey, version, key any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockStateStorage)(nil).Get), storeKey, version, key) -} - -// GetLatestVersion mocks base method. -func (m *MockStateStorage) GetLatestVersion() (uint64, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetLatestVersion") - ret0, _ := ret[0].(uint64) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetLatestVersion indicates an expected call of GetLatestVersion. -func (mr *MockStateStorageMockRecorder) GetLatestVersion() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLatestVersion", reflect.TypeOf((*MockStateStorage)(nil).GetLatestVersion)) -} - -// Has mocks base method. -func (m *MockStateStorage) Has(storeKey []byte, version uint64, key []byte) (bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Has", storeKey, version, key) - ret0, _ := ret[0].(bool) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Has indicates an expected call of Has. -func (mr *MockStateStorageMockRecorder) Has(storeKey, version, key any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Has", reflect.TypeOf((*MockStateStorage)(nil).Has), storeKey, version, key) -} - -// Iterator mocks base method. 
-func (m *MockStateStorage) Iterator(storeKey []byte, version uint64, start, end []byte) (store.Iterator, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Iterator", storeKey, version, start, end) - ret0, _ := ret[0].(store.Iterator) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Iterator indicates an expected call of Iterator. -func (mr *MockStateStorageMockRecorder) Iterator(storeKey, version, start, end any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Iterator", reflect.TypeOf((*MockStateStorage)(nil).Iterator), storeKey, version, start, end) -} - -// PausePruning mocks base method. -func (m *MockStateStorage) PausePruning(pause bool) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "PausePruning", pause) -} - -// PausePruning indicates an expected call of PausePruning. -func (mr *MockStateStorageMockRecorder) PausePruning(pause any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PausePruning", reflect.TypeOf((*MockStateStorage)(nil).PausePruning), pause) -} - -// Prune mocks base method. -func (m *MockStateStorage) Prune(version uint64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Prune", version) - ret0, _ := ret[0].(error) - return ret0 -} - -// Prune indicates an expected call of Prune. -func (mr *MockStateStorageMockRecorder) Prune(version any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Prune", reflect.TypeOf((*MockStateStorage)(nil).Prune), version) -} - // PruneStoreKeys mocks base method. 
-func (m *MockStateStorage) PruneStoreKeys(storeKeys []string, version uint64) error { +func (m *MockStateCommitter) PruneStoreKeys(storeKeys []string, version uint64) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PruneStoreKeys", storeKeys, version) ret0, _ := ret[0].(error) @@ -373,13 +237,13 @@ func (m *MockStateStorage) PruneStoreKeys(storeKeys []string, version uint64) er } // PruneStoreKeys indicates an expected call of PruneStoreKeys. -func (mr *MockStateStorageMockRecorder) PruneStoreKeys(storeKeys, version any) *gomock.Call { +func (mr *MockStateCommitterMockRecorder) PruneStoreKeys(storeKeys, version any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PruneStoreKeys", reflect.TypeOf((*MockStateStorage)(nil).PruneStoreKeys), storeKeys, version) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PruneStoreKeys", reflect.TypeOf((*MockStateCommitter)(nil).PruneStoreKeys), storeKeys, version) } // ReverseIterator mocks base method. -func (m *MockStateStorage) ReverseIterator(storeKey []byte, version uint64, start, end []byte) (store.Iterator, error) { +func (m *MockStateCommitter) ReverseIterator(storeKey []byte, version uint64, start, end []byte) (store.Iterator, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ReverseIterator", storeKey, version, start, end) ret0, _ := ret[0].(store.Iterator) @@ -388,27 +252,27 @@ func (m *MockStateStorage) ReverseIterator(storeKey []byte, version uint64, star } // ReverseIterator indicates an expected call of ReverseIterator. 
-func (mr *MockStateStorageMockRecorder) ReverseIterator(storeKey, version, start, end any) *gomock.Call { +func (mr *MockStateCommitterMockRecorder) ReverseIterator(storeKey, version, start, end any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReverseIterator", reflect.TypeOf((*MockStateStorage)(nil).ReverseIterator), storeKey, version, start, end) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReverseIterator", reflect.TypeOf((*MockStateCommitter)(nil).ReverseIterator), storeKey, version, start, end) } -// SetLatestVersion mocks base method. -func (m *MockStateStorage) SetLatestVersion(version uint64) error { +// SetInitialVersion mocks base method. +func (m *MockStateCommitter) SetInitialVersion(version uint64) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetLatestVersion", version) + ret := m.ctrl.Call(m, "SetInitialVersion", version) ret0, _ := ret[0].(error) return ret0 } -// SetLatestVersion indicates an expected call of SetLatestVersion. -func (mr *MockStateStorageMockRecorder) SetLatestVersion(version any) *gomock.Call { +// SetInitialVersion indicates an expected call of SetInitialVersion. +func (mr *MockStateCommitterMockRecorder) SetInitialVersion(version any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLatestVersion", reflect.TypeOf((*MockStateStorage)(nil).SetLatestVersion), version) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetInitialVersion", reflect.TypeOf((*MockStateCommitter)(nil).SetInitialVersion), version) } // VersionExists mocks base method. 
-func (m *MockStateStorage) VersionExists(v uint64) (bool, error) { +func (m *MockStateCommitter) VersionExists(v uint64) (bool, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "VersionExists", v) ret0, _ := ret[0].(bool) @@ -417,7 +281,21 @@ func (m *MockStateStorage) VersionExists(v uint64) (bool, error) { } // VersionExists indicates an expected call of VersionExists. -func (mr *MockStateStorageMockRecorder) VersionExists(v any) *gomock.Call { +func (mr *MockStateCommitterMockRecorder) VersionExists(v any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VersionExists", reflect.TypeOf((*MockStateStorage)(nil).VersionExists), v) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VersionExists", reflect.TypeOf((*MockStateCommitter)(nil).VersionExists), v) +} + +// WriteChangeset mocks base method. +func (m *MockStateCommitter) WriteChangeset(cs *store.Changeset) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteChangeset", cs) + ret0, _ := ret[0].(error) + return ret0 +} + +// WriteChangeset indicates an expected call of WriteChangeset. 
+func (mr *MockStateCommitterMockRecorder) WriteChangeset(cs any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteChangeset", reflect.TypeOf((*MockStateCommitter)(nil).WriteChangeset), cs) } diff --git a/store/v2/mock/types.go b/store/v2/mock/types.go index 83eba3326f26..3c5edb372a85 100644 --- a/store/v2/mock/types.go +++ b/store/v2/mock/types.go @@ -8,12 +8,6 @@ type StateCommitter interface { store.Pruner store.PausablePruner store.UpgradeableStore -} - -// StateStorage is a mock of store.VersionedWriter -type StateStorage interface { - store.VersionedWriter + store.VersionedReader store.UpgradableDatabase - store.Pruner - store.PausablePruner } diff --git a/store/v2/pruning/manager.go b/store/v2/pruning/manager.go index 4e61a7459d08..e21fe1ce1952 100644 --- a/store/v2/pruning/manager.go +++ b/store/v2/pruning/manager.go @@ -10,19 +10,13 @@ type Manager struct { scPruner store.Pruner // scPruningOption are the pruning options for the SC. scPruningOption *store.PruningOption - // ssPruner is the pruner for the SS. - ssPruner store.Pruner - // ssPruningOption are the pruning options for the SS. - ssPruningOption *store.PruningOption } // NewManager creates a new Pruning Manager. -func NewManager(scPruner, ssPruner store.Pruner, scPruningOption, ssPruningOption *store.PruningOption) *Manager { +func NewManager(scPruner store.Pruner, scPruningOption *store.PruningOption) *Manager { return &Manager{ scPruner: scPruner, scPruningOption: scPruningOption, - ssPruner: ssPruner, - ssPruningOption: ssPruningOption, } } @@ -39,15 +33,6 @@ func (m *Manager) Prune(version uint64) error { } } - // Prune the SS. 
- if m.ssPruningOption != nil { - if prune, pruneTo := m.ssPruningOption.ShouldPrune(version); prune { - if err := m.ssPruner.Prune(pruneTo); err != nil { - return err - } - } - } - return nil } @@ -55,9 +40,6 @@ func (m *Manager) signalPruning(pause bool) { if scPausablePruner, ok := m.scPruner.(store.PausablePruner); ok { scPausablePruner.PausePruning(pause) } - if ssPausablePruner, ok := m.ssPruner.(store.PausablePruner); ok { - ssPausablePruner.PausePruning(pause) - } } func (m *Manager) PausePruning() { diff --git a/store/v2/pruning/manager_test.go b/store/v2/pruning/manager_test.go index 3dd7ec5fd336..d45d123a3504 100644 --- a/store/v2/pruning/manager_test.go +++ b/store/v2/pruning/manager_test.go @@ -14,8 +14,6 @@ import ( "cosmossdk.io/store/v2/commitment" "cosmossdk.io/store/v2/commitment/iavl" dbm "cosmossdk.io/store/v2/db" - "cosmossdk.io/store/v2/storage" - "cosmossdk.io/store/v2/storage/pebbledb" ) var storeKeys = []string{"store1", "store2", "store3"} @@ -25,7 +23,6 @@ type PruningManagerTestSuite struct { manager *Manager sc *commitment.CommitStore - ss *storage.StorageStore } func TestPruningManagerTestSuite(t *testing.T) { @@ -45,12 +42,8 @@ func (s *PruningManagerTestSuite) SetupTest() { s.sc, err = commitment.NewCommitStore(multiTrees, nil, mdb, nopLog) s.Require().NoError(err) - pebbleDB, err := pebbledb.New(s.T().TempDir()) - s.Require().NoError(err) - s.ss = storage.NewStorageStore(pebbleDB, nopLog) - scPruningOption := store.NewPruningOptionWithCustom(0, 1) // prune all - ssPruningOption := store.NewPruningOptionWithCustom(5, 10) // prune some - s.manager = NewManager(s.sc, s.ss, scPruningOption, ssPruningOption) + scPruningOption := store.NewPruningOptionWithCustom(0, 1) // prune all + s.manager = NewManager(s.sc, scPruningOption) } func (s *PruningManagerTestSuite) TestPrune() { @@ -68,8 +61,6 @@ func (s *PruningManagerTestSuite) TestPrune() { _, err := s.sc.Commit(version) s.Require().NoError(err) - 
s.Require().NoError(s.ss.ApplyChangeset(cs)) - s.Require().NoError(s.manager.Prune(version)) } @@ -86,24 +77,6 @@ func (s *PruningManagerTestSuite) TestPrune() { return count == len(storeKeys) } s.Require().Eventually(checkSCPrune, 10*time.Second, 1*time.Second) - - // check the storage store - _, pruneVersion := s.manager.ssPruningOption.ShouldPrune(toVersion) - for version := uint64(1); version <= toVersion; version++ { - for _, storeKey := range storeKeys { - for i := 0; i < keyCount; i++ { - key := []byte(fmt.Sprintf("key-%d-%d", version, i)) - value, err := s.ss.Get([]byte(storeKey), version, key) - if version <= pruneVersion { - s.Require().Nil(value) - s.Require().Error(err) - } else { - s.Require().NoError(err) - s.Require().Equal([]byte(fmt.Sprintf("value-%d-%d", version, i)), value) - } - } - } - } } func TestPruningOption(t *testing.T) { @@ -164,8 +137,6 @@ func (s *PruningManagerTestSuite) TestSignalCommit() { _, err := s.sc.Commit(1) s.Require().NoError(err) - s.Require().NoError(s.ss.ApplyChangeset(cs)) - // commit version 2 for _, storeKey := range storeKeys { cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", 2, 0)), []byte(fmt.Sprintf("value-%d-%d", 2, 0)), false) @@ -179,8 +150,6 @@ func (s *PruningManagerTestSuite) TestSignalCommit() { _, err = s.sc.Commit(2) s.Require().NoError(err) - s.Require().NoError(s.ss.ApplyChangeset(cs)) - // try prune before signaling commit has finished s.Require().NoError(s.manager.Prune(2)) @@ -238,7 +207,6 @@ func (s *PruningManagerTestSuite) TestSignalCommit() { _, err := s.sc.Commit(version) s.Require().NoError(err) - s.Require().NoError(s.ss.ApplyChangeset(cs)) err = s.manager.ResumePruning(version) s.Require().NoError(err) } diff --git a/store/v2/root/factory.go b/store/v2/root/factory.go index 2bd710182f26..36eadf2382bc 100644 --- a/store/v2/root/factory.go +++ b/store/v2/root/factory.go @@ -3,7 +3,6 @@ package root import ( "errors" "fmt" - "os" "cosmossdk.io/core/log" corestore "cosmossdk.io/core/store" 
@@ -14,28 +13,20 @@ import ( "cosmossdk.io/store/v2/db" "cosmossdk.io/store/v2/internal" "cosmossdk.io/store/v2/pruning" - "cosmossdk.io/store/v2/storage" - "cosmossdk.io/store/v2/storage/pebbledb" - "cosmossdk.io/store/v2/storage/rocksdb" ) type ( - SSType string SCType string ) const ( - SSTypePebble SSType = "pebble" - SSTypeRocks SSType = "rocksdb" SCTypeIavl SCType = "iavl" SCTypeIavlV2 SCType = "iavl-v2" ) // Options are the options for creating a root store. type Options struct { - SSType SSType `mapstructure:"ss-type" toml:"ss-type" comment:"State storage database type. Currently we support: \"pebble\" and \"rocksdb\""` SCType SCType `mapstructure:"sc-type" toml:"sc-type" comment:"State commitment database type. Currently we support: \"iavl\" and \"iavl-v2\""` - SSPruningOption *store.PruningOption `mapstructure:"ss-pruning-option" toml:"ss-pruning-option" comment:"Pruning options for state storage"` SCPruningOption *store.PruningOption `mapstructure:"sc-pruning-option" toml:"sc-pruning-option" comment:"Pruning options for state commitment"` IavlConfig *iavl.Config `mapstructure:"iavl-config" toml:"iavl-config"` } @@ -52,16 +43,11 @@ type FactoryOptions struct { // DefaultStoreOptions returns the default options for creating a root store. func DefaultStoreOptions() Options { return Options{ - SSType: SSTypePebble, SCType: SCTypeIavl, SCPruningOption: &store.PruningOption{ KeepRecent: 2, Interval: 100, }, - SSPruningOption: &store.PruningOption{ - KeepRecent: 2, - Interval: 100, - }, IavlConfig: &iavl.Config{ CacheSize: 100_000, SkipFastStorageUpgrade: true, @@ -75,39 +61,11 @@ func DefaultStoreOptions() Options { // necessary, but demonstrates the required steps and configuration to create a root store. 
func CreateRootStore(opts *FactoryOptions) (store.RootStore, error) { var ( - ssDb storage.Database - ss *storage.StorageStore - sc *commitment.CommitStore - err error - ensureDir = func(dir string) error { - if err := os.MkdirAll(dir, 0o0755); err != nil { - return fmt.Errorf("failed to create directory %s: %w", dir, err) - } - return nil - } + sc *commitment.CommitStore + err error ) storeOpts := opts.Options - switch storeOpts.SSType { - case SSTypePebble: - dir := fmt.Sprintf("%s/data/ss/pebble", opts.RootDir) - if err = ensureDir(dir); err != nil { - return nil, err - } - ssDb, err = pebbledb.New(dir) - case SSTypeRocks: - dir := fmt.Sprintf("%s/data/ss/rocksdb", opts.RootDir) - if err = ensureDir(dir); err != nil { - return nil, err - } - ssDb, err = rocksdb.New(dir) - default: - return nil, fmt.Errorf("unknown storage type: %s", opts.Options.SSType) - } - if err != nil { - return nil, err - } - ss = storage.NewStorageStore(ssDb, opts.Logger) metadata := commitment.NewMetadataStore(opts.SCRawDB) latestVersion, err := metadata.GetLatestVersion() @@ -168,6 +126,6 @@ func CreateRootStore(opts *FactoryOptions) (store.RootStore, error) { return nil, err } - pm := pruning.NewManager(sc, ss, storeOpts.SCPruningOption, storeOpts.SSPruningOption) - return New(opts.SCRawDB, opts.Logger, ss, sc, pm, nil, nil) + pm := pruning.NewManager(sc, storeOpts.SCPruningOption) + return New(opts.SCRawDB, opts.Logger, sc, pm, nil, nil) } diff --git a/store/v2/root/migrate_test.go b/store/v2/root/migrate_test.go index 82a53919d93f..3b431bdb24f6 100644 --- a/store/v2/root/migrate_test.go +++ b/store/v2/root/migrate_test.go @@ -17,8 +17,6 @@ import ( "cosmossdk.io/store/v2/migration" "cosmossdk.io/store/v2/pruning" "cosmossdk.io/store/v2/snapshots" - "cosmossdk.io/store/v2/storage" - "cosmossdk.io/store/v2/storage/pebbledb" ) var storeKeys = []string{"store1", "store2", "store3"} @@ -61,11 +59,6 @@ func (s *MigrateStoreTestSuite) SetupTest() { s.Require().NoError(err) } - // create a 
new storage and commitment stores - pebbleDB, err := pebbledb.New(s.T().TempDir()) - s.Require().NoError(err) - ss := storage.NewStorageStore(pebbleDB, testLog) - multiTrees1 := make(map[string]commitment.Tree) for _, storeKey := range storeKeys { multiTrees1[storeKey] = iavl.NewIavlTree(dbm.NewMemDB(), nopLog, iavl.DefaultConfig()) @@ -75,12 +68,12 @@ func (s *MigrateStoreTestSuite) SetupTest() { snapshotsStore, err := snapshots.NewStore(s.T().TempDir()) s.Require().NoError(err) - snapshotManager := snapshots.NewManager(snapshotsStore, snapshots.NewSnapshotOptions(1500, 2), orgSC, nil, nil, testLog) - migrationManager := migration.NewManager(dbm.NewMemDB(), snapshotManager, ss, sc, testLog) - pm := pruning.NewManager(sc, ss, nil, nil) + snapshotManager := snapshots.NewManager(snapshotsStore, snapshots.NewSnapshotOptions(1500, 2), orgSC, nil, testLog) + migrationManager := migration.NewManager(dbm.NewMemDB(), snapshotManager, sc, testLog) + pm := pruning.NewManager(sc, nil) // assume no storage store, simulate the migration process - s.rootStore, err = New(dbm.NewMemDB(), testLog, ss, orgSC, pm, migrationManager, nil) + s.rootStore, err = New(dbm.NewMemDB(), testLog, orgSC, pm, migrationManager, nil) s.Require().NoError(err) } @@ -115,7 +108,7 @@ func (s *MigrateStoreTestSuite) TestMigrateState() { s.Require().NoError(err) // check if the migration is completed - ver, err := s.rootStore.GetStateStorage().GetLatestVersion() + ver, err := s.rootStore.GetLatestVersion() s.Require().NoError(err) if ver == latestVersion { break diff --git a/store/v2/root/store.go b/store/v2/root/store.go index b40baef6424e..6faa51602c5b 100644 --- a/store/v2/root/store.go +++ b/store/v2/root/store.go @@ -34,9 +34,6 @@ type Store struct { // holds the db instance for closing it dbCloser io.Closer - // stateStorage reflects the state storage backend - stateStorage store.VersionedWriter - // stateCommitment reflects the state commitment (SC) backend stateCommitment store.Committer @@ -67,7 
+64,6 @@ type Store struct { func New( dbCloser io.Closer, logger corelog.Logger, - ss store.VersionedWriter, sc store.Committer, pm *pruning.Manager, mm *migration.Manager, @@ -76,7 +72,6 @@ func New( return &Store{ dbCloser: dbCloser, logger: logger, - stateStorage: ss, stateCommitment: sc, pruningManager: pm, migrationManager: mm, @@ -88,11 +83,9 @@ func New( // Close closes the store and resets all internal fields. Note, Close() is NOT // idempotent and should only be called once. func (s *Store) Close() (err error) { - err = errors.Join(err, s.stateStorage.Close()) err = errors.Join(err, s.stateCommitment.Close()) err = errors.Join(err, s.dbCloser.Close()) - s.stateStorage = nil s.stateCommitment = nil s.lastCommitInfo = nil @@ -113,24 +106,13 @@ func (s *Store) SetInitialVersion(v uint64) error { // and the version exists in the state commitment, since the state storage will be // synced during migration. func (s *Store) getVersionedReader(version uint64) (store.VersionedReader, error) { - isExist, err := s.stateStorage.VersionExists(version) + isExist, err := s.stateCommitment.VersionExists(version) if err != nil { return nil, err } if isExist { - return s.stateStorage, nil + return s.stateCommitment, nil } - - if vReader, ok := s.stateCommitment.(store.VersionedReader); ok { - isExist, err := vReader.VersionExists(version) - if err != nil { - return nil, err - } - if isExist { - return vReader, nil - } - } - return nil, fmt.Errorf("version %d does not exist", version) } @@ -139,7 +121,6 @@ func (s *Store) StateLatest() (uint64, corestore.ReaderMap, error) { if err != nil { return 0, nil, err } - vReader, err := s.getVersionedReader(v) if err != nil { return 0, nil, err @@ -154,10 +135,6 @@ func (s *Store) StateAt(v uint64) (corestore.ReaderMap, error) { return NewReaderMap(v, vReader), err } -func (s *Store) GetStateStorage() store.VersionedWriter { - return s.stateStorage -} - func (s *Store) GetStateCommitment() store.Committer { return s.stateCommitment 
} @@ -198,29 +175,9 @@ func (s *Store) Query(storeKey []byte, version uint64, key []byte, prove bool) ( defer s.telemetry.MeasureSince(now, "root_store", "query") } - var val []byte - var err error - if s.isMigrating { // if we're migrating, we need to query the SC backend - val, err = s.stateCommitment.Get(storeKey, version, key) - if err != nil { - return store.QueryResult{}, fmt.Errorf("failed to query SC store: %w", err) - } - } else { - val, err = s.stateStorage.Get(storeKey, version, key) - if err != nil { - return store.QueryResult{}, fmt.Errorf("failed to query SS store: %w", err) - } - if val == nil { - // fallback to querying SC backend if not found in SS backend - // - // Note, this should only used during migration, i.e. while SS and IAVL v2 - // are being asynchronously synced. - bz, scErr := s.stateCommitment.Get(storeKey, version, key) - if scErr != nil { - return store.QueryResult{}, fmt.Errorf("failed to query SC store: %w", scErr) - } - val = bz - } + val, err := s.stateCommitment.Get(storeKey, version, key) + if err != nil { + return store.QueryResult{}, fmt.Errorf("failed to query SC store: %w", err) } result := store.QueryResult{ @@ -291,15 +248,6 @@ func (s *Store) LoadVersionAndUpgrade(version uint64, upgrades *corestore.StoreU return err } - // if the state storage implements the UpgradableDatabase interface, prune the - // deleted store keys - upgradableDatabase, ok := s.stateStorage.(store.UpgradableDatabase) - if ok { - if err := upgradableDatabase.PruneStoreKeys(upgrades.Deleted, version); err != nil { - return fmt.Errorf("failed to prune store keys %v: %w", upgrades.Deleted, err) - } - } - return nil } @@ -318,11 +266,7 @@ func (s *Store) loadVersion(v uint64, upgrades *corestore.StoreUpgrades, overrid } } else { // if upgrades are provided, we need to load the version and apply the upgrades - upgradeableStore, ok := s.stateCommitment.(store.UpgradeableStore) - if !ok { - return errors.New("SC store does not support upgrades") - } - if 
err := upgradeableStore.LoadVersionAndUpgrade(v, upgrades); err != nil { + if err := s.stateCommitment.LoadVersionAndUpgrade(v, upgrades); err != nil { return fmt.Errorf("failed to load SS version with upgrades %d: %w", v, err) } } @@ -363,18 +307,6 @@ func (s *Store) Commit(cs *corestore.Changeset) ([]byte, error) { eg := new(errgroup.Group) - // if migrating the changeset will be sent to migration manager to fill SS - // otherwise commit to SS async here - if !s.isMigrating { - eg.Go(func() error { - if err := s.stateStorage.ApplyChangeset(cs); err != nil { - return fmt.Errorf("failed to commit SS: %w", err) - } - - return nil - }) - } - // commit SC async var cInfo *proof.CommitInfo eg.Go(func() error { diff --git a/store/v2/root/store_mock_test.go b/store/v2/root/store_mock_test.go index 4b43d52f7f7e..0ec0a31bdaf2 100644 --- a/store/v2/root/store_mock_test.go +++ b/store/v2/root/store_mock_test.go @@ -15,13 +15,12 @@ import ( "cosmossdk.io/store/v2/pruning" ) -func newTestRootStore(ss store.VersionedWriter, sc store.Committer) *Store { +func newTestRootStore(sc store.Committer) *Store { noopLog := coretesting.NewNopLogger() - pm := pruning.NewManager(sc.(store.Pruner), ss.(store.Pruner), nil, nil) + pm := pruning.NewManager(sc.(store.Pruner), nil) return &Store{ logger: noopLog, telemetry: metrics.Metrics{}, - stateStorage: ss, stateCommitment: sc, pruningManager: pm, isMigrating: false, @@ -30,9 +29,8 @@ func newTestRootStore(ss store.VersionedWriter, sc store.Committer) *Store { func TestGetLatestState(t *testing.T) { ctrl := gomock.NewController(t) - ss := mock.NewMockStateStorage(ctrl) sc := mock.NewMockStateCommitter(ctrl) - rs := newTestRootStore(ss, sc) + rs := newTestRootStore(sc) // Get the latest version sc.EXPECT().GetLatestVersion().Return(uint64(0), errors.New("error")) @@ -46,46 +44,36 @@ func TestGetLatestState(t *testing.T) { func TestQuery(t *testing.T) { ctrl := gomock.NewController(t) - ss := mock.NewMockStateStorage(ctrl) sc := 
mock.NewMockStateCommitter(ctrl) - rs := newTestRootStore(ss, sc) + rs := newTestRootStore(sc) // Query without Proof - ss.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New("error")) - _, err := rs.Query(nil, 0, nil, false) - require.Error(t, err) - ss.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil) sc.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New("error")) - _, err = rs.Query(nil, 0, nil, false) + _, err := rs.Query(nil, 0, nil, false) require.Error(t, err) - ss.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil) sc.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return([]byte("value"), nil) v, err := rs.Query(nil, 0, nil, false) require.NoError(t, err) require.Equal(t, []byte("value"), v.Value) - ss.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return([]byte("value"), nil) - v, err = rs.Query(nil, 0, nil, false) - require.NoError(t, err) - require.Equal(t, []byte("value"), v.Value) // Query with Proof - ss.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return([]byte("value"), nil) + sc.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return([]byte("value"), nil) sc.EXPECT().GetProof(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New("error")) - v, err = rs.Query(nil, 0, nil, true) + _, err = rs.Query(nil, 0, nil, true) require.Error(t, err) // Query with Migration + rs.isMigrating = true - sc.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New("error")) + sc.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return([]byte("value"), nil) _, err = rs.Query(nil, 0, nil, false) - require.Error(t, err) + require.NoError(t, err) } func TestLoadVersion(t *testing.T) { ctrl := gomock.NewController(t) - ss := mock.NewMockStateStorage(ctrl) sc := mock.NewMockStateCommitter(ctrl) - rs := newTestRootStore(ss, sc) + rs := newTestRootStore(sc) // LoadLatestVersion 
sc.EXPECT().GetLatestVersion().Return(uint64(0), errors.New("error")) @@ -107,11 +95,6 @@ func TestLoadVersion(t *testing.T) { sc.EXPECT().LoadVersionAndUpgrade(uint64(2), v).Return(errors.New("error")) err = rs.LoadVersionAndUpgrade(uint64(2), v) require.Error(t, err) - sc.EXPECT().LoadVersionAndUpgrade(uint64(2), v).Return(nil) - sc.EXPECT().GetCommitInfo(uint64(2)).Return(nil, nil) - ss.EXPECT().PruneStoreKeys(gomock.Any(), uint64(2)).Return(errors.New("error")) - err = rs.LoadVersionAndUpgrade(uint64(2), v) - require.Error(t, err) // LoadVersionUpgrade with Migration rs.isMigrating = true diff --git a/store/v2/root/store_test.go b/store/v2/root/store_test.go index 8bb6b5604e2d..59df4d68384d 100644 --- a/store/v2/root/store_test.go +++ b/store/v2/root/store_test.go @@ -16,8 +16,6 @@ import ( dbm "cosmossdk.io/store/v2/db" "cosmossdk.io/store/v2/proof" "cosmossdk.io/store/v2/pruning" - "cosmossdk.io/store/v2/storage" - "cosmossdk.io/store/v2/storage/pebbledb" ) const ( @@ -47,18 +45,14 @@ func TestStorageTestSuite(t *testing.T) { func (s *RootStoreTestSuite) SetupTest() { noopLog := coretesting.NewNopLogger() - pebbleDB, err := pebbledb.New(s.T().TempDir()) - s.Require().NoError(err) - ss := storage.NewStorageStore(pebbleDB, noopLog) - tree := iavl.NewIavlTree(dbm.NewMemDB(), noopLog, iavl.DefaultConfig()) tree2 := iavl.NewIavlTree(dbm.NewMemDB(), noopLog, iavl.DefaultConfig()) tree3 := iavl.NewIavlTree(dbm.NewMemDB(), noopLog, iavl.DefaultConfig()) sc, err := commitment.NewCommitStore(map[string]commitment.Tree{testStoreKey: tree, testStoreKey2: tree2, testStoreKey3: tree3}, nil, dbm.NewMemDB(), noopLog) s.Require().NoError(err) - pm := pruning.NewManager(sc, ss, nil, nil) - rs, err := New(dbm.NewMemDB(), noopLog, ss, sc, pm, nil, nil) + pm := pruning.NewManager(sc, nil) + rs, err := New(dbm.NewMemDB(), noopLog, sc, pm, nil, nil) s.Require().NoError(err) s.rootStore = rs @@ -67,10 +61,6 @@ func (s *RootStoreTestSuite) SetupTest() { func (s *RootStoreTestSuite) 
newStoreWithPruneConfig(config *store.PruningOption) { noopLog := coretesting.NewNopLogger() - pebbleDB, err := pebbledb.New(s.T().TempDir()) - s.Require().NoError(err) - ss := storage.NewStorageStore(pebbleDB, noopLog) - mdb := dbm.NewMemDB() multiTrees := make(map[string]commitment.Tree) for _, storeKey := range testStoreKeys { @@ -81,18 +71,18 @@ func (s *RootStoreTestSuite) newStoreWithPruneConfig(config *store.PruningOption sc, err := commitment.NewCommitStore(multiTrees, nil, dbm.NewMemDB(), noopLog) s.Require().NoError(err) - pm := pruning.NewManager(sc, ss, config, config) + pm := pruning.NewManager(sc, config) - rs, err := New(dbm.NewMemDB(), noopLog, ss, sc, pm, nil, nil) + rs, err := New(dbm.NewMemDB(), noopLog, sc, pm, nil, nil) s.Require().NoError(err) s.rootStore = rs } -func (s *RootStoreTestSuite) newStoreWithBackendMount(ss store.VersionedWriter, sc store.Committer, pm *pruning.Manager) { +func (s *RootStoreTestSuite) newStoreWithBackendMount(sc store.Committer, pm *pruning.Manager) { noopLog := coretesting.NewNopLogger() - rs, err := New(dbm.NewMemDB(), noopLog, ss, sc, pm, nil, nil) + rs, err := New(dbm.NewMemDB(), noopLog, sc, pm, nil, nil) s.Require().NoError(err) s.rootStore = rs @@ -107,10 +97,6 @@ func (s *RootStoreTestSuite) TestGetStateCommitment() { s.Require().Equal(s.rootStore.GetStateCommitment(), s.rootStore.(*Store).stateCommitment) } -func (s *RootStoreTestSuite) TestGetStateStorage() { - s.Require().Equal(s.rootStore.GetStateStorage(), s.rootStore.(*Store).stateStorage) -} - func (s *RootStoreTestSuite) TestSetInitialVersion() { initialVersion := uint64(5) s.Require().NoError(s.rootStore.SetInitialVersion(initialVersion)) @@ -603,17 +589,14 @@ func (s *RootStoreTestSuite) TestMultiStore_PruningRestart() { mdb1 := dbm.NewMemDB() mdb2 := dbm.NewMemDB() - pebbleDB, err := pebbledb.New(s.T().TempDir()) - s.Require().NoError(err) - ss := storage.NewStorageStore(pebbleDB, noopLog) tree := iavl.NewIavlTree(mdb1, noopLog, 
iavl.DefaultConfig()) sc, err := commitment.NewCommitStore(map[string]commitment.Tree{testStoreKey: tree}, nil, mdb2, noopLog) s.Require().NoError(err) - pm := pruning.NewManager(sc, ss, pruneOpt, pruneOpt) + pm := pruning.NewManager(sc, pruneOpt) - s.newStoreWithBackendMount(ss, sc, pm) + s.newStoreWithBackendMount(sc, pm) s.Require().NoError(s.rootStore.LoadLatestVersion()) // Commit enough to build up heights to prune, where on the next block we should @@ -633,18 +616,13 @@ func (s *RootStoreTestSuite) TestMultiStore_PruningRestart() { s.Require().False(ok) s.Require().Equal(uint64(0), actualHeightToPrune) - // "restart" - pebbleDB, err = pebbledb.New(s.T().TempDir()) - s.Require().NoError(err) - ss = storage.NewStorageStore(pebbleDB, noopLog) - tree = iavl.NewIavlTree(mdb1, noopLog, iavl.DefaultConfig()) sc, err = commitment.NewCommitStore(map[string]commitment.Tree{testStoreKey: tree}, nil, mdb2, noopLog) s.Require().NoError(err) - pm = pruning.NewManager(sc, ss, pruneOpt, pruneOpt) + pm = pruning.NewManager(sc, pruneOpt) - s.newStoreWithBackendMount(ss, sc, pm) + s.newStoreWithBackendMount(sc, pm) err = s.rootStore.LoadLatestVersion() s.Require().NoError(err) @@ -684,11 +662,6 @@ func (s *RootStoreTestSuite) TestMultiStore_PruningRestart() { func (s *RootStoreTestSuite) TestMultiStoreRestart() { noopLog := coretesting.NewNopLogger() - pebbleDB, err := pebbledb.New(s.T().TempDir()) - s.Require().NoError(err) - - ss := storage.NewStorageStore(pebbleDB, noopLog) - mdb1 := dbm.NewMemDB() mdb2 := dbm.NewMemDB() multiTrees := make(map[string]commitment.Tree) @@ -700,9 +673,9 @@ func (s *RootStoreTestSuite) TestMultiStoreRestart() { sc, err := commitment.NewCommitStore(multiTrees, nil, mdb2, noopLog) s.Require().NoError(err) - pm := pruning.NewManager(sc, ss, nil, nil) + pm := pruning.NewManager(sc, nil) - s.newStoreWithBackendMount(ss, sc, pm) + s.newStoreWithBackendMount(sc, pm) s.Require().NoError(s.rootStore.LoadLatestVersion()) // perform changes @@ -787,9 
+760,9 @@ func (s *RootStoreTestSuite) TestMultiStoreRestart() { sc, err = commitment.NewCommitStore(multiTrees, nil, mdb2, noopLog) s.Require().NoError(err) - pm = pruning.NewManager(sc, ss, nil, nil) + pm = pruning.NewManager(sc, nil) - s.newStoreWithBackendMount(ss, sc, pm) + s.newStoreWithBackendMount(sc, pm) err = s.rootStore.LoadLatestVersion() s.Require().Nil(err) diff --git a/store/v2/root/upgrade_test.go b/store/v2/root/upgrade_test.go index 0e9da266a355..1bcee4149b48 100644 --- a/store/v2/root/upgrade_test.go +++ b/store/v2/root/upgrade_test.go @@ -14,8 +14,6 @@ import ( "cosmossdk.io/store/v2/commitment/iavl" dbm "cosmossdk.io/store/v2/db" "cosmossdk.io/store/v2/pruning" - "cosmossdk.io/store/v2/storage" - "cosmossdk.io/store/v2/storage/pebbledb" ) type UpgradeStoreTestSuite struct { @@ -43,14 +41,10 @@ func (s *UpgradeStoreTestSuite) SetupTest() { multiTrees[storeKey], _ = newTreeFn(storeKey) } - // create storage and commitment stores - pebbleDB, err := pebbledb.New(s.T().TempDir()) - s.Require().NoError(err) - ss := storage.NewStorageStore(pebbleDB, testLog) sc, err := commitment.NewCommitStore(multiTrees, nil, s.commitDB, testLog) s.Require().NoError(err) - pm := pruning.NewManager(sc, ss, nil, nil) - s.rootStore, err = New(s.commitDB, testLog, ss, sc, pm, nil, nil) + pm := pruning.NewManager(sc, nil) + s.rootStore, err = New(s.commitDB, testLog, sc, pm, nil, nil) s.Require().NoError(err) // commit changeset @@ -91,8 +85,8 @@ func (s *UpgradeStoreTestSuite) loadWithUpgrades(upgrades *corestore.StoreUpgrad sc, err := commitment.NewCommitStore(multiTrees, oldTrees, s.commitDB, testLog) s.Require().NoError(err) - pm := pruning.NewManager(sc, s.rootStore.GetStateStorage().(store.Pruner), nil, nil) - s.rootStore, err = New(s.commitDB, testLog, s.rootStore.GetStateStorage(), sc, pm, nil, nil) + pm := pruning.NewManager(sc, nil) + s.rootStore, err = New(s.commitDB, testLog, sc, pm, nil, nil) s.Require().NoError(err) } @@ -112,7 +106,7 @@ func (s 
*UpgradeStoreTestSuite) TestLoadVersionAndUpgrade() { keyCount := 10 // check old store keys are queryable - oldStoreKeys := []string{"store1", "store3"} + oldStoreKeys := []string{"store1", "store2", "store3"} for _, storeKey := range oldStoreKeys { for version := uint64(1); version <= v; version++ { for i := 0; i < keyCount; i++ { diff --git a/store/v2/snapshots/helpers_test.go b/store/v2/snapshots/helpers_test.go index 657fd5c6f1ba..40090c896817 100644 --- a/store/v2/snapshots/helpers_test.go +++ b/store/v2/snapshots/helpers_test.go @@ -14,7 +14,6 @@ import ( protoio "github.com/cosmos/gogoproto/io" "github.com/stretchr/testify/require" - corestore "cosmossdk.io/core/store" coretesting "cosmossdk.io/core/testing" "cosmossdk.io/store/v2/snapshots" snapshotstypes "cosmossdk.io/store/v2/snapshots/types" @@ -109,7 +108,7 @@ type mockCommitSnapshotter struct { } func (m *mockCommitSnapshotter) Restore( - height uint64, format uint32, protoReader protoio.Reader, chStorage chan<- *corestore.StateChanges, + height uint64, format uint32, protoReader protoio.Reader, ) (snapshotstypes.SnapshotItem, error) { if format == 0 { return snapshotstypes.SnapshotItem{}, snapshotstypes.ErrUnknownFormat @@ -120,7 +119,6 @@ func (m *mockCommitSnapshotter) Restore( var item snapshotstypes.SnapshotItem m.items = [][]byte{} - keyCount := 0 for { item.Reset() err := protoReader.ReadMsg(&item) @@ -134,19 +132,6 @@ func (m *mockCommitSnapshotter) Restore( break } m.items = append(m.items, payload.Payload) - // mock feeding chStorage to check if the loop closed properly - // - // ref: https://github.com/cosmos/cosmos-sdk/pull/21106 - chStorage <- &corestore.StateChanges{ - Actor: []byte("actor"), - StateChanges: []corestore.KVPair{ - { - Key: []byte(fmt.Sprintf("key-%d", keyCount)), - Value: payload.Payload, - }, - }, - } - keyCount++ } return item, nil @@ -169,22 +154,6 @@ func (m *mockCommitSnapshotter) SupportedFormats() []uint32 { return 
[]uint32{snapshotstypes.CurrentFormat} } -type mockStorageSnapshotter struct { - items map[string][]byte -} - -func (m *mockStorageSnapshotter) Restore(version uint64, chStorage <-chan *corestore.StateChanges) error { - // mock consuming chStorage to check if the loop closed properly - // - // ref: https://github.com/cosmos/cosmos-sdk/pull/21106 - for change := range chStorage { - for _, kv := range change.StateChanges { - m.items[string(kv.Key)] = kv.Value - } - } - return nil -} - type mockErrorCommitSnapshotter struct{} var _ snapshots.CommitSnapshotter = (*mockErrorCommitSnapshotter)(nil) @@ -194,7 +163,7 @@ func (m *mockErrorCommitSnapshotter) Snapshot(height uint64, protoWriter protoio } func (m *mockErrorCommitSnapshotter) Restore( - height uint64, format uint32, protoReader protoio.Reader, chStorage chan<- *corestore.StateChanges, + height uint64, format uint32, protoReader protoio.Reader, ) (snapshotstypes.SnapshotItem, error) { return snapshotstypes.SnapshotItem{}, errors.New("mock restore error") } @@ -214,7 +183,7 @@ func setupBusyManager(t *testing.T) *snapshots.Manager { store, err := snapshots.NewStore(t.TempDir()) require.NoError(t, err) hung := newHungCommitSnapshotter() - mgr := snapshots.NewManager(store, opts, hung, &mockStorageSnapshotter{}, nil, coretesting.NewNopLogger()) + mgr := snapshots.NewManager(store, opts, hung, nil, coretesting.NewNopLogger()) // Channel to ensure the test doesn't finish until the goroutine is done. 
// Without this, there are intermittent test failures about @@ -258,7 +227,7 @@ func (m *hungCommitSnapshotter) Snapshot(height uint64, protoWriter protoio.Writ } func (m *hungCommitSnapshotter) Restore( - height uint64, format uint32, protoReader protoio.Reader, chStorage chan<- *corestore.StateChanges, + height uint64, format uint32, protoReader protoio.Reader, ) (snapshotstypes.SnapshotItem, error) { panic("not implemented") } diff --git a/store/v2/snapshots/manager.go b/store/v2/snapshots/manager.go index 85d2cf26be25..a0d7895513d8 100644 --- a/store/v2/snapshots/manager.go +++ b/store/v2/snapshots/manager.go @@ -12,7 +12,6 @@ import ( "sync" corelog "cosmossdk.io/core/log" - corestore "cosmossdk.io/core/store" errorsmod "cosmossdk.io/errors/v2" storeerrors "cosmossdk.io/store/v2/errors" "cosmossdk.io/store/v2/snapshots/types" @@ -38,8 +37,6 @@ type Manager struct { opts SnapshotOptions // commitSnapshotter is the snapshotter for the commitment state. commitSnapshotter CommitSnapshotter - // storageSnapshotter is the snapshotter for the storage state. - storageSnapshotter StorageSnapshotter logger corelog.Logger @@ -76,17 +73,16 @@ const ( var ErrOptsZeroSnapshotInterval = errors.New("snapshot-interval must not be 0") // NewManager creates a new manager. 
-func NewManager(store *Store, opts SnapshotOptions, commitSnapshotter CommitSnapshotter, storageSnapshotter StorageSnapshotter, extensions map[string]ExtensionSnapshotter, logger corelog.Logger) *Manager { +func NewManager(store *Store, opts SnapshotOptions, commitSnapshotter CommitSnapshotter, extensions map[string]ExtensionSnapshotter, logger corelog.Logger) *Manager { if extensions == nil { extensions = map[string]ExtensionSnapshotter{} } return &Manager{ - store: store, - opts: opts, - commitSnapshotter: commitSnapshotter, - storageSnapshotter: storageSnapshotter, - extensions: extensions, - logger: logger, + store: store, + opts: opts, + commitSnapshotter: commitSnapshotter, + extensions: extensions, + logger: logger, } } @@ -398,23 +394,10 @@ func (m *Manager) doRestoreSnapshot(snapshot types.Snapshot, chChunks <-chan io. return payload.Payload, nil } - // chStorage is the channel to pass the KV pairs to the storage snapshotter. - chStorage := make(chan *corestore.StateChanges, defaultStorageChannelBufferSize) - - storageErrs := make(chan error, 1) - go func() { - defer close(storageErrs) - err := m.storageSnapshotter.Restore(snapshot.Height, chStorage) - if err != nil { - storageErrs <- err - } - }() - - nextItem, err = m.commitSnapshotter.Restore(snapshot.Height, snapshot.Format, streamReader, chStorage) + nextItem, err = m.commitSnapshotter.Restore(snapshot.Height, snapshot.Format, streamReader) if err != nil { return errorsmod.Wrap(err, "multistore restore") } - close(chStorage) for { if nextItem.Item == nil { @@ -445,11 +428,6 @@ func (m *Manager) doRestoreSnapshot(snapshot types.Snapshot, chChunks <-chan io. 
} } - // wait for storage snapshotter to complete - if err := <-storageErrs; err != nil { - return errorsmod.Wrap(err, "storage snapshotter") - } - return nil } diff --git a/store/v2/snapshots/manager_test.go b/store/v2/snapshots/manager_test.go index 2ecec5660066..e374b4c75cd0 100644 --- a/store/v2/snapshots/manager_test.go +++ b/store/v2/snapshots/manager_test.go @@ -2,7 +2,6 @@ package snapshots_test import ( "errors" - "fmt" "testing" "time" @@ -19,8 +18,7 @@ var opts = snapshots.NewSnapshotOptions(1500, 2) func TestManager_List(t *testing.T) { store := setupStore(t) commitSnapshotter := &mockCommitSnapshotter{} - storageSnapshotter := &mockStorageSnapshotter{} - manager := snapshots.NewManager(store, opts, commitSnapshotter, storageSnapshotter, nil, coretesting.NewNopLogger()) + manager := snapshots.NewManager(store, opts, commitSnapshotter, nil, coretesting.NewNopLogger()) mgrList, err := manager.List() require.NoError(t, err) @@ -41,7 +39,7 @@ func TestManager_List(t *testing.T) { func TestManager_LoadChunk(t *testing.T) { store := setupStore(t) - manager := snapshots.NewManager(store, opts, &mockCommitSnapshotter{}, &mockStorageSnapshotter{}, nil, coretesting.NewNopLogger()) + manager := snapshots.NewManager(store, opts, &mockCommitSnapshotter{}, nil, coretesting.NewNopLogger()) // Existing chunk should return body chunk, err := manager.LoadChunk(2, 1, 1) @@ -73,7 +71,7 @@ func TestManager_Take(t *testing.T) { extSnapshotter := newExtSnapshotter(10) expectChunks := snapshotItems(items, extSnapshotter) - manager := snapshots.NewManager(store, opts, commitSnapshotter, &mockStorageSnapshotter{}, nil, coretesting.NewNopLogger()) + manager := snapshots.NewManager(store, opts, commitSnapshotter, nil, coretesting.NewNopLogger()) err := manager.RegisterExtensions(extSnapshotter) require.NoError(t, err) @@ -112,7 +110,7 @@ func TestManager_Take(t *testing.T) { func TestManager_Prune(t *testing.T) { store := setupStore(t) - manager := snapshots.NewManager(store, 
opts, &mockCommitSnapshotter{}, &mockStorageSnapshotter{}, nil, coretesting.NewNopLogger()) + manager := snapshots.NewManager(store, opts, &mockCommitSnapshotter{}, nil, coretesting.NewNopLogger()) pruned, err := manager.Prune(2) require.NoError(t, err) @@ -131,9 +129,8 @@ func TestManager_Prune(t *testing.T) { func TestManager_Restore(t *testing.T) { store := setupStore(t) target := &mockCommitSnapshotter{} - storageSnapshotter := &mockStorageSnapshotter{items: map[string][]byte{}} extSnapshotter := newExtSnapshotter(0) - manager := snapshots.NewManager(store, opts, target, storageSnapshotter, nil, coretesting.NewNopLogger()) + manager := snapshots.NewManager(store, opts, target, nil, coretesting.NewNopLogger()) err := manager.RegisterExtensions(extSnapshotter) require.NoError(t, err) @@ -206,14 +203,6 @@ func TestManager_Restore(t *testing.T) { assert.Equal(t, expectItems, target.items) assert.Equal(t, 10, len(extSnapshotter.state)) - // make sure storageSnapshotter items are properly stored - for i, item := range target.items { - key := fmt.Sprintf("key-%d", i) - chunk := storageSnapshotter.items[key] - require.NotNil(t, chunk) - require.Equal(t, item, chunk) - } - // The snapshot is saved in local snapshot store snapshots, err := store.List() require.NoError(t, err) @@ -260,7 +249,7 @@ func TestManager_TakeError(t *testing.T) { snapshotter := &mockErrorCommitSnapshotter{} store, err := snapshots.NewStore(t.TempDir()) require.NoError(t, err) - manager := snapshots.NewManager(store, opts, snapshotter, &mockStorageSnapshotter{}, nil, coretesting.NewNopLogger()) + manager := snapshots.NewManager(store, opts, snapshotter, nil, coretesting.NewNopLogger()) _, err = manager.Create(1) require.Error(t, err) @@ -276,12 +265,11 @@ func TestSnapshot_Take_Restore(t *testing.T) { commitSnapshotter := &mockCommitSnapshotter{ items: items, } - storageSnapshotter := &mockStorageSnapshotter{items: map[string][]byte{}} extSnapshotter := newExtSnapshotter(10) expectChunks := 
snapshotItems(items, extSnapshotter) - manager := snapshots.NewManager(store, opts, commitSnapshotter, storageSnapshotter, nil, coretesting.NewNopLogger()) + manager := snapshots.NewManager(store, opts, commitSnapshotter, nil, coretesting.NewNopLogger()) err := manager.RegisterExtensions(extSnapshotter) require.NoError(t, err) @@ -367,7 +355,7 @@ func TestSnapshot_Take_Prune(t *testing.T) { extSnapshotter := newExtSnapshotter(10) expectChunks := snapshotItems(items, extSnapshotter) - manager := snapshots.NewManager(store, opts, commitSnapshotter, &mockStorageSnapshotter{}, nil, coretesting.NewNopLogger()) + manager := snapshots.NewManager(store, opts, commitSnapshotter, nil, coretesting.NewNopLogger()) err := manager.RegisterExtensions(extSnapshotter) require.NoError(t, err) @@ -446,7 +434,7 @@ func TestSnapshot_Pruning_Take_Snapshot_Parallel(t *testing.T) { extSnapshotter := newExtSnapshotter(10) expectChunks := snapshotItems(items, extSnapshotter) - manager := snapshots.NewManager(store, opts, commitSnapshotter, &mockStorageSnapshotter{}, nil, coretesting.NewNopLogger()) + manager := snapshots.NewManager(store, opts, commitSnapshotter, nil, coretesting.NewNopLogger()) err := manager.RegisterExtensions(extSnapshotter) require.NoError(t, err) @@ -518,7 +506,7 @@ func TestSnapshot_SnapshotIfApplicable(t *testing.T) { snapshotOpts := snapshots.NewSnapshotOptions(1, 1) - manager := snapshots.NewManager(store, snapshotOpts, commitSnapshotter, &mockStorageSnapshotter{}, nil, coretesting.NewNopLogger()) + manager := snapshots.NewManager(store, snapshotOpts, commitSnapshotter, nil, coretesting.NewNopLogger()) err := manager.RegisterExtensions(extSnapshotter) require.NoError(t, err) diff --git a/store/v2/snapshots/snapshotter.go b/store/v2/snapshots/snapshotter.go index 9b054060a36d..f3f4d33f1cf5 100644 --- a/store/v2/snapshots/snapshotter.go +++ b/store/v2/snapshots/snapshotter.go @@ -3,7 +3,6 @@ package snapshots import ( protoio "github.com/cosmos/gogoproto/io" 
- corestore "cosmossdk.io/core/store" "cosmossdk.io/store/v2/snapshots/types" ) @@ -14,13 +13,7 @@ type CommitSnapshotter interface { Snapshot(version uint64, protoWriter protoio.Writer) error // Restore restores the commitment state from the snapshot reader. - Restore(version uint64, format uint32, protoReader protoio.Reader, chStorage chan<- *corestore.StateChanges) (types.SnapshotItem, error) -} - -// StorageSnapshotter defines an API for restoring snapshots of the storage state. -type StorageSnapshotter interface { - // Restore restores the storage state from the given channel. - Restore(version uint64, chStorage <-chan *corestore.StateChanges) error + Restore(version uint64, format uint32, protoReader protoio.Reader) (types.SnapshotItem, error) } // ExtensionPayloadReader read extension payloads, diff --git a/store/v2/storage/README.md b/store/v2/storage/README.md deleted file mode 100644 index aaffab357c30..000000000000 --- a/store/v2/storage/README.md +++ /dev/null @@ -1,107 +0,0 @@ -# State Storage (SS) - -The `storage` package contains the state storage (SS) implementation. Specifically, -it contains RocksDB, PebbleDB, and SQLite (Btree) backend implementations of the -`VersionedWriter` interface. - -The goal of SS is to provide a modular storage backend, i.e. multiple implementations, -to facilitate storing versioned raw key/value pairs in a fast embedded database, -although an embedded database is not required, i.e. you could use a replicated -RDBMS system. - -The responsibility and functions of SS include the following: - -* Provide fast and efficient queries for versioned raw key/value pairs -* Provide versioned CRUD operations -* Provide versioned batching functionality -* Provide versioned iteration (forward and reverse) functionality -* Provide pruning functionality - -All of the functionality provided by an SS backend should work under a versioned -scheme, i.e. 
a user should be able to get, store, and iterate over keys for the -latest and historical versions efficiently. - -## Backends - -### RocksDB - -The RocksDB implementation is a CGO-based SS implementation. It fully supports -the `VersionedWriter` API and is arguably the most efficient implementation. It -also supports versioning out-of-the-box using User-defined Timestamps in -ColumnFamilies (CF). However, it requires the CGO dependency which can complicate -an app’s build process. - -### PebbleDB - -The PebbleDB implementation is a native Go SS implementation that is primarily an -alternative to RocksDB. Since it does not support CF, results in the fact that we -need to implement versioning (MVCC) ourselves. This comes with added implementation -complexity and potential performance overhead. However, it is a pure Go implementation -and does not require CGO. - -### SQLite (Btree) - -The SQLite implementation is another CGO-based SS implementation. It fully supports -the `VersionedWriter` API. The implementation is relatively straightforward and -easy to understand as it’s entirely SQL-based. However, benchmarks show that this -options is least performant, even for reads. This SS backend has a lot of promise, -but needs more benchmarking and potential SQL optimizations, like dedicated tables -for certain aspects of state, e.g. latest state, to be extremely performant. - -## Benchmarks - -Benchmarks for basic operations on all supported native SS implementations can -be found in `store/storage/storage_bench_test.go`. 
- -At the time of writing, the following benchmarks were performed: - -```shell -name time/op -Get/backend_rocksdb_versiondb_opts-10 7.41µs ± 0% -Get/backend_pebbledb_default_opts-10 6.17µs ± 0% -Get/backend_btree_sqlite-10 29.1µs ± 0% -ApplyChangeset/backend_pebbledb_default_opts-10 5.73ms ± 0% -ApplyChangeset/backend_btree_sqlite-10 56.9ms ± 0% -ApplyChangeset/backend_rocksdb_versiondb_opts-10 4.07ms ± 0% -Iterate/backend_pebbledb_default_opts-10 1.04s ± 0% -Iterate/backend_btree_sqlite-10 1.59s ± 0% -Iterate/backend_rocksdb_versiondb_opts-10 778ms ± 0% -``` - -## Pruning - -Pruning is the process of efficiently managing and removing outdated or redundant -data from the State Storage (SS). To facilitate this, the SS backend must implement -the `Pruner` interface, allowing the `PruningManager` to execute data pruning operations -according to the specified `PruningOption`. - -## State Sync - -State storage (SS) does not have a direct notion of state sync. Rather, `snapshots.Manager` -is responsible for creating and restoring snapshots of the entire state. The -`snapshots.Manager` has a `StorageSnapshotter` field which is fulfilled by the -`StorageStore` type, specifically it implements the `Restore` method. The `Restore` -method reads off of a provided channel and writes key/value pairs directly to a -batch object which is committed to the underlying SS engine. - -## Non-Consensus Data - - - -## Usage - -An SS backend is meant to be used within a broader store implementation, as it -only stores data for direct and historical query purposes. We define a `Database` -interface in the `storage` package which is mean to be represent a `VersionedWriter` -with only the necessary methods. The `StorageStore` interface is meant to wrap or -accept this `Database` type, e.g. RocksDB. - -The `StorageStore` interface is an abstraction or wrapper around the backing SS -engine can be seen as the main entry point to using SS. 
- -Higher up the stack, there should exist a `root.Store` implementation. The `root.Store` -is meant to encapsulate both an SS backend and an SC backend. The SS backend is -defined by this `StorageStore` implementation. - -In short, initialize your SS engine of choice and then provide that to `NewStorageStore` -which will further be provided to `root.Store` as the SS backend. diff --git a/store/v2/storage/database.go b/store/v2/storage/database.go deleted file mode 100644 index e969a9ee6338..000000000000 --- a/store/v2/storage/database.go +++ /dev/null @@ -1,27 +0,0 @@ -package storage - -import ( - "io" - - corestore "cosmossdk.io/core/store" - "cosmossdk.io/store/v2" -) - -// Database is an interface that wraps the storage database methods. A wrapper -// is useful for instances where you want to perform logic that is identical for all SS -// backends, such as restoring snapshots. -type Database interface { - NewBatch(version uint64) (store.Batch, error) - Has(storeKey []byte, version uint64, key []byte) (bool, error) - Get(storeKey []byte, version uint64, key []byte) ([]byte, error) - GetLatestVersion() (uint64, error) - SetLatestVersion(version uint64) error - VersionExists(version uint64) (bool, error) - - Iterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) - ReverseIterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) - - Prune(version uint64) error - - io.Closer -} diff --git a/store/v2/storage/pebbledb/batch.go b/store/v2/storage/pebbledb/batch.go deleted file mode 100644 index 7e9f7a347f98..000000000000 --- a/store/v2/storage/pebbledb/batch.go +++ /dev/null @@ -1,98 +0,0 @@ -package pebbledb - -import ( - "encoding/binary" - "errors" - "fmt" - - "github.com/cockroachdb/pebble" - - "cosmossdk.io/store/v2" -) - -var _ store.Batch = (*Batch)(nil) - -type Batch struct { - storage *pebble.DB - batch *pebble.Batch - version uint64 - sync bool - size int -} - -const ( - oneIf64Bit = 
^uint(0) >> 63 - maxUint32OrInt = (1<<31)< maxUint32OrInt { - // 4 GB is huge, probably genesis; flush and reset - if err := b.batch.Commit(&pebble.WriteOptions{Sync: b.sync}); err != nil { - return fmt.Errorf("max batch size exceed: failed to write PebbleDB batch: %w", err) - } - b.batch.Reset() - b.size = 0 - } - - if err := b.batch.Set(prefixedKey, prefixedVal, nil); err != nil { - return fmt.Errorf("failed to write PebbleDB batch: %w", err) - } - b.size += size - - return nil -} - -func (b *Batch) Set(storeKey, key, value []byte) error { - return b.set(storeKey, 0, key, value) -} - -func (b *Batch) Delete(storeKey, key []byte) error { - return b.set(storeKey, b.version, key, []byte(tombstoneVal)) -} - -// Write flushes any accumulated data to disk and closes the batch. -func (b *Batch) Write() (err error) { - defer func() { - err = errors.Join(err, b.batch.Close()) - }() - - return b.batch.Commit(&pebble.WriteOptions{Sync: b.sync}) -} diff --git a/store/v2/storage/pebbledb/comparator.go b/store/v2/storage/pebbledb/comparator.go deleted file mode 100644 index 24f5e05a6214..000000000000 --- a/store/v2/storage/pebbledb/comparator.go +++ /dev/null @@ -1,242 +0,0 @@ -package pebbledb - -import ( - "bytes" - "encoding/binary" - "fmt" - - "github.com/cockroachdb/pebble" -) - -// MVCCComparer returns a PebbleDB Comparer with encoding and decoding routines -// for MVCC control, used to compare and store versioned keys. 
-// -// Note: This Comparer implementation is largely based on PebbleDB's internal -// MVCC example, which can be found here: -// https://github.com/cockroachdb/pebble/blob/master/cmd/pebble/mvcc.go -var MVCCComparer = &pebble.Comparer{ - Name: "ss_pebbledb_comparator", - - Compare: MVCCKeyCompare, - - AbbreviatedKey: func(k []byte) uint64 { - key, _, ok := SplitMVCCKey(k) - if !ok { - return 0 - } - - return pebble.DefaultComparer.AbbreviatedKey(key) - }, - - Equal: func(a, b []byte) bool { - return MVCCKeyCompare(a, b) == 0 - }, - - Separator: func(dst, a, b []byte) []byte { - aKey, _, ok := SplitMVCCKey(a) - if !ok { - return append(dst, a...) - } - - bKey, _, ok := SplitMVCCKey(b) - if !ok { - return append(dst, a...) - } - - // if the keys are the same just return a - if bytes.Equal(aKey, bKey) { - return append(dst, a...) - } - - n := len(dst) - - // MVCC key comparison uses bytes.Compare on the roachpb.Key, which is the - // same semantics as pebble.DefaultComparer, so reuse the latter's Separator - // implementation. - dst = pebble.DefaultComparer.Separator(dst, aKey, bKey) - - // Did we pick a separator different than aKey? If we did not, we can't do - // better than a. - buf := dst[n:] - if bytes.Equal(aKey, buf) { - return append(dst[:n], a...) - } - - // The separator is > aKey, so we only need to add the timestamp sentinel. - return append(dst, 0) - }, - - ImmediateSuccessor: func(dst, a []byte) []byte { - // The key `a` is guaranteed to be a bare prefix: It's a key without a version - // — just a trailing 0-byte to signify the length of the version. For example - // the user key "foo" is encoded as: "foo\0". We need to encode the immediate - // successor to "foo", which in the natural byte ordering is "foo\0". Append - // a single additional zero, to encode the user key "foo\0" with a zero-length - // version. 
- return append(append(dst, a...), 0) - }, - - Successor: func(dst, a []byte) []byte { - aKey, _, ok := SplitMVCCKey(a) - if !ok { - return append(dst, a...) - } - - n := len(dst) - - // MVCC key comparison uses bytes.Compare on the roachpb.Key, which is the - // same semantics as pebble.DefaultComparer, so reuse the latter's Successor - // implementation. - dst = pebble.DefaultComparer.Successor(dst, aKey) - - // Did we pick a successor different than aKey? If we did not, we can't do - // better than a. - buf := dst[n:] - if bytes.Equal(aKey, buf) { - return append(dst[:n], a...) - } - - // The successor is > aKey, so we only need to add the timestamp sentinel. - return append(dst, 0) - }, - - FormatKey: func(k []byte) fmt.Formatter { - return mvccKeyFormatter{key: k} - }, - - Split: func(k []byte) int { - key, _, ok := SplitMVCCKey(k) - if !ok { - return len(k) - } - - // This matches the behavior of libroach/KeyPrefix. RocksDB requires that - // keys generated via a SliceTransform be comparable with normal encoded - // MVCC keys. Encoded MVCC keys have a suffix indicating the number of - // bytes of timestamp data. MVCC keys without a timestamp have a suffix of - // 0. We're careful in EncodeKey to make sure that the user-key always has - // a trailing 0. If there is no timestamp this falls out naturally. If - // there is a timestamp we prepend a 0 to the encoded timestamp data. - return len(key) + 1 - }, -} - -type mvccKeyFormatter struct { - key []byte -} - -func (f mvccKeyFormatter) Format(s fmt.State, verb rune) { - k, vBz, ok := SplitMVCCKey(f.key) - if ok { - v, _ := decodeUint64Ascending(vBz) - fmt.Fprintf(s, "%s/%d", k, v) - } else { - fmt.Fprintf(s, "%s", f.key) - } -} - -// SplitMVCCKey accepts an MVCC key and returns the "user" key, the MVCC version, -// and a boolean indicating if the provided key is an MVCC key. 
-// -// Note, internally, we must make a copy of the provided mvccKey argument, which -// typically comes from the Key() method as it's not safe. -func SplitMVCCKey(mvccKey []byte) (key, version []byte, ok bool) { - if len(mvccKey) == 0 { - return nil, nil, false - } - - mvccKeyCopy := bytes.Clone(mvccKey) - - n := len(mvccKeyCopy) - 1 - tsLen := int(mvccKeyCopy[n]) - if n < tsLen { - return nil, nil, false - } - - key = mvccKeyCopy[:n-tsLen] - if tsLen > 0 { - version = mvccKeyCopy[n-tsLen+1 : n] - } - - return key, version, true -} - -// MVCCKeyCompare compares two MVCC keys. -func MVCCKeyCompare(a, b []byte) int { - aEnd := len(a) - 1 - bEnd := len(b) - 1 - if aEnd < 0 || bEnd < 0 { - // This should never happen unless there is some sort of corruption of - // the keys. This is a little bizarre, but the behavior exactly matches - // engine/db.cc:DBComparator. - return bytes.Compare(a, b) - } - - // Compute the index of the separator between the key and the timestamp. - aSep := aEnd - int(a[aEnd]) - bSep := bEnd - int(b[bEnd]) - if aSep < 0 || bSep < 0 { - // This should never happen unless there is some sort of corruption of - // the keys. This is a little bizarre, but the behavior exactly matches - // engine/db.cc:DBComparator. - return bytes.Compare(a, b) - } - - // compare the "user key" part of the key - if c := bytes.Compare(a[:aSep], b[:bSep]); c != 0 { - return c - } - - // compare the timestamp part of the key - aTS := a[aSep:aEnd] - bTS := b[bSep:bEnd] - if len(aTS) == 0 { - if len(bTS) == 0 { - return 0 - } - return -1 - } else if len(bTS) == 0 { - return 1 - } - - return bytes.Compare(aTS, bTS) -} - -// MVCCEncode encodes a key and version into an MVCC format. -// The format is: \x00[]<#version-bytes> -// If the version is 0, only the key and a null byte are encoded. -func MVCCEncode(key []byte, version uint64) (dst []byte) { - dst = append(dst, key...) 
- dst = append(dst, 0) - - if version != 0 { - extra := byte(1 + 8) - dst = encodeUint64Ascending(dst, version) - dst = append(dst, extra) - } - - return dst -} - -// encodeUint64Ascending encodes the uint64 value using a big-endian 8 byte -// representation. The bytes are appended to the supplied buffer and -// the final buffer is returned. -func encodeUint64Ascending(dst []byte, v uint64) []byte { - return append( - dst, - byte(v>>56), byte(v>>48), byte(v>>40), byte(v>>32), - byte(v>>24), byte(v>>16), byte(v>>8), byte(v), - ) -} - -// decodeUint64Ascending decodes a uint64 from the input buffer, treating -// the input as a big-endian 8 byte uint64 representation. The decoded uint64 is -// returned. -func decodeUint64Ascending(b []byte) (uint64, error) { - if len(b) < 8 { - return 0, fmt.Errorf("insufficient bytes to decode uint64 int value; expected 8; got %d", len(b)) - } - - v := binary.BigEndian.Uint64(b) - return v, nil -} diff --git a/store/v2/storage/pebbledb/comparator_test.go b/store/v2/storage/pebbledb/comparator_test.go deleted file mode 100644 index 1affd81b408c..000000000000 --- a/store/v2/storage/pebbledb/comparator_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package pebbledb - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestMVCCKey(t *testing.T) { - for i := uint64(1); i < 1001; i++ { - keyA := MVCCEncode([]byte("key001"), i) - - key, vBz, ok := SplitMVCCKey(keyA) - - version, err := decodeUint64Ascending(vBz) - require.NoError(t, err) - require.True(t, ok) - require.Equal(t, i, version) - require.Equal(t, []byte("key001"), key) - } -} - -func TestMVCCKeyCompare(t *testing.T) { - testCases := []struct { - keyA []byte - keyB []byte - expected int - }{ - { - // same key, same version - keyA: MVCCEncode([]byte("key001"), 1), - keyB: MVCCEncode([]byte("key001"), 1), - expected: 0, - }, - { - // same key, different version - keyA: MVCCEncode([]byte("key001"), 1), - keyB: MVCCEncode([]byte("key001"), 2), - expected: -1, 
- }, - { - // same key, different version (inverse) - keyA: MVCCEncode([]byte("key001"), 2), - keyB: MVCCEncode([]byte("key001"), 1), - expected: 1, - }, - { - // different key, same version - keyA: MVCCEncode([]byte("key001"), 1), - keyB: MVCCEncode([]byte("key009"), 1), - expected: -1, - }, - } - - for _, tc := range testCases { - require.Equalf(t, tc.expected, MVCCKeyCompare(tc.keyA, tc.keyB), "keyA: %s, keyB: %s", tc.keyA, tc.keyB) - } -} diff --git a/store/v2/storage/pebbledb/db.go b/store/v2/storage/pebbledb/db.go deleted file mode 100644 index 20fc3f11c7f1..000000000000 --- a/store/v2/storage/pebbledb/db.go +++ /dev/null @@ -1,528 +0,0 @@ -package pebbledb - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "math" - "slices" - - "github.com/cockroachdb/pebble" - - corestore "cosmossdk.io/core/store" - "cosmossdk.io/store/v2" - storeerrors "cosmossdk.io/store/v2/errors" - "cosmossdk.io/store/v2/internal/encoding" - "cosmossdk.io/store/v2/storage" - "cosmossdk.io/store/v2/storage/util" -) - -const ( - VersionSize = 8 - // PruneCommitBatchSize defines the size, in number of key/value pairs, to prune - // in a single batch. - PruneCommitBatchSize = 50 - // batchBufferSize defines the maximum size of a batch before it is committed. - batchBufferSize = 100_000 - - StorePrefixTpl = "s/k:%s/" // s/k: - removedStoreKeyPrefix = "s/_removed_key" // NB: removedStoreKeys key must be lexically smaller than StorePrefixTpl - latestVersionKey = "s/_latest" // NB: latestVersionKey key must be lexically smaller than StorePrefixTpl - pruneHeightKey = "s/_prune_height" // NB: pruneHeightKey key must be lexically smaller than StorePrefixTpl - tombstoneVal = "TOMBSTONE" -) - -var ( - _ storage.Database = (*Database)(nil) - _ store.UpgradableDatabase = (*Database)(nil) -) - -type Database struct { - storage *pebble.DB - - // earliestVersion defines the earliest version set in the database, which is - // only updated when the database is pruned. 
- earliestVersion uint64 - - // Sync is whether to sync writes through the OS buffer cache and down onto - // the actual disk, if applicable. Setting Sync is required for durability of - // individual write operations but can result in slower writes. - // - // If false, and the process or machine crashes, then a recent write may be - // lost. This is due to the recently written data being buffered inside the - // process running Pebble. This differs from the semantics of a write system - // call in which the data is buffered in the OS buffer cache and would thus - // survive a process crash. - sync bool -} - -func New(dataDir string) (*Database, error) { - opts := &pebble.Options{ - Comparer: MVCCComparer, - } - opts = opts.EnsureDefaults() - - db, err := pebble.Open(dataDir, opts) - if err != nil { - return nil, fmt.Errorf("failed to open PebbleDB: %w", err) - } - - earliestVersion, err := getEarliestVersion(db) - if err != nil { - return nil, fmt.Errorf("failed to get the earliest version: %w", err) - } - - return &Database{ - storage: db, - earliestVersion: earliestVersion, - sync: true, - }, nil -} - -func NewWithDB(storage *pebble.DB, sync bool) *Database { - earliestVersion, err := getEarliestVersion(storage) - if err != nil { - panic(fmt.Errorf("failed to get the earliest version: %w", err)) - } - - return &Database{ - storage: storage, - earliestVersion: earliestVersion, - sync: sync, - } -} - -func (db *Database) SetSync(sync bool) { - db.sync = sync -} - -func (db *Database) Close() error { - err := db.storage.Close() - db.storage = nil - return err -} - -func (db *Database) NewBatch(version uint64) (store.Batch, error) { - b, err := NewBatch(db.storage, version, db.sync) - if err != nil { - return nil, err - } - - return b, nil -} - -func (db *Database) SetLatestVersion(version uint64) error { - var ts [VersionSize]byte - binary.LittleEndian.PutUint64(ts[:], version) - - return db.storage.Set([]byte(latestVersionKey), ts[:], &pebble.WriteOptions{Sync: 
db.sync}) -} - -func (db *Database) GetLatestVersion() (uint64, error) { - bz, closer, err := db.storage.Get([]byte(latestVersionKey)) - if err != nil { - if errors.Is(err, pebble.ErrNotFound) { - // in case of a fresh database - return 0, nil - } - - return 0, err - } - - if len(bz) == 0 { - return 0, closer.Close() - } - - return binary.LittleEndian.Uint64(bz), closer.Close() -} - -func (db *Database) VersionExists(version uint64) (bool, error) { - latestVersion, err := db.GetLatestVersion() - if err != nil { - return false, err - } - - return latestVersion >= version && version >= db.earliestVersion, nil -} - -func (db *Database) setPruneHeight(pruneVersion uint64) error { - db.earliestVersion = pruneVersion + 1 - - var ts [VersionSize]byte - binary.LittleEndian.PutUint64(ts[:], pruneVersion) - - return db.storage.Set([]byte(pruneHeightKey), ts[:], &pebble.WriteOptions{Sync: db.sync}) -} - -func (db *Database) Has(storeKey []byte, version uint64, key []byte) (bool, error) { - val, err := db.Get(storeKey, version, key) - if err != nil { - return false, err - } - - return val != nil, nil -} - -func (db *Database) Get(storeKey []byte, targetVersion uint64, key []byte) ([]byte, error) { - if targetVersion < db.earliestVersion { - return nil, storeerrors.ErrVersionPruned{EarliestVersion: db.earliestVersion, RequestedVersion: targetVersion} - } - - prefixedVal, err := getMVCCSlice(db.storage, storeKey, key, targetVersion) - if err != nil { - if errors.Is(err, storeerrors.ErrRecordNotFound) { - return nil, nil - } - - return nil, fmt.Errorf("failed to perform PebbleDB read: %w", err) - } - - valBz, tombBz, ok := SplitMVCCKey(prefixedVal) - if !ok { - return nil, fmt.Errorf("invalid PebbleDB MVCC value: %s", prefixedVal) - } - - // A tombstone of zero or a target version that is less than the tombstone - // version means the key is not deleted at the target version. 
- if len(tombBz) == 0 { - return valBz, nil - } - - tombstone, err := decodeUint64Ascending(tombBz) - if err != nil { - return nil, fmt.Errorf("failed to decode value tombstone: %w", err) - } - - // A tombstone of zero or a target version that is less than the tombstone - // version means the key is not deleted at the target version. - if targetVersion < tombstone { - return valBz, nil - } - - // the value is considered deleted - return nil, nil -} - -// Prune removes all versions of all keys that are <= the given version. -// -// Note, the implementation of this method is inefficient and can be potentially -// time consuming given the size of the database and when the last pruning occurred -// (if any). This is because the implementation iterates over all keys in the -// database in order to delete them. -// -// See: https://github.com/cockroachdb/cockroach/blob/33623e3ee420174a4fd3226d1284b03f0e3caaac/pkg/storage/mvcc.go#L3182 -func (db *Database) Prune(version uint64) (err error) { - itr, err := db.storage.NewIter(&pebble.IterOptions{LowerBound: []byte("s/k:")}) - if err != nil { - return err - } - defer itr.Close() - - batch := db.storage.NewBatch() - defer func() { - err = errors.Join(err, batch.Close()) - }() - - var ( - batchCounter int - prevKey, prevKeyPrefixed, prevPrefixedVal []byte - prevKeyVersion uint64 - ) - - for itr.First(); itr.Valid(); { - prefixedKey := slices.Clone(itr.Key()) - - keyBz, verBz, ok := SplitMVCCKey(prefixedKey) - if !ok { - return fmt.Errorf("invalid PebbleDB MVCC key: %s", prefixedKey) - } - - var keyVersion uint64 - // handle version 0 (no version prefix) - if len(verBz) > 0 { - keyVersion, err = decodeUint64Ascending(verBz) - if err != nil { - return fmt.Errorf("failed to decode key version: %w", err) - } - } - // seek to next key if we are at a version which is higher than prune height - if keyVersion > version { - itr.NextPrefix() - continue - } - - // Delete a key if another entry for that key exists a larger version 
than - // the original but <= to the prune height. We also delete a key if it has - // been tombstoned and its version is <= to the prune height. - if prevKeyVersion <= version && (bytes.Equal(prevKey, keyBz) || valTombstoned(prevPrefixedVal)) { - if err := batch.Delete(prevKeyPrefixed, nil); err != nil { - return err - } - - batchCounter++ - if batchCounter >= PruneCommitBatchSize { - if err := batch.Commit(&pebble.WriteOptions{Sync: db.sync}); err != nil { - return err - } - - batchCounter = 0 - batch.Reset() - } - } - - prevKey = keyBz - prevKeyVersion = keyVersion - prevKeyPrefixed = prefixedKey - value, err := itr.ValueAndErr() - if err != nil { - return err - } - prevPrefixedVal = slices.Clone(value) - - itr.Next() - } - - // commit any leftover delete ops in batch - if batchCounter > 0 { - if err := batch.Commit(&pebble.WriteOptions{Sync: db.sync}); err != nil { - return err - } - } - - if err := db.deleteRemovedStoreKeys(version); err != nil { - return err - } - - return db.setPruneHeight(version) -} - -func (db *Database) Iterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, storeerrors.ErrKeyEmpty - } - - if start != nil && end != nil && bytes.Compare(start, end) > 0 { - return nil, storeerrors.ErrStartAfterEnd - } - - lowerBound := MVCCEncode(prependStoreKey(storeKey, start), 0) - - var upperBound []byte - if end != nil { - upperBound = MVCCEncode(prependStoreKey(storeKey, end), 0) - } - - itr, err := db.storage.NewIter(&pebble.IterOptions{LowerBound: lowerBound, UpperBound: upperBound}) - if err != nil { - return nil, err - } - - return newPebbleDBIterator(itr, storePrefix(storeKey), start, end, version, db.earliestVersion, false), nil -} - -func (db *Database) ReverseIterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { 
- return nil, storeerrors.ErrKeyEmpty - } - - if start != nil && end != nil && bytes.Compare(start, end) > 0 { - return nil, storeerrors.ErrStartAfterEnd - } - - lowerBound := MVCCEncode(prependStoreKey(storeKey, start), 0) - - var upperBound []byte - if end != nil { - upperBound = MVCCEncode(prependStoreKey(storeKey, end), 0) - } - - itr, err := db.storage.NewIter(&pebble.IterOptions{LowerBound: lowerBound, UpperBound: upperBound}) - if err != nil { - return nil, err - } - - return newPebbleDBIterator(itr, storePrefix(storeKey), start, end, version, db.earliestVersion, true), nil -} - -func (db *Database) PruneStoreKeys(storeKeys []string, version uint64) (err error) { - batch := db.storage.NewBatch() - defer func() { - err = errors.Join(err, batch.Close()) - }() - - for _, storeKey := range storeKeys { - if err := batch.Set([]byte(fmt.Sprintf("%s%s", encoding.BuildPrefixWithVersion(removedStoreKeyPrefix, version), storeKey)), []byte{}, nil); err != nil { - return err - } - } - - return batch.Commit(&pebble.WriteOptions{Sync: db.sync}) -} - -func storePrefix(storeKey []byte) []byte { - return []byte(fmt.Sprintf(StorePrefixTpl, storeKey)) -} - -func prependStoreKey(storeKey, key []byte) []byte { - return []byte(fmt.Sprintf("%s%s", storePrefix(storeKey), key)) -} - -// getEarliestVersion returns the earliest version set in the database. -// It is calculated by prune height + 1. If the prune height is not set, it -// returns 0. 
-func getEarliestVersion(storage *pebble.DB) (uint64, error) { - bz, closer, err := storage.Get([]byte(pruneHeightKey)) - if err != nil { - if errors.Is(err, pebble.ErrNotFound) { - // in cases where pruning was never triggered - return 0, nil - } - - return 0, err - } - - if len(bz) == 0 { - return 0, closer.Close() - } - - return binary.LittleEndian.Uint64(bz) + 1, closer.Close() -} - -func valTombstoned(value []byte) bool { - if value == nil { - return false - } - - _, tombBz, ok := SplitMVCCKey(value) - if !ok { - // XXX: This should not happen as that would indicate we have a malformed - // MVCC value. - panic(fmt.Sprintf("invalid PebbleDB MVCC value: %s", value)) - } - - // If the tombstone suffix is empty, we consider this a zero value and thus it - // is not tombstoned. - if len(tombBz) == 0 { - return false - } - - return true -} - -func getMVCCSlice(db *pebble.DB, storeKey, key []byte, version uint64) ([]byte, error) { - // end domain is exclusive, so we need to increment the version by 1 - if version < math.MaxUint64 { - version++ - } - - itr, err := db.NewIter(&pebble.IterOptions{ - LowerBound: MVCCEncode(prependStoreKey(storeKey, key), 0), - UpperBound: MVCCEncode(prependStoreKey(storeKey, key), version), - }) - if err != nil { - return nil, err - } - - defer itr.Close() - - if !itr.Last() { - return nil, storeerrors.ErrRecordNotFound - } - - _, vBz, ok := SplitMVCCKey(itr.Key()) - if !ok { - return nil, fmt.Errorf("invalid PebbleDB MVCC key: %s", itr.Key()) - } - - var keyVersion uint64 - // handle version 0 (no version prefix) - if len(vBz) > 0 { - keyVersion, err = decodeUint64Ascending(vBz) - if err != nil { - return nil, fmt.Errorf("failed to decode key version: %w", err) - } - } - if keyVersion > version { - return nil, fmt.Errorf("key version too large: %d", keyVersion) - } - - value, err := itr.ValueAndErr() - return slices.Clone(value), err -} - -func (db *Database) deleteRemovedStoreKeys(version uint64) (err error) { - batch := 
db.storage.NewBatch() - defer func() { - err = errors.Join(err, batch.Close()) - }() - - end := encoding.BuildPrefixWithVersion(removedStoreKeyPrefix, version+1) - storeKeyIter, err := db.storage.NewIter(&pebble.IterOptions{LowerBound: []byte(removedStoreKeyPrefix), UpperBound: end}) - if err != nil { - return err - } - defer storeKeyIter.Close() - - storeKeys := make(map[string]uint64) - prefixLen := len(end) - for storeKeyIter.First(); storeKeyIter.Valid(); storeKeyIter.Next() { - verBz := storeKeyIter.Key()[len(removedStoreKeyPrefix):prefixLen] - v, err := decodeUint64Ascending(verBz) - if err != nil { - return err - } - storeKey := string(storeKeyIter.Key()[prefixLen:]) - if ev, ok := storeKeys[storeKey]; ok { - if ev < v { - storeKeys[storeKey] = v - } - } else { - storeKeys[storeKey] = v - } - if err := batch.Delete(storeKeyIter.Key(), nil); err != nil { - return err - } - } - - for storeKey, v := range storeKeys { - if err := func() error { - storeKey := []byte(storeKey) - itr, err := db.storage.NewIter(&pebble.IterOptions{LowerBound: storePrefix(storeKey), UpperBound: storePrefix(util.CopyIncr(storeKey))}) - if err != nil { - return err - } - defer itr.Close() - - for itr.First(); itr.Valid(); itr.Next() { - itrKey := itr.Key() - _, verBz, ok := SplitMVCCKey(itrKey) - if !ok { - return fmt.Errorf("invalid PebbleDB MVCC key: %s", itrKey) - } - keyVersion, err := decodeUint64Ascending(verBz) - if err != nil { - return err - } - if keyVersion > v { - // skip keys that are newer than the version - continue - } - if err := batch.Delete(itr.Key(), nil); err != nil { - return err - } - if batch.Len() >= batchBufferSize { - if err := batch.Commit(&pebble.WriteOptions{Sync: db.sync}); err != nil { - return err - } - batch.Reset() - } - } - return nil - }(); err != nil { - return err - } - } - - return batch.Commit(&pebble.WriteOptions{Sync: true}) -} diff --git a/store/v2/storage/pebbledb/db_test.go b/store/v2/storage/pebbledb/db_test.go deleted file mode 100644 
index 0ef4c8ca9f25..000000000000 --- a/store/v2/storage/pebbledb/db_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package pebbledb - -import ( - "testing" - - "github.com/stretchr/testify/suite" - - coretesting "cosmossdk.io/core/testing" - "cosmossdk.io/store/v2/storage" -) - -func TestStorageTestSuite(t *testing.T) { - s := &storage.StorageTestSuite{ - NewDB: func(dir string) (*storage.StorageStore, error) { - db, err := New(dir) - if err == nil && db != nil { - // We set sync=false just to speed up CI tests. Operators should take - // careful consideration when setting this value in production environments. - db.SetSync(false) - } - - return storage.NewStorageStore(db, coretesting.NewNopLogger()), err - }, - EmptyBatchSize: 12, - } - - suite.Run(t, s) -} diff --git a/store/v2/storage/pebbledb/iterator.go b/store/v2/storage/pebbledb/iterator.go deleted file mode 100644 index 2401ab4ef000..000000000000 --- a/store/v2/storage/pebbledb/iterator.go +++ /dev/null @@ -1,437 +0,0 @@ -package pebbledb - -import ( - "bytes" - "fmt" - "slices" - - "github.com/cockroachdb/pebble" - - corestore "cosmossdk.io/core/store" -) - -var _ corestore.Iterator = (*iterator)(nil) - -// iterator implements the store.Iterator interface. It wraps a PebbleDB iterator -// with added MVCC key handling logic. The iterator will iterate over the key space -// in the provided domain for a given version. If a key has been written at the -// provided version, that key/value pair will be iterated over. Otherwise, the -// latest version for that key/value pair will be iterated over s.t. it's less -// than the provided version. 
-type iterator struct { - source *pebble.Iterator - prefix, start, end []byte - version uint64 - valid bool - reverse bool -} - -func newPebbleDBIterator(src *pebble.Iterator, prefix, mvccStart, mvccEnd []byte, version, earliestVersion uint64, reverse bool) *iterator { - if version < earliestVersion { - return &iterator{ - source: src, - prefix: prefix, - start: mvccStart, - end: mvccEnd, - version: version, - valid: false, - reverse: reverse, - } - } - - // move the underlying PebbleDB iterator to the first key - var valid bool - if reverse { - valid = src.Last() - } else { - valid = src.First() - } - - itr := &iterator{ - source: src, - prefix: prefix, - start: mvccStart, - end: mvccEnd, - version: version, - valid: valid, - reverse: reverse, - } - - if valid { - currKey, currKeyVersion, ok := SplitMVCCKey(itr.source.Key()) - if !ok { - // XXX: This should not happen as that would indicate we have a malformed - // MVCC value. - panic(fmt.Sprintf("invalid PebbleDB MVCC value: %s", itr.source.Key())) - } - - curKeyVersionDecoded, err := decodeUint64Ascending(currKeyVersion) - if err != nil { - itr.valid = false - return itr - } - - // We need to check whether initial key iterator visits has a version <= requested - // version. If larger version, call next to find another key which does. - if curKeyVersionDecoded > itr.version { - itr.Next() - } else { - // If version is less, seek to the largest version of that key <= requested - // iterator version. It is guaranteed this won't move the iterator to a key - // that is invalid since curKeyVersionDecoded <= requested iterator version, - // so there exists at least one version of currKey SeekLT may move to. - itr.valid = itr.source.SeekLT(MVCCEncode(currKey, itr.version+1)) - } - - // The cursor might now be pointing at a key/value pair that is tombstoned. - // If so, we must move the cursor. - if itr.valid && itr.cursorTombstoned() { - itr.Next() - } - } - return itr -} - -// Domain returns the domain of the iterator. 
The caller must not modify the -// return values. -func (itr *iterator) Domain() ([]byte, []byte) { - return itr.start, itr.end -} - -func (itr *iterator) Key() []byte { - itr.assertIsValid() - - key, _, ok := SplitMVCCKey(itr.source.Key()) - if !ok { - // XXX: This should not happen as that would indicate we have a malformed - // MVCC key. - panic(fmt.Sprintf("invalid PebbleDB MVCC key: %s", itr.source.Key())) - } - - keyCopy := slices.Clone(key) - return keyCopy[len(itr.prefix):] -} - -func (itr *iterator) Value() []byte { - itr.assertIsValid() - - val, _, ok := SplitMVCCKey(itr.source.Value()) - if !ok { - // XXX: This should not happen as that would indicate we have a malformed - // MVCC value. - panic(fmt.Sprintf("invalid PebbleDB MVCC value: %s", itr.source.Key())) - } - - return slices.Clone(val) -} - -func (itr *iterator) Next() { - if itr.reverse { - itr.nextReverse() - } else { - itr.nextForward() - } -} - -func (itr *iterator) Valid() bool { - // once invalid, forever invalid - if !itr.valid || !itr.source.Valid() { - itr.valid = false - return itr.valid - } - - // if source has error, consider it invalid - if err := itr.source.Error(); err != nil { - itr.valid = false - return itr.valid - } - - // if key is at the end or past it, consider it invalid - if end := itr.end; end != nil { - if bytes.Compare(end, itr.Key()) <= 0 { - itr.valid = false - return itr.valid - } - } - - return true -} - -func (itr *iterator) Error() error { - return itr.source.Error() -} - -func (itr *iterator) Close() error { - err := itr.source.Close() - itr.source = nil - itr.valid = false - - return err -} - -func (itr *iterator) assertIsValid() { - if !itr.valid { - panic("iterator is invalid") - } -} - -// cursorTombstoned checks if the current cursor is pointing at a key/value pair -// that is tombstoned. If the cursor is tombstoned, is returned, otherwise -// is returned. 
In the case where the iterator is valid but the key/value -// pair is tombstoned, the caller should call Next(). Note, this method assumes -// the caller assures the iterator is valid first! -func (itr *iterator) cursorTombstoned() bool { - _, tombBz, ok := SplitMVCCKey(itr.source.Value()) - if !ok { - // XXX: This should not happen as that would indicate we have a malformed - // MVCC value. - panic(fmt.Sprintf("invalid PebbleDB MVCC value: %s", itr.source.Key())) - } - - // If the tombstone suffix is empty, we consider this a zero value and thus it - // is not tombstoned. - if len(tombBz) == 0 { - return false - } - - // If the tombstone suffix is non-empty and greater than the target version, - // the value is not tombstoned. - tombstone, err := decodeUint64Ascending(tombBz) - if err != nil { - panic(fmt.Errorf("failed to decode value tombstone: %w", err)) - } - if tombstone > itr.version { - return false - } - - return true -} - -func (itr *iterator) DebugRawIterate() { - valid := itr.source.Valid() - if valid { - // The first key may not represent the desired target version, so move the - // cursor to the correct location. 
- firstKey, _, _ := SplitMVCCKey(itr.source.Key()) - valid = itr.source.SeekLT(MVCCEncode(firstKey, itr.version+1)) - } - - var err error - for valid { - key, vBz, ok := SplitMVCCKey(itr.source.Key()) - if !ok { - panic(fmt.Sprintf("invalid PebbleDB MVCC key: %s", itr.source.Key())) - } - - var version uint64 - // handle version 0 (no version prefix) - if len(vBz) > 0 { - version, err = decodeUint64Ascending(vBz) - if err != nil { - panic(fmt.Errorf("failed to decode key version: %w", err)) - } - } - - val, tombBz, ok := SplitMVCCKey(itr.source.Value()) - if !ok { - panic(fmt.Sprintf("invalid PebbleDB MVCC value: %s", itr.source.Value())) - } - - var tombstone uint64 - if len(tombBz) > 0 { - tombstone, err = decodeUint64Ascending(vBz) - if err != nil { - panic(fmt.Errorf("failed to decode value tombstone: %w", err)) - } - } - - fmt.Printf("KEY: %s, VALUE: %s, VERSION: %d, TOMBSTONE: %d\n", key, val, version, tombstone) - - var next bool - if itr.reverse { - next = itr.source.SeekLT(MVCCEncode(key, 0)) - } else { - next = itr.source.NextPrefix() - } - - if next { - nextKey, _, ok := SplitMVCCKey(itr.source.Key()) - if !ok { - panic(fmt.Sprintf("invalid PebbleDB MVCC key: %s", itr.source.Key())) - } - - // the next key must have itr.prefix as the prefix - if !bytes.HasPrefix(nextKey, itr.prefix) { - valid = false - } else { - valid = itr.source.SeekLT(MVCCEncode(nextKey, itr.version+1)) - } - } else { - valid = false - } - } -} - -func (itr *iterator) nextForward() { - if !itr.source.Valid() { - itr.valid = false - return - } - - currKey, _, ok := SplitMVCCKey(itr.source.Key()) - if !ok { - // XXX: This should not happen as that would indicate we have a malformed - // MVCC key. - panic(fmt.Sprintf("invalid PebbleDB MVCC key: %s", itr.source.Key())) - } - - next := itr.source.NextPrefix() - - // First move the iterator to the next prefix, which may not correspond to the - // desired version for that key, e.g. 
if the key was written at a later version, - // so we seek back to the latest desired version, s.t. the version is <= itr.version. - if next { - nextKey, _, ok := SplitMVCCKey(itr.source.Key()) - if !ok { - // XXX: This should not happen as that would indicate we have a malformed - // MVCC key. - itr.valid = false - return - } - - if !bytes.HasPrefix(nextKey, itr.prefix) { - // the next key must have itr.prefix as the prefix - itr.valid = false - return - } - - // Move the iterator to the closest version to the desired version, so we - // append the current iterator key to the prefix and seek to that key. - itr.valid = itr.source.SeekLT(MVCCEncode(nextKey, itr.version+1)) - - tmpKey, tmpKeyVersion, ok := SplitMVCCKey(itr.source.Key()) - if !ok { - // XXX: This should not happen as that would indicate we have a malformed - // MVCC key. - itr.valid = false - return - } - - // There exists cases where the SeekLT() call moved us back to the same key - // we started at, so we must move to next key, i.e. two keys forward. - if bytes.Equal(tmpKey, currKey) { - if itr.source.NextPrefix() { - itr.nextForward() - - _, tmpKeyVersion, ok = SplitMVCCKey(itr.source.Key()) - if !ok { - // XXX: This should not happen as that would indicate we have a malformed - // MVCC key. - itr.valid = false - return - } - - } else { - itr.valid = false - return - } - } - - // We need to verify that every Next call either moves the iterator to a key - // whose version is less than or equal to requested iterator version, or - // exhausts the iterator. - tmpKeyVersionDecoded, err := decodeUint64Ascending(tmpKeyVersion) - if err != nil { - itr.valid = false - return - } - - // If iterator is at a entry whose version is higher than requested version, - // call nextForward again. - if tmpKeyVersionDecoded > itr.version { - itr.nextForward() - } - - // The cursor might now be pointing at a key/value pair that is tombstoned. - // If so, we must move the cursor. 
- if itr.valid && itr.cursorTombstoned() { - itr.nextForward() - } - - return - } - - itr.valid = false -} - -func (itr *iterator) nextReverse() { - if !itr.source.Valid() { - itr.valid = false - return - } - - currKey, _, ok := SplitMVCCKey(itr.source.Key()) - if !ok { - // XXX: This should not happen as that would indicate we have a malformed - // MVCC key. - panic(fmt.Sprintf("invalid PebbleDB MVCC key: %s", itr.source.Key())) - } - - next := itr.source.SeekLT(MVCCEncode(currKey, 0)) - - // First move the iterator to the next prefix, which may not correspond to the - // desired version for that key, e.g. if the key was written at a later version, - // so we seek back to the latest desired version, s.t. the version is <= itr.version. - if next { - nextKey, _, ok := SplitMVCCKey(itr.source.Key()) - if !ok { - // XXX: This should not happen as that would indicate we have a malformed - // MVCC key. - itr.valid = false - return - } - - if !bytes.HasPrefix(nextKey, itr.prefix) { - // the next key must have itr.prefix as the prefix - itr.valid = false - return - } - - // Move the iterator to the closest version to the desired version, so we - // append the current iterator key to the prefix and seek to that key. - itr.valid = itr.source.SeekLT(MVCCEncode(nextKey, itr.version+1)) - - _, tmpKeyVersion, ok := SplitMVCCKey(itr.source.Key()) - if !ok { - // XXX: This should not happen as that would indicate we have a malformed - // MVCC key. - itr.valid = false - return - } - - // We need to verify that every Next call either moves the iterator to a key - // whose version is less than or equal to requested iterator version, or - // exhausts the iterator. - tmpKeyVersionDecoded, err := decodeUint64Ascending(tmpKeyVersion) - if err != nil { - itr.valid = false - return - } - - // If iterator is at a entry whose version is higher than requested version, - // call nextReverse again. 
- if tmpKeyVersionDecoded > itr.version { - itr.nextReverse() - } - - // The cursor might now be pointing at a key/value pair that is tombstoned. - // If so, we must move the cursor. - if itr.valid && itr.cursorTombstoned() { - itr.nextReverse() - } - - return - } - - itr.valid = false -} diff --git a/store/v2/storage/rocksdb/batch.go b/store/v2/storage/rocksdb/batch.go deleted file mode 100644 index 826b81778a87..000000000000 --- a/store/v2/storage/rocksdb/batch.go +++ /dev/null @@ -1,67 +0,0 @@ -//go:build rocksdb -// +build rocksdb - -package rocksdb - -import ( - "encoding/binary" - - "github.com/linxGnu/grocksdb" - - "cosmossdk.io/store/v2" -) - -var _ store.Batch = (*Batch)(nil) - -type Batch struct { - version uint64 - ts [TimestampSize]byte - storage *grocksdb.DB - cfHandle *grocksdb.ColumnFamilyHandle - batch *grocksdb.WriteBatch -} - -// NewBatch creates a new versioned batch used for batch writes. The caller -// must ensure to call Write() on the returned batch to commit the changes and to -// destroy the batch when done. 
-func NewBatch(db *Database, version uint64) Batch { - var ts [TimestampSize]byte - binary.LittleEndian.PutUint64(ts[:], version) - - batch := grocksdb.NewWriteBatch() - batch.Put([]byte(latestVersionKey), ts[:]) - - return Batch{ - version: version, - ts: ts, - storage: db.storage, - cfHandle: db.cfHandle, - batch: batch, - } -} - -func (b Batch) Size() int { - return len(b.batch.Data()) -} - -func (b Batch) Reset() error { - b.batch.Clear() - return nil -} - -func (b Batch) Set(storeKey, key, value []byte) error { - prefixedKey := prependStoreKey(storeKey, key) - b.batch.PutCFWithTS(b.cfHandle, prefixedKey, b.ts[:], value) - return nil -} - -func (b Batch) Delete(storeKey, key []byte) error { - prefixedKey := prependStoreKey(storeKey, key) - b.batch.DeleteCFWithTS(b.cfHandle, prefixedKey, b.ts[:]) - return nil -} - -func (b Batch) Write() error { - defer b.batch.Destroy() - return b.storage.Write(defaultWriteOpts, b.batch) -} diff --git a/store/v2/storage/rocksdb/comparator.go b/store/v2/storage/rocksdb/comparator.go deleted file mode 100644 index 5da27d9121f9..000000000000 --- a/store/v2/storage/rocksdb/comparator.go +++ /dev/null @@ -1,76 +0,0 @@ -//go:build rocksdb -// +build rocksdb - -package rocksdb - -import ( - "bytes" - "encoding/binary" - - "github.com/linxGnu/grocksdb" -) - -// CreateTSComparator should behavior identical with RocksDB builtin timestamp comparator. -// We also use the same builtin comparator name so the builtin tools `ldb`/`sst_dump` -// can work with the database. -func CreateTSComparator() *grocksdb.Comparator { - return grocksdb.NewComparatorWithTimestamp( - "leveldb.BytewiseComparator.u64ts", - TimestampSize, - compare, - compareTS, - compareWithoutTS, - ) -} - -// compareTS compares timestamp as little endian encoded integers. -// -// NOTICE: The behavior must be identical to RocksDB builtin comparator -// "leveldb.BytewiseComparator.u64ts". 
-func compareTS(bz1, bz2 []byte) int { - ts1 := binary.LittleEndian.Uint64(bz1) - ts2 := binary.LittleEndian.Uint64(bz2) - - switch { - case ts1 < ts2: - return -1 - - case ts1 > ts2: - return 1 - - default: - return 0 - } -} - -// compare compares two internal keys with timestamp suffix, larger timestamp -// comes first. -// -// NOTICE: The behavior must be identical to RocksDB builtin comparator -// "leveldb.BytewiseComparator.u64ts". -func compare(a, b []byte) int { - ret := compareWithoutTS(a, true, b, true) - if ret != 0 { - return ret - } - - // Compare timestamp. For the same user key with different timestamps, larger - // (newer) timestamp comes first, which means seek operation will try to find - // a version less than or equal to the target version. - return -compareTS(a[len(a)-TimestampSize:], b[len(b)-TimestampSize:]) -} - -// compareWithoutTS compares two internal keys without the timestamp part. -// -// NOTICE: the behavior must be identical to RocksDB builtin comparator -// "leveldb.BytewiseComparator.u64ts". 
-func compareWithoutTS(a []byte, aHasTS bool, b []byte, bHasTS bool) int { - if aHasTS { - a = a[:len(a)-TimestampSize] - } - if bHasTS { - b = b[:len(b)-TimestampSize] - } - - return bytes.Compare(a, b) -} diff --git a/store/v2/storage/rocksdb/db.go b/store/v2/storage/rocksdb/db.go deleted file mode 100644 index 248b014f7b4e..000000000000 --- a/store/v2/storage/rocksdb/db.go +++ /dev/null @@ -1,251 +0,0 @@ -//go:build rocksdb -// +build rocksdb - -package rocksdb - -import ( - "bytes" - "encoding/binary" - "fmt" - "slices" - - "github.com/linxGnu/grocksdb" - - corestore "cosmossdk.io/core/store" - "cosmossdk.io/store/v2" - "cosmossdk.io/store/v2/errors" - "cosmossdk.io/store/v2/storage" - "cosmossdk.io/store/v2/storage/util" -) - -const ( - TimestampSize = 8 - - StorePrefixTpl = "s/k:%s/" - latestVersionKey = "s/latest" -) - -var ( - _ storage.Database = (*Database)(nil) - _ store.UpgradableDatabase = (*Database)(nil) - - defaultWriteOpts = grocksdb.NewDefaultWriteOptions() - defaultReadOpts = grocksdb.NewDefaultReadOptions() -) - -type Database struct { - storage *grocksdb.DB - cfHandle *grocksdb.ColumnFamilyHandle - - // tsLow reflects the full_history_ts_low CF value, which is earliest version - // supported - tsLow uint64 -} - -func New(dataDir string) (*Database, error) { - storage, cfHandle, err := OpenRocksDB(dataDir) - if err != nil { - return nil, fmt.Errorf("failed to open RocksDB: %w", err) - } - - slice, err := storage.GetFullHistoryTsLow(cfHandle) - if err != nil { - return nil, fmt.Errorf("failed to get full_history_ts_low: %w", err) - } - - var tsLow uint64 - tsLowBz := copyAndFreeSlice(slice) - if len(tsLowBz) > 0 { - tsLow = binary.LittleEndian.Uint64(tsLowBz) - } - - return &Database{ - storage: storage, - cfHandle: cfHandle, - tsLow: tsLow, - }, nil -} - -func NewWithDB(storage *grocksdb.DB, cfHandle *grocksdb.ColumnFamilyHandle) (*Database, error) { - slice, err := storage.GetFullHistoryTsLow(cfHandle) - if err != nil { - return nil, 
fmt.Errorf("failed to get full_history_ts_low: %w", err) - } - - var tsLow uint64 - tsLowBz := copyAndFreeSlice(slice) - if len(tsLowBz) > 0 { - tsLow = binary.LittleEndian.Uint64(tsLowBz) - } - - return &Database{ - storage: storage, - cfHandle: cfHandle, - tsLow: tsLow, - }, nil -} - -func (db *Database) Close() error { - db.storage.Close() - - db.storage = nil - db.cfHandle = nil - - return nil -} - -func (db *Database) NewBatch(version uint64) (store.Batch, error) { - return NewBatch(db, version), nil -} - -func (db *Database) getSlice(storeKey []byte, version uint64, key []byte) (*grocksdb.Slice, error) { - if version < db.tsLow { - return nil, errors.ErrVersionPruned{EarliestVersion: db.tsLow, RequestedVersion: version} - } - - return db.storage.GetCF( - newTSReadOptions(version), - db.cfHandle, - prependStoreKey(storeKey, key), - ) -} - -func (db *Database) SetLatestVersion(version uint64) error { - var ts [TimestampSize]byte - binary.LittleEndian.PutUint64(ts[:], version) - - return db.storage.Put(defaultWriteOpts, []byte(latestVersionKey), ts[:]) -} - -func (db *Database) GetLatestVersion() (uint64, error) { - bz, err := db.storage.GetBytes(defaultReadOpts, []byte(latestVersionKey)) - if err != nil { - return 0, err - } - - if len(bz) == 0 { - // in case of a fresh database - return 0, nil - } - - return binary.LittleEndian.Uint64(bz), nil -} - -func (db *Database) VersionExists(version uint64) (bool, error) { - latestVersion, err := db.GetLatestVersion() - if err != nil { - return false, err - } - - return latestVersion >= version && version >= db.tsLow, nil -} - -func (db *Database) Has(storeKey []byte, version uint64, key []byte) (bool, error) { - slice, err := db.getSlice(storeKey, version, key) - if err != nil { - return false, err - } - - return slice.Exists(), nil -} - -func (db *Database) Get(storeKey []byte, version uint64, key []byte) ([]byte, error) { - slice, err := db.getSlice(storeKey, version, key) - if err != nil { - return nil, 
fmt.Errorf("failed to get RocksDB slice: %w", err) - } - - return copyAndFreeSlice(slice), nil -} - -// Prune prunes all versions up to and including the provided version argument. -// Internally, this performs a manual compaction, the data with older timestamp -// will be GCed by compaction. -func (db *Database) Prune(version uint64) error { - tsLow := version + 1 // we increment by 1 to include the provided version - - var ts [TimestampSize]byte - binary.LittleEndian.PutUint64(ts[:], tsLow) - compactOpts := grocksdb.NewCompactRangeOptions() - compactOpts.SetFullHistoryTsLow(ts[:]) - db.storage.CompactRangeCFOpt(db.cfHandle, grocksdb.Range{}, compactOpts) - - db.tsLow = tsLow - return nil -} - -func (db *Database) Iterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, errors.ErrKeyEmpty - } - - if start != nil && end != nil && bytes.Compare(start, end) > 0 { - return nil, errors.ErrStartAfterEnd - } - - prefix := storePrefix(storeKey) - start, end = util.IterateWithPrefix(prefix, start, end) - - itr := db.storage.NewIteratorCF(newTSReadOptions(version), db.cfHandle) - return newRocksDBIterator(itr, prefix, start, end, false), nil -} - -func (db *Database) ReverseIterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, errors.ErrKeyEmpty - } - - if start != nil && end != nil && bytes.Compare(start, end) > 0 { - return nil, errors.ErrStartAfterEnd - } - - prefix := storePrefix(storeKey) - start, end = util.IterateWithPrefix(prefix, start, end) - - itr := db.storage.NewIteratorCF(newTSReadOptions(version), db.cfHandle) - return newRocksDBIterator(itr, prefix, start, end, true), nil -} - -// PruneStoreKeys will do nothing for RocksDB, it will be pruned by compaction -// when the version is pruned -func (db *Database) 
PruneStoreKeys(_ []string, _ uint64) error { - return nil -} - -// newTSReadOptions returns ReadOptions used in the RocksDB column family read. -func newTSReadOptions(version uint64) *grocksdb.ReadOptions { - var ts [TimestampSize]byte - binary.LittleEndian.PutUint64(ts[:], version) - - readOpts := grocksdb.NewDefaultReadOptions() - readOpts.SetTimestamp(ts[:]) - - return readOpts -} - -func storePrefix(storeKey []byte) []byte { - return []byte(fmt.Sprintf(StorePrefixTpl, storeKey)) -} - -func prependStoreKey(storeKey, key []byte) []byte { - return []byte(fmt.Sprintf("%s%s", storePrefix(storeKey), key)) -} - -// copyAndFreeSlice will copy a given RocksDB slice and free it. If the slice does -// not exist, will be returned. -func copyAndFreeSlice(s *grocksdb.Slice) []byte { - defer s.Free() - if !s.Exists() { - return nil - } - - return slices.Clone(s.Data()) -} - -func readOnlySlice(s *grocksdb.Slice) []byte { - if !s.Exists() { - return nil - } - - return s.Data() -} diff --git a/store/v2/storage/rocksdb/db_noflag.go b/store/v2/storage/rocksdb/db_noflag.go deleted file mode 100644 index 93bc3090f284..000000000000 --- a/store/v2/storage/rocksdb/db_noflag.go +++ /dev/null @@ -1,70 +0,0 @@ -//go:build !rocksdb -// +build !rocksdb - -package rocksdb - -import ( - corestore "cosmossdk.io/core/store" - "cosmossdk.io/store/v2" - "cosmossdk.io/store/v2/storage" -) - -var ( - _ storage.Database = (*Database)(nil) - _ store.UpgradableDatabase = (*Database)(nil) -) - -type Database struct{} - -func New(dataDir string) (*Database, error) { - return &Database{}, nil -} - -func (db *Database) Close() error { - return nil -} - -func (db *Database) NewBatch(version uint64) (store.Batch, error) { - panic("rocksdb requires a build flag") -} - -func (db *Database) SetLatestVersion(version uint64) error { - panic("rocksdb requires a build flag") -} - -func (db *Database) GetLatestVersion() (uint64, error) { - panic("rocksdb requires a build flag") -} - -func (db *Database) 
VersionExists(version uint64) (bool, error) { - panic("rocksdb requires a build flag") -} - -func (db *Database) Has(storeKey []byte, version uint64, key []byte) (bool, error) { - panic("rocksdb requires a build flag") -} - -func (db *Database) Get(storeKey []byte, version uint64, key []byte) ([]byte, error) { - panic("rocksdb requires a build flag") -} - -// Prune prunes all versions up to and including the provided version argument. -// Internally, this performs a manual compaction, the data with older timestamp -// will be GCed by compaction. -func (db *Database) Prune(version uint64) error { - panic("rocksdb requires a build flag") -} - -func (db *Database) Iterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) { - panic("rocksdb requires a build flag") -} - -func (db *Database) ReverseIterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) { - panic("rocksdb requires a build flag") -} - -// PruneStoreKeys will do nothing for RocksDB, it will be pruned by compaction -// when the version is pruned -func (db *Database) PruneStoreKeys(_ []string, _ uint64) error { - return nil -} diff --git a/store/v2/storage/rocksdb/db_test.go b/store/v2/storage/rocksdb/db_test.go deleted file mode 100644 index a77afbb3a8fd..000000000000 --- a/store/v2/storage/rocksdb/db_test.go +++ /dev/null @@ -1,90 +0,0 @@ -//go:build rocksdb -// +build rocksdb - -package rocksdb - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - coretesting "cosmossdk.io/core/testing" - "cosmossdk.io/store/v2/storage" -) - -var storeKey1 = []byte("store1") - -func TestStorageTestSuite(t *testing.T) { - s := &storage.StorageTestSuite{ - NewDB: func(dir string) (*storage.StorageStore, error) { - db, err := New(dir) - return storage.NewStorageStore(db, coretesting.NewNopLogger()), err - }, - EmptyBatchSize: 12, - SkipTests: []string{"TestUpgradable_Prune"}, - } - 
suite.Run(t, s) -} - -func TestDatabase_ReverseIterator(t *testing.T) { - db, err := New(t.TempDir()) - require.NoError(t, err) - defer db.Close() - - batch := NewBatch(db, 1) - for i := 0; i < 100; i++ { - key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 - val := fmt.Sprintf("val%03d", i) // val000, val001, ..., val099 - - require.NoError(t, batch.Set(storeKey1, []byte(key), []byte(val))) - } - - require.NoError(t, batch.Write()) - - // reverse iterator without an end key - iter, err := db.ReverseIterator(storeKey1, 1, []byte("key000"), nil) - require.NoError(t, err) - - defer iter.Close() - - i, count := 99, 0 - for ; iter.Valid(); iter.Next() { - require.Equal(t, []byte(fmt.Sprintf("key%03d", i)), iter.Key()) - require.Equal(t, []byte(fmt.Sprintf("val%03d", i)), iter.Value()) - - i-- - count++ - } - require.Equal(t, 100, count) - require.NoError(t, iter.Error()) - - // seek past domain, which should make the iterator invalid and produce an error - require.False(t, iter.Valid()) - - // reverse iterator with a start and end domain - iter2, err := db.ReverseIterator(storeKey1, 1, []byte("key010"), []byte("key019")) - require.NoError(t, err) - - defer iter2.Close() - - i, count = 18, 0 - for ; iter2.Valid(); iter2.Next() { - require.Equal(t, []byte(fmt.Sprintf("key%03d", i)), iter2.Key()) - require.Equal(t, []byte(fmt.Sprintf("val%03d", i)), iter2.Value()) - - i-- - count++ - } - require.Equal(t, 9, count) - require.NoError(t, iter2.Error()) - - // seek past domain, which should make the iterator invalid and produce an error - require.False(t, iter2.Valid()) - - // start must be <= end - iter3, err := db.ReverseIterator(storeKey1, 1, []byte("key020"), []byte("key019")) - require.Error(t, err) - require.Nil(t, iter3) -} diff --git a/store/v2/storage/rocksdb/iterator.go b/store/v2/storage/rocksdb/iterator.go deleted file mode 100644 index 9a09dc92c5c8..000000000000 --- a/store/v2/storage/rocksdb/iterator.go +++ /dev/null @@ -1,159 +0,0 @@ -//go:build 
rocksdb -// +build rocksdb - -package rocksdb - -import ( - "bytes" - - "github.com/linxGnu/grocksdb" - - corestore "cosmossdk.io/core/store" -) - -var _ corestore.Iterator = (*iterator)(nil) - -type iterator struct { - source *grocksdb.Iterator - prefix, start, end []byte - reverse bool - invalid bool -} - -func newRocksDBIterator(source *grocksdb.Iterator, prefix, start, end []byte, reverse bool) *iterator { - if reverse { - if end == nil { - source.SeekToLast() - } else { - source.Seek(end) - - if source.Valid() { - eoaKey := readOnlySlice(source.Key()) // end or after key - if bytes.Compare(end, eoaKey) <= 0 { - source.Prev() - } - } else { - source.SeekToLast() - } - } - } else { - if start == nil { - source.SeekToFirst() - } else { - source.Seek(start) - } - } - - return &iterator{ - source: source, - prefix: prefix, - start: start, - end: end, - reverse: reverse, - invalid: !source.Valid(), - } -} - -// Domain returns the domain of the iterator. The caller must not modify the -// return values. 
-func (itr *iterator) Domain() ([]byte, []byte) { - start := itr.start - if start != nil { - start = start[len(itr.prefix):] - if len(start) == 0 { - start = nil - } - } - - end := itr.end - if end != nil { - end = end[len(itr.prefix):] - if len(end) == 0 { - end = nil - } - } - - return start, end -} - -func (itr *iterator) Valid() bool { - // once invalid, forever invalid - if itr.invalid { - return false - } - - // if source has error, consider it invalid - if err := itr.source.Err(); err != nil { - itr.invalid = true - return false - } - - // if source is invalid, consider it invalid - if !itr.source.Valid() { - itr.invalid = true - return false - } - - // if key is at the end or past it, consider it invalid - start := itr.start - end := itr.end - key := readOnlySlice(itr.source.Key()) - - if itr.reverse { - if start != nil && bytes.Compare(key, start) < 0 { - itr.invalid = true - return false - } - } else { - if end != nil && bytes.Compare(end, key) <= 0 { - itr.invalid = true - return false - } - } - - return true -} - -func (itr *iterator) Key() []byte { - itr.assertIsValid() - return copyAndFreeSlice(itr.source.Key())[len(itr.prefix):] -} - -func (itr *iterator) Value() []byte { - itr.assertIsValid() - return copyAndFreeSlice(itr.source.Value()) -} - -func (itr *iterator) Timestamp() []byte { - return itr.source.Timestamp().Data() -} - -func (itr iterator) Next() { - if itr.invalid { - return - } - - if itr.reverse { - itr.source.Prev() - } else { - itr.source.Next() - } -} - -func (itr *iterator) Error() error { - return itr.source.Err() -} - -func (itr *iterator) Close() error { - itr.source.Close() - itr.source = nil - itr.invalid = true - - return nil -} - -func (itr *iterator) assertIsValid() { - if itr.invalid { - panic("iterator is invalid") - } -} diff --git a/store/v2/storage/rocksdb/opts.go b/store/v2/storage/rocksdb/opts.go deleted file mode 100644 index bf2272c17c21..000000000000 --- a/store/v2/storage/rocksdb/opts.go +++ /dev/null @@ -1,125 
+0,0 @@ -//go:build rocksdb -// +build rocksdb - -package rocksdb - -import ( - "encoding/binary" - "runtime" - - "github.com/linxGnu/grocksdb" -) - -const ( - // CFNameStateStorage defines the RocksDB column family name for versioned state - // storage. - CFNameStateStorage = "state_storage" - - // CFNameDefault defines the RocksDB column family name for the default column. - CFNameDefault = "default" -) - -// NewRocksDBOpts returns the options used for the RocksDB column family for use -// in state storage. -// -// FIXME: We do not enable dict compression for SSTFileWriter, because otherwise -// the file writer won't report correct file size. -// Ref: https://github.com/facebook/rocksdb/issues/11146 -func NewRocksDBOpts(sstFileWriter bool) *grocksdb.Options { - opts := grocksdb.NewDefaultOptions() - opts.SetCreateIfMissing(true) - opts.SetComparator(CreateTSComparator()) - opts.IncreaseParallelism(runtime.NumCPU()) - opts.OptimizeLevelStyleCompaction(512 * 1024 * 1024) - opts.SetTargetFileSizeMultiplier(2) - opts.SetLevelCompactionDynamicLevelBytes(true) - - // block based table options - bbto := grocksdb.NewDefaultBlockBasedTableOptions() - - // 1G block cache - bbto.SetBlockSize(32 * 1024) - bbto.SetBlockCache(grocksdb.NewLRUCache(1 << 30)) - - bbto.SetFilterPolicy(grocksdb.NewRibbonHybridFilterPolicy(9.9, 1)) - bbto.SetIndexType(grocksdb.KBinarySearchWithFirstKey) - bbto.SetOptimizeFiltersForMemory(true) - opts.SetBlockBasedTableFactory(bbto) - - // Improve sst file creation speed: compaction or sst file writer. 
- opts.SetCompressionOptionsParallelThreads(4) - - if !sstFileWriter { - // compression options at bottommost level - opts.SetBottommostCompression(grocksdb.ZSTDCompression) - - compressOpts := grocksdb.NewDefaultCompressionOptions() - compressOpts.MaxDictBytes = 112640 // 110k - compressOpts.Level = 12 - - opts.SetBottommostCompressionOptions(compressOpts, true) - opts.SetBottommostCompressionOptionsZstdMaxTrainBytes(compressOpts.MaxDictBytes*100, true) - } - - return opts -} - -// OpenRocksDB opens a RocksDB database connection for versioned reading and writing. -// It also returns a column family handle for versioning using user-defined timestamps. -// The default column family is used for metadata, specifically key/value pairs -// that are stored on another column family named with "state_storage", which has -// user-defined timestamp enabled. -func OpenRocksDB(dataDir string) (*grocksdb.DB, *grocksdb.ColumnFamilyHandle, error) { - opts := grocksdb.NewDefaultOptions() - opts.SetCreateIfMissing(true) - opts.SetCreateIfMissingColumnFamilies(true) - - db, cfHandles, err := grocksdb.OpenDbColumnFamilies( - opts, - dataDir, - []string{ - CFNameDefault, - CFNameStateStorage, - }, - []*grocksdb.Options{ - opts, - NewRocksDBOpts(false), - }, - ) - if err != nil { - return nil, nil, err - } - - return db, cfHandles[1], nil -} - -// OpenRocksDBAndTrimHistory opens a RocksDB handle similar to `OpenRocksDB`, -// but it also trims the versions newer than target one, such that it can be used -// for rollback. 
-func OpenRocksDBAndTrimHistory(dataDir string, version int64) (*grocksdb.DB, *grocksdb.ColumnFamilyHandle, error) { - var ts [TimestampSize]byte - binary.LittleEndian.PutUint64(ts[:], uint64(version)) - - opts := grocksdb.NewDefaultOptions() - opts.SetCreateIfMissing(true) - opts.SetCreateIfMissingColumnFamilies(true) - - db, cfHandles, err := grocksdb.OpenDbAndTrimHistory( - opts, - dataDir, - []string{ - CFNameDefault, - CFNameStateStorage, - }, - []*grocksdb.Options{ - opts, - NewRocksDBOpts(false), - }, - ts[:], - ) - if err != nil { - return nil, nil, err - } - - return db, cfHandles[1], nil -} diff --git a/store/v2/storage/storage_bench_test.go b/store/v2/storage/storage_bench_test.go deleted file mode 100644 index 36de5772ef6a..000000000000 --- a/store/v2/storage/storage_bench_test.go +++ /dev/null @@ -1,182 +0,0 @@ -//go:build rocksdb -// +build rocksdb - -package storage_test - -import ( - "bytes" - "fmt" - "math/rand" - "sort" - "testing" - - "github.com/stretchr/testify/require" - - corestore "cosmossdk.io/core/store" - coretesting "cosmossdk.io/core/testing" - "cosmossdk.io/store/v2" - "cosmossdk.io/store/v2/storage" - "cosmossdk.io/store/v2/storage/pebbledb" - "cosmossdk.io/store/v2/storage/rocksdb" -) - -var storeKey1 = []byte("store1") - -var ( - backends = map[string]func(dataDir string) (store.VersionedWriter, error){ - "rocksdb_versiondb_opts": func(dataDir string) (store.VersionedWriter, error) { - db, err := rocksdb.New(dataDir) - return storage.NewStorageStore(db, coretesting.NewNopLogger()), err - }, - "pebbledb_default_opts": func(dataDir string) (store.VersionedWriter, error) { - db, err := pebbledb.New(dataDir) - if err == nil && db != nil { - db.SetSync(false) - } - - return storage.NewStorageStore(db, coretesting.NewNopLogger()), err - }, - } - rng = rand.New(rand.NewSource(567320)) -) - -func BenchmarkGet(b *testing.B) { - numKeyVals := 1_000_000 - keys := make([][]byte, numKeyVals) - vals := make([][]byte, numKeyVals) - for i 
:= 0; i < numKeyVals; i++ { - key := make([]byte, 128) - val := make([]byte, 128) - - _, err := rng.Read(key) - require.NoError(b, err) - _, err = rng.Read(val) - require.NoError(b, err) - - keys[i] = key - vals[i] = val - } - - for ty, fn := range backends { - db, err := fn(b.TempDir()) - require.NoError(b, err) - defer func() { - _ = db.Close() - }() - - cs := corestore.NewChangesetWithPairs(1, map[string]corestore.KVPairs{string(storeKey1): {}}) - for i := 0; i < numKeyVals; i++ { - cs.AddKVPair(storeKey1, corestore.KVPair{Key: keys[i], Value: vals[i]}) - } - - require.NoError(b, db.ApplyChangeset(cs)) - - b.Run(fmt.Sprintf("backend_%s", ty), func(b *testing.B) { - b.ResetTimer() - - for i := 0; i < b.N; i++ { - b.StopTimer() - key := keys[rng.Intn(len(keys))] - - b.StartTimer() - _, err = db.Get(storeKey1, 1, key) - require.NoError(b, err) - } - }) - } -} - -func BenchmarkApplyChangeset(b *testing.B) { - for ty, fn := range backends { - db, err := fn(b.TempDir()) - require.NoError(b, err) - defer func() { - _ = db.Close() - }() - - b.Run(fmt.Sprintf("backend_%s", ty), func(b *testing.B) { - b.ResetTimer() - - for i := 0; i < b.N; i++ { - b.StopTimer() - - ver := uint64(b.N + 1) - cs := corestore.NewChangesetWithPairs(ver, map[string]corestore.KVPairs{string(storeKey1): {}}) - for j := 0; j < 1000; j++ { - key := make([]byte, 128) - val := make([]byte, 128) - - _, err = rng.Read(key) - require.NoError(b, err) - _, err = rng.Read(val) - require.NoError(b, err) - - cs.AddKVPair(storeKey1, corestore.KVPair{Key: key, Value: val}) - } - - b.StartTimer() - require.NoError(b, db.ApplyChangeset(cs)) - } - }) - } -} - -func BenchmarkIterate(b *testing.B) { - numKeyVals := 1_000_000 - keys := make([][]byte, numKeyVals) - vals := make([][]byte, numKeyVals) - for i := 0; i < numKeyVals; i++ { - key := make([]byte, 128) - val := make([]byte, 128) - - _, err := rng.Read(key) - require.NoError(b, err) - _, err = rng.Read(val) - require.NoError(b, err) - - keys[i] = key - 
vals[i] = val - - } - - for ty, fn := range backends { - db, err := fn(b.TempDir()) - require.NoError(b, err) - defer func() { - _ = db.Close() - }() - - b.StopTimer() - - cs := corestore.NewChangesetWithPairs(1, map[string]corestore.KVPairs{string(storeKey1): {}}) - for i := 0; i < numKeyVals; i++ { - cs.AddKVPair(storeKey1, corestore.KVPair{Key: keys[i], Value: vals[i]}) - } - - require.NoError(b, db.ApplyChangeset(cs)) - - sort.Slice(keys, func(i, j int) bool { - return bytes.Compare(keys[i], keys[j]) < 0 - }) - - b.Run(fmt.Sprintf("backend_%s", ty), func(b *testing.B) { - b.ResetTimer() - - for i := 0; i < b.N; i++ { - b.StopTimer() - - itr, err := db.Iterator(storeKey1, 1, keys[0], nil) - require.NoError(b, err) - - b.StartTimer() - - for ; itr.Valid(); itr.Next() { - _ = itr.Key() - _ = itr.Value() - } - - require.NoError(b, itr.Error()) - } - }) - } -} diff --git a/store/v2/storage/storage_test_suite.go b/store/v2/storage/storage_test_suite.go deleted file mode 100644 index 4d38efe7931e..000000000000 --- a/store/v2/storage/storage_test_suite.go +++ /dev/null @@ -1,1056 +0,0 @@ -package storage - -import ( - "fmt" - "slices" - "testing" - - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - corestore "cosmossdk.io/core/store" - "cosmossdk.io/store/v2" -) - -const ( - storeKey1 = "store1" -) - -var storeKey1Bytes = []byte(storeKey1) - -// StorageTestSuite defines a reusable test suite for all storage backends. 
-type StorageTestSuite struct { - suite.Suite - - NewDB func(dir string) (*StorageStore, error) - EmptyBatchSize int - SkipTests []string -} - -func (s *StorageTestSuite) TestDatabase_Close() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - s.Require().NoError(db.Close()) - - // close should not be idempotent - s.Require().Panics(func() { _ = db.Close() }) -} - -func (s *StorageTestSuite) TestDatabase_LatestVersion() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - lv, err := db.GetLatestVersion() - s.Require().NoError(err) - s.Require().Zero(lv) - - for i := uint64(1); i <= 1001; i++ { - err = db.SetLatestVersion(i) - s.Require().NoError(err) - - lv, err = db.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(i, lv) - } -} - -func (s *StorageTestSuite) TestDatabase_VersionedKeys() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - for i := uint64(1); i <= 100; i++ { - s.Require().NoError(db.ApplyChangeset(corestore.NewChangesetWithPairs( - i, - map[string]corestore.KVPairs{ - storeKey1: {{Key: []byte("key"), Value: []byte(fmt.Sprintf("value%03d", i))}}, - }, - ))) - } - - for i := uint64(1); i <= 100; i++ { - bz, err := db.Get(storeKey1Bytes, i, []byte("key")) - s.Require().NoError(err) - s.Require().Equal(fmt.Sprintf("value%03d", i), string(bz)) - } -} - -func (s *StorageTestSuite) TestDatabase_GetVersionedKey() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - // store a key at version 1 - s.Require().NoError(db.ApplyChangeset(corestore.NewChangesetWithPairs( - 1, - map[string]corestore.KVPairs{ - storeKey1: {{Key: []byte("key"), Value: []byte("value001")}}, - }, - ))) - - // assume chain progresses to version 10 w/o any changes to key - bz, err := db.Get(storeKey1Bytes, 10, []byte("key")) - s.Require().NoError(err) - s.Require().Equal([]byte("value001"), bz) - - ok, err := db.Has(storeKey1Bytes, 10, 
[]byte("key")) - s.Require().NoError(err) - s.Require().True(ok) - - // chain progresses to version 11 with an update to key - s.Require().NoError(db.ApplyChangeset(corestore.NewChangesetWithPairs( - 11, - map[string]corestore.KVPairs{ - storeKey1: {{Key: []byte("key"), Value: []byte("value011")}}, - }, - ))) - - bz, err = db.Get(storeKey1Bytes, 10, []byte("key")) - s.Require().NoError(err) - s.Require().Equal([]byte("value001"), bz) - - ok, err = db.Has(storeKey1Bytes, 10, []byte("key")) - s.Require().NoError(err) - s.Require().True(ok) - - for i := uint64(11); i <= 14; i++ { - bz, err = db.Get(storeKey1Bytes, i, []byte("key")) - s.Require().NoError(err) - s.Require().Equal([]byte("value011"), bz) - - ok, err = db.Has(storeKey1Bytes, i, []byte("key")) - s.Require().NoError(err) - s.Require().True(ok) - } - - // chain progresses to version 15 with a delete to key - s.Require().NoError(db.ApplyChangeset(corestore.NewChangesetWithPairs( - 15, - map[string]corestore.KVPairs{storeKey1: {{Key: []byte("key"), Remove: true}}}, - ))) - - // all queries up to version 14 should return the latest value - for i := uint64(1); i <= 14; i++ { - bz, err = db.Get(storeKey1Bytes, i, []byte("key")) - s.Require().NoError(err) - s.Require().NotNil(bz) - - ok, err = db.Has(storeKey1Bytes, i, []byte("key")) - s.Require().NoError(err) - s.Require().True(ok) - } - - // all queries after version 15 should return nil - for i := uint64(15); i <= 17; i++ { - bz, err = db.Get(storeKey1Bytes, i, []byte("key")) - s.Require().NoError(err) - s.Require().Nil(bz) - - ok, err = db.Has(storeKey1Bytes, i, []byte("key")) - s.Require().NoError(err) - s.Require().False(ok) - } -} - -func (s *StorageTestSuite) TestDatabase_ApplyChangeset() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - cs := corestore.NewChangesetWithPairs(1, map[string]corestore.KVPairs{storeKey1: {}}) - for i := 0; i < 100; i++ { - cs.AddKVPair(storeKey1Bytes, corestore.KVPair{Key: 
[]byte(fmt.Sprintf("key%03d", i)), Value: []byte("value")}) - } - - for i := 0; i < 100; i++ { - if i%10 == 0 { - cs.AddKVPair(storeKey1Bytes, corestore.KVPair{Key: []byte(fmt.Sprintf("key%03d", i)), Remove: true}) - } - } - - s.Require().NoError(db.ApplyChangeset(cs)) - - lv, err := db.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(uint64(1), lv) - - for i := 0; i < 1; i++ { - ok, err := db.Has(storeKey1Bytes, 1, []byte(fmt.Sprintf("key%03d", i))) - s.Require().NoError(err) - - if i%10 == 0 { - s.Require().False(ok) - } else { - s.Require().True(ok) - } - } -} - -func (s *StorageTestSuite) TestDatabase_IteratorEmptyDomain() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - iter, err := db.Iterator(storeKey1Bytes, 1, []byte{}, []byte{}) - s.Require().Error(err) - s.Require().Nil(iter) -} - -func (s *StorageTestSuite) TestDatabase_IteratorClose() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - iter, err := db.Iterator(storeKey1Bytes, 1, []byte("key000"), nil) - s.Require().NoError(err) - iter.Close() - - s.Require().False(iter.Valid()) -} - -func (s *StorageTestSuite) TestDatabase_IteratorDomain() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - testCases := map[string]struct { - start, end []byte - }{ - "start without end domain": { - start: []byte("key010"), - }, - "start and end domain": { - start: []byte("key010"), - end: []byte("key020"), - }, - } - - for name, tc := range testCases { - s.Run(name, func() { - iter, err := db.Iterator(storeKey1Bytes, 1, tc.start, tc.end) - s.Require().NoError(err) - - defer iter.Close() - - start, end := iter.Domain() - s.Require().Equal(tc.start, start) - s.Require().Equal(tc.end, end) - }) - } -} - -func (s *StorageTestSuite) TestDatabase_Iterator() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - cs := corestore.NewChangesetWithPairs(1, 
map[string]corestore.KVPairs{storeKey1: {}}) - for i := 0; i < 100; i++ { - key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 - val := fmt.Sprintf("val%03d", i) // val000, val001, ..., val099 - - cs.AddKVPair(storeKey1Bytes, corestore.KVPair{Key: []byte(key), Value: []byte(val), Remove: false}) - } - - s.Require().NoError(db.ApplyChangeset(cs)) - - // iterator without an end key over multiple versions - for v := uint64(1); v < 5; v++ { - itr, err := db.Iterator(storeKey1Bytes, v, []byte("key000"), nil) - s.Require().NoError(err) - - var i, count int - for ; itr.Valid(); itr.Next() { - s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key())) - s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value()) - - i++ - count++ - } - s.Require().NoError(itr.Error()) - s.Require().Equal(100, count) - - // seek past domain, which should make the iterator invalid and produce an error - s.Require().False(itr.Valid()) - - err = itr.Close() - s.Require().NoError(err, "Failed to close iterator") - } - - // iterator with a start and end domain over multiple versions - for v := uint64(1); v < 5; v++ { - itr2, err := db.Iterator(storeKey1Bytes, v, []byte("key010"), []byte("key019")) - s.Require().NoError(err) - - i, count := 10, 0 - for ; itr2.Valid(); itr2.Next() { - s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr2.Key()) - s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr2.Value()) - - i++ - count++ - } - s.Require().Equal(9, count) - s.Require().NoError(itr2.Error()) - - // seek past domain, which should make the iterator invalid and produce an error - s.Require().False(itr2.Valid()) - - err = itr2.Close() - if err != nil { - return - } - } - - // start must be <= end - iter3, err := db.Iterator(storeKey1Bytes, 1, []byte("key020"), []byte("key019")) - s.Require().Error(err) - s.Require().Nil(iter3) -} - -func (s *StorageTestSuite) TestDatabase_Iterator_RangedDeletes() { - db, err := s.NewDB(s.T().TempDir()) - 
s.Require().NoError(err) - defer db.Close() - - s.Require().NoError(db.ApplyChangeset(corestore.NewChangesetWithPairs( - 1, - map[string]corestore.KVPairs{ - storeKey1: { - {Key: []byte("key001"), Value: []byte("value001"), Remove: false}, - {Key: []byte("key002"), Value: []byte("value001"), Remove: false}, - }, - }, - ))) - - s.Require().NoError(db.ApplyChangeset(corestore.NewChangesetWithPairs( - 5, - map[string]corestore.KVPairs{ - storeKey1: {{Key: []byte("key002"), Value: []byte("value002"), Remove: false}}, - }, - ))) - - s.Require().NoError(db.ApplyChangeset(corestore.NewChangesetWithPairs( - 10, - map[string]corestore.KVPairs{ - storeKey1: {{Key: []byte("key002"), Remove: true}}, - }, - ))) - - itr, err := db.Iterator(storeKey1Bytes, 11, []byte("key001"), nil) - s.Require().NoError(err) - - defer itr.Close() - - // there should only be one valid key in the iterator -- key001 - var count int - for ; itr.Valid(); itr.Next() { - s.Require().Equal([]byte("key001"), itr.Key()) - count++ - } - s.Require().Equal(1, count) - s.Require().NoError(itr.Error()) -} - -func (s *StorageTestSuite) TestDatabase_IteratorMultiVersion() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - // for versions 1-49, set all 10 keys - for v := uint64(1); v < 50; v++ { - cs := corestore.NewChangesetWithPairs(v, map[string]corestore.KVPairs{storeKey1: {}}) - for i := 0; i < 10; i++ { - key := fmt.Sprintf("key%03d", i) - val := fmt.Sprintf("val%03d-%03d", i, v) - - cs.AddKVPair(storeKey1Bytes, corestore.KVPair{Key: []byte(key), Value: []byte(val)}) - } - - s.Require().NoError(db.ApplyChangeset(cs)) - } - - // for versions 50-100, only update even keys - for v := uint64(50); v <= 100; v++ { - cs := corestore.NewChangesetWithPairs(v, map[string]corestore.KVPairs{storeKey1: {}}) - for i := 0; i < 10; i++ { - if i%2 == 0 { - key := fmt.Sprintf("key%03d", i) - val := fmt.Sprintf("val%03d-%03d", i, v) - - cs.AddKVPair(storeKey1Bytes, corestore.KVPair{Key: 
[]byte(key), Value: []byte(val), Remove: false}) - } - } - - s.Require().NoError(db.ApplyChangeset(cs)) - } - - itr, err := db.Iterator(storeKey1Bytes, 69, []byte("key000"), nil) - s.Require().NoError(err) - - defer itr.Close() - - // All keys should be present; All odd keys should have a value that reflects - // version 49, and all even keys should have a value that reflects the desired - // version, 69. - var i, count int - for ; itr.Valid(); itr.Next() { - s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key())) - - if i%2 == 0 { - s.Require().Equal([]byte(fmt.Sprintf("val%03d-%03d", i, 69)), itr.Value()) - } else { - s.Require().Equal([]byte(fmt.Sprintf("val%03d-%03d", i, 49)), itr.Value()) - } - - i = (i + 1) % 10 - count++ - } - - s.Require().NoError(itr.Error()) - s.Require().Equal(10, count) -} - -func (s *StorageTestSuite) TestDatabaseIterator_SkipVersion() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - - defer db.Close() - - dbApplyChangeset(s.T(), db, 58827506, storeKey1, [][]byte{[]byte("keyC")}, [][]byte{[]byte("value003")}) - dbApplyChangeset(s.T(), db, 58827506, storeKey1, [][]byte{[]byte("keyE")}, [][]byte{[]byte("value000")}) - dbApplyChangeset(s.T(), db, 58827506, storeKey1, [][]byte{[]byte("keyF")}, [][]byte{[]byte("value000")}) - dbApplyChangeset(s.T(), db, 58833605, storeKey1, [][]byte{[]byte("keyC")}, [][]byte{[]byte("value004")}) - dbApplyChangeset(s.T(), db, 58833606, storeKey1, [][]byte{[]byte("keyD")}, [][]byte{[]byte("value006")}) - - itr, err := db.Iterator(storeKey1Bytes, 58831525, []byte("key"), nil) - s.Require().NoError(err) - defer itr.Close() - - count := make(map[string]struct{}) - for ; itr.Valid(); itr.Next() { - count[string(itr.Key())] = struct{}{} - } - - s.Require().NoError(itr.Error()) - s.Require().Equal(3, len(count)) -} - -func (s *StorageTestSuite) TestDatabaseIterator_ForwardIteration() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() 
- - dbApplyChangeset(s.T(), db, 8, storeKey1, [][]byte{[]byte("keyA")}, [][]byte{[]byte("value001")}) - dbApplyChangeset(s.T(), db, 9, storeKey1, [][]byte{[]byte("keyB")}, [][]byte{[]byte("value002")}) - dbApplyChangeset(s.T(), db, 10, storeKey1, [][]byte{[]byte("keyC")}, [][]byte{[]byte("value003")}) - dbApplyChangeset(s.T(), db, 11, storeKey1, [][]byte{[]byte("keyD")}, [][]byte{[]byte("value004")}) - - dbApplyChangeset(s.T(), db, 2, storeKey1, [][]byte{[]byte("keyD")}, [][]byte{[]byte("value007")}) - dbApplyChangeset(s.T(), db, 3, storeKey1, [][]byte{[]byte("keyE")}, [][]byte{[]byte("value008")}) - dbApplyChangeset(s.T(), db, 4, storeKey1, [][]byte{[]byte("keyF")}, [][]byte{[]byte("value009")}) - dbApplyChangeset(s.T(), db, 5, storeKey1, [][]byte{[]byte("keyH")}, [][]byte{[]byte("value010")}) - - itr, err := db.Iterator(storeKey1Bytes, 6, nil, []byte("keyZ")) - s.Require().NoError(err) - - defer itr.Close() - count := 0 - for ; itr.Valid(); itr.Next() { - count++ - } - - s.Require().NoError(itr.Error()) - s.Require().Equal(4, count) -} - -func (s *StorageTestSuite) TestDatabaseIterator_ForwardIterationHigher() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - dbApplyChangeset(s.T(), db, 9, storeKey1, [][]byte{[]byte("keyB")}, [][]byte{[]byte("value002")}) - dbApplyChangeset(s.T(), db, 10, storeKey1, [][]byte{[]byte("keyC")}, [][]byte{[]byte("value003")}) - dbApplyChangeset(s.T(), db, 11, storeKey1, [][]byte{[]byte("keyD")}, [][]byte{[]byte("value004")}) - - dbApplyChangeset(s.T(), db, 12, storeKey1, [][]byte{[]byte("keyD")}, [][]byte{[]byte("value007")}) - dbApplyChangeset(s.T(), db, 13, storeKey1, [][]byte{[]byte("keyE")}, [][]byte{[]byte("value008")}) - dbApplyChangeset(s.T(), db, 14, storeKey1, [][]byte{[]byte("keyF")}, [][]byte{[]byte("value009")}) - dbApplyChangeset(s.T(), db, 15, storeKey1, [][]byte{[]byte("keyH")}, [][]byte{[]byte("value010")}) - - itr, err := db.Iterator(storeKey1Bytes, 6, nil, []byte("keyZ")) - 
s.Require().NoError(err) - - defer itr.Close() - - count := 0 - for ; itr.Valid(); itr.Next() { - count++ - } - - s.Require().Equal(0, count) -} - -func (s *StorageTestSuite) TestDatabaseIterator_WithDelete() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - dbApplyChangeset(s.T(), db, 1, storeKey1, [][]byte{[]byte("keyA")}, [][]byte{[]byte("value001")}) - dbApplyChangeset(s.T(), db, 2, storeKey1, [][]byte{[]byte("keyA")}, [][]byte{nil}) // delete - - itr, err := db.Iterator(storeKey1Bytes, 1, nil, nil) - s.Require().NoError(err) - - count := 0 - for ; itr.Valid(); itr.Next() { - count++ - } - s.Require().Equal(1, count) - - itr, err = db.Iterator(storeKey1Bytes, 2, nil, nil) - s.Require().NoError(err) - - count = 0 - for ; itr.Valid(); itr.Next() { - count++ - } - s.Require().Equal(0, count) -} - -func (s *StorageTestSuite) TestDatabase_IteratorNoDomain() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - // for versions 1-50, set all 10 keys - for v := uint64(1); v <= 50; v++ { - cs := corestore.NewChangesetWithPairs(v, map[string]corestore.KVPairs{storeKey1: {}}) - for i := 0; i < 10; i++ { - key := fmt.Sprintf("key%03d", i) - val := fmt.Sprintf("val%03d-%03d", i, v) - - cs.AddKVPair(storeKey1Bytes, corestore.KVPair{Key: []byte(key), Value: []byte(val), Remove: false}) - } - - s.Require().NoError(db.ApplyChangeset(cs)) - } - - // create an iterator over the entire domain - itr, err := db.Iterator(storeKey1Bytes, 50, nil, nil) - s.Require().NoError(err) - - defer itr.Close() - - var i, count int - for ; itr.Valid(); itr.Next() { - s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key())) - s.Require().Equal([]byte(fmt.Sprintf("val%03d-%03d", i, 50)), itr.Value()) - - i++ - count++ - } - s.Require().NoError(itr.Error()) - s.Require().Equal(10, count) -} - -func (s *StorageTestSuite) TestDatabase_Prune() { - if slices.Contains(s.SkipTests, s.T().Name()) { - 
s.T().SkipNow() - } - - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - // for versions 1-50, set 10 keys - for v := uint64(1); v <= 50; v++ { - cs := corestore.NewChangesetWithPairs(v, map[string]corestore.KVPairs{storeKey1: {}}) - for i := 0; i < 10; i++ { - key := fmt.Sprintf("key%03d", i) - val := fmt.Sprintf("val%03d-%03d", i, v) - - cs.AddKVPair(storeKey1Bytes, corestore.KVPair{Key: []byte(key), Value: []byte(val)}) - } - - s.Require().NoError(db.ApplyChangeset(cs)) - } - - // prune the first 25 versions - s.Require().NoError(db.Prune(25)) - - latestVersion, err := db.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(uint64(50), latestVersion) - - // Ensure all keys are no longer present up to and including version 25 and - // all keys are present after version 25. - for v := uint64(1); v <= 50; v++ { - for i := 0; i < 10; i++ { - key := fmt.Sprintf("key%03d", i) - val := fmt.Sprintf("val%03d-%03d", i, v) - - bz, err := db.Get(storeKey1Bytes, v, []byte(key)) - if v <= 25 { - s.Require().Error(err) - s.Require().Nil(bz) - } else { - s.Require().NoError(err) - s.Require().Equal([]byte(val), bz) - } - } - } - - itr, err := db.Iterator(storeKey1Bytes, 25, []byte("key000"), nil) - s.Require().NoError(err) - s.Require().False(itr.Valid()) - - // prune the latest version which should prune the entire dataset - s.Require().NoError(db.Prune(50)) - - for v := uint64(1); v <= 50; v++ { - for i := 0; i < 10; i++ { - key := fmt.Sprintf("key%03d", i) - - bz, err := db.Get(storeKey1Bytes, v, []byte(key)) - s.Require().Error(err) - s.Require().Nil(bz) - } - } -} - -func (s *StorageTestSuite) TestDatabase_Prune_KeepRecent() { - if slices.Contains(s.SkipTests, s.T().Name()) { - s.T().SkipNow() - } - - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - key := []byte("key") - - // write a key at three different versions - s.Require().NoError(db.ApplyChangeset(corestore.NewChangesetWithPairs( 
- 1, - map[string]corestore.KVPairs{storeKey1: {{Key: key, Value: []byte("val001"), Remove: false}}}, - ))) - s.Require().NoError(db.ApplyChangeset(corestore.NewChangesetWithPairs( - 100, - map[string]corestore.KVPairs{storeKey1: {{Key: key, Value: []byte("val100"), Remove: false}}}, - ))) - s.Require().NoError(db.ApplyChangeset(corestore.NewChangesetWithPairs( - 200, - map[string]corestore.KVPairs{storeKey1: {{Key: key, Value: []byte("val200"), Remove: false}}}, - ))) - - // prune version 50 - s.Require().NoError(db.Prune(50)) - - // ensure queries for versions 50 and older return nil - bz, err := db.Get(storeKey1Bytes, 49, key) - s.Require().Error(err) - s.Require().Nil(bz) - - itr, err := db.Iterator(storeKey1Bytes, 49, nil, nil) - s.Require().NoError(err) - s.Require().False(itr.Valid()) - - defer itr.Close() - - // ensure the value previously at version 1 is still there for queries greater than 50 - bz, err = db.Get(storeKey1Bytes, 51, key) - s.Require().NoError(err) - s.Require().Equal([]byte("val001"), bz) - - // ensure the correct value at a greater height - bz, err = db.Get(storeKey1Bytes, 200, key) - s.Require().NoError(err) - s.Require().Equal([]byte("val200"), bz) - - // prune latest height and ensure we have the previous version when querying above it - s.Require().NoError(db.Prune(200)) - - bz, err = db.Get(storeKey1Bytes, 201, key) - s.Require().NoError(err) - s.Require().Equal([]byte("val200"), bz) -} - -func (s *StorageTestSuite) TestDatabase_Restore() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - toVersion := uint64(10) - keyCount := 10 - - // for versions 1-10, set 10 keys - for v := uint64(1); v <= toVersion; v++ { - cs := corestore.NewChangesetWithPairs(v, map[string]corestore.KVPairs{storeKey1: {}}) - for i := 0; i < keyCount; i++ { - key := fmt.Sprintf("key%03d", i) - val := fmt.Sprintf("val%03d-%03d", i, v) - - cs.AddKVPair(storeKey1Bytes, corestore.KVPair{Key: []byte(key), Value: []byte(val)}) - } 
- - s.Require().NoError(db.ApplyChangeset(cs)) - } - - latestVersion, err := db.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(uint64(10), latestVersion) - - chStorage := make(chan *corestore.StateChanges, 5) - - go func() { - for i := uint64(11); i <= 15; i++ { - kvPairs := []corestore.KVPair{} - for j := 0; j < keyCount; j++ { - key := fmt.Sprintf("key%03d-%03d", j, i) - val := fmt.Sprintf("val%03d-%03d", j, i) - - kvPairs = append(kvPairs, corestore.KVPair{Key: []byte(key), Value: []byte(val)}) - } - chStorage <- &corestore.StateChanges{ - Actor: storeKey1Bytes, - StateChanges: kvPairs, - } - } - close(chStorage) - }() - - // restore with snapshot version smaller than latest version - // should return an error - err = db.Restore(9, chStorage) - s.Require().Error(err) - - // restore - err = db.Restore(11, chStorage) - s.Require().NoError(err) - - // check the storage - for i := uint64(11); i <= 15; i++ { - for j := 0; j < keyCount; j++ { - key := fmt.Sprintf("key%03d-%03d", j, i) - val := fmt.Sprintf("val%03d-%03d", j, i) - - v, err := db.Get(storeKey1Bytes, 11, []byte(key)) - s.Require().NoError(err) - s.Require().Equal([]byte(val), v) - } - } -} - -func (s *StorageTestSuite) TestUpgradable() { - ss, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer ss.Close() - - // Ensure the database is upgradable. 
- if _, ok := ss.db.(store.UpgradableDatabase); !ok { - s.T().Skip("database is not upgradable") - } - - storeKeys := []string{"store1", "store2", "store3"} - uptoVersion := uint64(50) - keyCount := 10 - for _, storeKey := range storeKeys { - for v := uint64(1); v <= uptoVersion; v++ { - keys := make([][]byte, keyCount) - vals := make([][]byte, keyCount) - for i := 0; i < keyCount; i++ { - keys[i] = []byte(fmt.Sprintf("key%03d", i)) - vals[i] = []byte(fmt.Sprintf("val%03d-%03d", i, v)) - } - dbApplyChangeset(s.T(), ss, v, storeKey, keys, vals) - } - } - - // prune storekeys (`store2`, `store3`) - removedStoreKeys := []string{storeKeys[1], storeKeys[2]} - err = ss.PruneStoreKeys(removedStoreKeys, uptoVersion) - s.Require().NoError(err) - // should be able to query before Prune for removed storeKeys - for _, storeKey := range removedStoreKeys { - for v := uint64(1); v <= uptoVersion; v++ { - for i := 0; i < keyCount; i++ { - bz, err := ss.Get([]byte(storeKey), v, []byte(fmt.Sprintf("key%03d", i))) - s.Require().NoError(err) - s.Require().Equal([]byte(fmt.Sprintf("val%03d-%03d", i, v)), bz) - } - } - } - s.Require().NoError(ss.Prune(uptoVersion)) - // should not be able to query after Prune - // skip the test of RocksDB - if !slices.Contains(s.SkipTests, "TestUpgradable_Prune") { - for _, storeKey := range removedStoreKeys { - // it will return error ErrVersionPruned - for v := uint64(1); v <= uptoVersion; v++ { - for i := 0; i < keyCount; i++ { - _, err := ss.Get([]byte(storeKey), v, []byte(fmt.Sprintf("key%03d", i))) - s.Require().Error(err) - } - } - v := uptoVersion + 1 - for i := 0; i < keyCount; i++ { - val, err := ss.Get([]byte(storeKey), v, []byte(fmt.Sprintf("key%03d", i))) - s.Require().NoError(err) - s.Require().Nil(val) - } - } - } -} - -func (s *StorageTestSuite) TestRemovingOldStoreKey() { - ss, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer ss.Close() - - // Ensure the database is upgradable. 
- if _, ok := ss.db.(store.UpgradableDatabase); !ok { - s.T().Skip("database is not upgradable") - } - - storeKeys := []string{"store1", "store2", "store3"} - uptoVersion := uint64(50) - keyCount := 10 - for _, storeKey := range storeKeys { - for v := uint64(1); v <= uptoVersion; v++ { - keys := make([][]byte, keyCount) - vals := make([][]byte, keyCount) - for i := 0; i < keyCount; i++ { - keys[i] = []byte(fmt.Sprintf("key%03d-%03d", i, v)) - vals[i] = []byte(fmt.Sprintf("val%03d-%03d", i, v)) - } - dbApplyChangeset(s.T(), ss, v, storeKey, keys, vals) - } - } - - // remove `store1` and `store3` - removedStoreKeys := []string{storeKeys[0], storeKeys[2]} - err = ss.PruneStoreKeys(removedStoreKeys, uptoVersion) - s.Require().NoError(err) - // should be able to query before Prune for removed storeKeys - for _, storeKey := range removedStoreKeys { - for v := uint64(1); v <= uptoVersion; v++ { - for i := 0; i < keyCount; i++ { - bz, err := ss.Get([]byte(storeKey), v, []byte(fmt.Sprintf("key%03d-%03d", i, v))) - s.Require().NoError(err) - s.Require().Equal([]byte(fmt.Sprintf("val%03d-%03d", i, v)), bz) - } - } - } - // add `store1` back - newStoreKeys := []string{storeKeys[0], storeKeys[1]} - newVersion := uptoVersion + 10 - for _, storeKey := range newStoreKeys { - for v := uptoVersion + 1; v <= newVersion; v++ { - keys := make([][]byte, keyCount) - vals := make([][]byte, keyCount) - for i := 0; i < keyCount; i++ { - keys[i] = []byte(fmt.Sprintf("key%03d-%03d", i, v)) - vals[i] = []byte(fmt.Sprintf("val%03d-%03d", i, v)) - } - dbApplyChangeset(s.T(), ss, v, storeKey, keys, vals) - } - } - - s.Require().NoError(ss.Prune(newVersion)) - // skip the test of RocksDB - if !slices.Contains(s.SkipTests, "TestUpgradable_Prune") { - for _, storeKey := range removedStoreKeys { - queryVersion := newVersion + 1 - // should not be able to query after Prune during 1 ~ uptoVersion - for v := uint64(1); v <= uptoVersion; v++ { - for i := 0; i < keyCount; i++ { - val, err := 
ss.Get([]byte(storeKey), queryVersion, []byte(fmt.Sprintf("key%03d", i))) - s.Require().NoError(err) - s.Require().Nil(val) - } - } - // should be able to query after Prune during uptoVersion + 1 ~ newVersion - // for `store1` added back - for v := uptoVersion + 1; v <= newVersion; v++ { - for i := 0; i < keyCount; i++ { - val, err := ss.Get([]byte(storeKey), queryVersion, []byte(fmt.Sprintf("key%03d-%03d", i, v))) - s.Require().NoError(err) - if storeKey == storeKeys[0] { - // `store1` is added back - s.Require().Equal([]byte(fmt.Sprintf("val%03d-%03d", i, v)), val) - } else { - // `store3` is removed - s.Require().Nil(val) - } - } - } - } - } -} - -// TestVersionExists tests the VersionExists method of the Database struct. -func (s *StorageTestSuite) TestVersionExists() { - // Define test cases - testCases := []struct { - name string - setup func(t *testing.T, db *StorageStore) - version uint64 - expectedExists bool - expectError bool - }{ - { - name: "Fresh database: version 0 exists", - setup: func(t *testing.T, db *StorageStore) { - t.Helper() - // No setup needed for fresh database - }, - version: 0, - expectedExists: true, - expectError: false, - }, - { - name: "Fresh database: version 1 exists", - setup: func(t *testing.T, db *StorageStore) { - t.Helper() - // No setup needed for fresh database - }, - version: 1, - expectedExists: false, - expectError: false, - }, - { - name: "After setting latest version to 10, version 5 exists", - setup: func(t *testing.T, db *StorageStore) { - t.Helper() - err := db.SetLatestVersion(10) - if err != nil { - t.Fatalf("Setting latest version should not error: %v", err) - } - }, - version: 5, - expectedExists: true, // Since pruning hasn't occurred, earliestVersion is still 0 - expectError: false, - }, - { - name: "After setting latest version to 10 and pruning to 5, version 4 does not exist", - setup: func(t *testing.T, db *StorageStore) { - t.Helper() - err := db.SetLatestVersion(10) - if err != nil { - t.Fatalf("Setting 
latest version should not error: %v", err) - } - - err = db.Prune(5) - if err != nil { - t.Fatalf("Pruning to version 5 should not error: %v", err) - } - }, - version: 4, - expectedExists: false, - expectError: false, - }, - { - name: "After setting latest version to 10 and pruning to 5, version 5 does not exist", - setup: func(t *testing.T, db *StorageStore) { - t.Helper() - err := db.SetLatestVersion(10) - if err != nil { - t.Fatalf("Setting latest version should not error: %v", err) - } - - err = db.Prune(5) - if err != nil { - t.Fatalf("Pruning to version 5 should not error: %v", err) - } - }, - version: 5, - expectedExists: false, - expectError: false, - }, - { - name: "After setting latest version to 10 and pruning to 5, version 6 exists", - setup: func(t *testing.T, db *StorageStore) { - t.Helper() - err := db.SetLatestVersion(10) - if err != nil { - t.Fatalf("Setting latest version should not error: %v", err) - } - - err = db.Prune(5) - if err != nil { - t.Fatalf("Pruning to version 5 should not error: %v", err) - } - }, - version: 6, - expectedExists: true, - expectError: false, - }, - { - name: "After pruning to 0, all versions >=1 exist", - setup: func(t *testing.T, db *StorageStore) { - t.Helper() - err := db.SetLatestVersion(10) - if err != nil { - t.Fatalf("Setting latest version should not error: %v", err) - } - // Prune to version 0 - err = db.Prune(0) - if err != nil { - t.Fatalf("Pruning to version 0 should not error: %v", err) - } - }, - version: 1, - expectedExists: true, - expectError: false, - }, - } - - // Iterate over each test case - for _, tc := range testCases { - s.T().Run(tc.name, func(t *testing.T) { - // Initialize the database for each test - db, err := s.NewDB(t.TempDir()) - require.NoError(t, err, "Failed to initialize the database") - defer db.Close() - - // Setup test environment - tc.setup(t, db) - - // Call VersionExists and check the result - exists, err := db.VersionExists(tc.version) - if tc.expectError { - require.Error(t, 
err, "Expected error but got none") - } else { - require.NoError(t, err, "Did not expect an error but got one") - require.Equal(t, tc.expectedExists, exists, "Version existence mismatch") - } - }) - } -} - -func dbApplyChangeset( - t *testing.T, - db store.VersionedWriter, - version uint64, - storeKey string, - keys, vals [][]byte, -) { - t.Helper() - - require.Greater(t, version, uint64(0)) - require.Equal(t, len(keys), len(vals)) - - cs := corestore.NewChangeset(version) - for i := 0; i < len(keys); i++ { - remove := false - if vals[i] == nil { - remove = true - } - - cs.AddKVPair([]byte(storeKey), corestore.KVPair{Key: keys[i], Value: vals[i], Remove: remove}) - } - - require.NoError(t, db.ApplyChangeset(cs)) -} diff --git a/store/v2/storage/store.go b/store/v2/storage/store.go deleted file mode 100644 index d53e323774ea..000000000000 --- a/store/v2/storage/store.go +++ /dev/null @@ -1,162 +0,0 @@ -package storage - -import ( - "errors" - "fmt" - - "cosmossdk.io/core/log" - corestore "cosmossdk.io/core/store" - "cosmossdk.io/store/v2" - "cosmossdk.io/store/v2/snapshots" -) - -const ( - // TODO: it is a random number, need to be tuned - defaultBatchBufferSize = 100000 -) - -var ( - _ store.VersionedWriter = (*StorageStore)(nil) - _ snapshots.StorageSnapshotter = (*StorageStore)(nil) - _ store.Pruner = (*StorageStore)(nil) - _ store.UpgradableDatabase = (*StorageStore)(nil) -) - -// StorageStore is a wrapper around the store.VersionedWriter interface. -type StorageStore struct { - logger log.Logger - db Database -} - -// NewStorageStore returns a reference to a new StorageStore. -func NewStorageStore(db Database, logger log.Logger) *StorageStore { - return &StorageStore{ - logger: logger, - db: db, - } -} - -// Has returns true if the key exists in the store. -func (ss *StorageStore) Has(storeKey []byte, version uint64, key []byte) (bool, error) { - return ss.db.Has(storeKey, version, key) -} - -// Get returns the value associated with the given key. 
-func (ss *StorageStore) Get(storeKey []byte, version uint64, key []byte) ([]byte, error) { - return ss.db.Get(storeKey, version, key) -} - -// ApplyChangeset applies the given changeset to the storage. -func (ss *StorageStore) ApplyChangeset(cs *corestore.Changeset) error { - b, err := ss.db.NewBatch(cs.Version) - if err != nil { - return err - } - - for _, pairs := range cs.Changes { - for _, kvPair := range pairs.StateChanges { - if kvPair.Remove { - if err := b.Delete(pairs.Actor, kvPair.Key); err != nil { - return err - } - } else { - if err := b.Set(pairs.Actor, kvPair.Key, kvPair.Value); err != nil { - return err - } - } - } - } - - if err := b.Write(); err != nil { - return err - } - - return nil -} - -// GetLatestVersion returns the latest version of the store. -func (ss *StorageStore) GetLatestVersion() (uint64, error) { - return ss.db.GetLatestVersion() -} - -// SetLatestVersion sets the latest version of the store. -func (ss *StorageStore) SetLatestVersion(version uint64) error { - return ss.db.SetLatestVersion(version) -} - -// VersionExists returns true if the given version exists in the store. -func (ss *StorageStore) VersionExists(version uint64) (bool, error) { - return ss.db.VersionExists(version) -} - -// Iterator returns an iterator over the specified domain and prefix. -func (ss *StorageStore) Iterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) { - return ss.db.Iterator(storeKey, version, start, end) -} - -// ReverseIterator returns an iterator over the specified domain and prefix in reverse. -func (ss *StorageStore) ReverseIterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) { - return ss.db.ReverseIterator(storeKey, version, start, end) -} - -// Prune prunes the store up to the given version. -func (ss *StorageStore) Prune(version uint64) error { - return ss.db.Prune(version) -} - -// Restore restores the store from the given channel. 
-func (ss *StorageStore) Restore(version uint64, chStorage <-chan *corestore.StateChanges) error { - latestVersion, err := ss.db.GetLatestVersion() - if err != nil { - return fmt.Errorf("failed to get latest version: %w", err) - } - if version <= latestVersion { - return fmt.Errorf("the snapshot version %d is not greater than latest version %d", version, latestVersion) - } - - b, err := ss.db.NewBatch(version) - if err != nil { - return err - } - - for kvPair := range chStorage { - for _, kv := range kvPair.StateChanges { - if err := b.Set(kvPair.Actor, kv.Key, kv.Value); err != nil { - return err - } - if b.Size() > defaultBatchBufferSize { - if err := b.Write(); err != nil { - return err - } - b, err = ss.db.NewBatch(version) - if err != nil { - return err - } - } - } - } - - if b.Size() > 0 { - if err := b.Write(); err != nil { - return err - } - } - - return nil -} - -// PruneStoreKeys prunes the store keys which implements the store.UpgradableDatabase -// interface. -func (ss *StorageStore) PruneStoreKeys(storeKeys []string, version uint64) error { - gdb, ok := ss.db.(store.UpgradableDatabase) - if !ok { - return errors.New("db does not implement UpgradableDatabase interface") - } - - return gdb.PruneStoreKeys(storeKeys, version) -} - -// Close closes the store. -func (ss *StorageStore) Close() error { - return ss.db.Close() -} diff --git a/store/v2/storage/util/iterator.go b/store/v2/storage/util/iterator.go deleted file mode 100644 index fe207314c717..000000000000 --- a/store/v2/storage/util/iterator.go +++ /dev/null @@ -1,53 +0,0 @@ -package util - -// IterateWithPrefix returns the begin and end keys for an iterator over a domain -// and prefix. 
-func IterateWithPrefix(prefix, begin, end []byte) ([]byte, []byte) { - if len(prefix) == 0 { - return begin, end - } - - begin = cloneAppend(prefix, begin) - - if end == nil { - end = CopyIncr(prefix) - } else { - end = cloneAppend(prefix, end) - } - - return begin, end -} - -func cloneAppend(front, tail []byte) (res []byte) { - res = make([]byte, len(front)+len(tail)) - - n := copy(res, front) - copy(res[n:], tail) - - return res -} - -func CopyIncr(bz []byte) []byte { - if len(bz) == 0 { - panic("copyIncr expects non-zero bz length") - } - - ret := make([]byte, len(bz)) - copy(ret, bz) - - for i := len(bz) - 1; i >= 0; i-- { - if ret[i] < byte(0xFF) { - ret[i]++ - return ret - } - - ret[i] = byte(0x00) - - if i == 0 { - // overflow - return nil - } - } - - return nil -} diff --git a/store/v2/store.go b/store/v2/store.go index bf967d0f78a6..20c6ab3c8ef2 100644 --- a/store/v2/store.go +++ b/store/v2/store.go @@ -61,9 +61,6 @@ type RootStore interface { // Backend defines the interface for the RootStore backends. type Backend interface { - // GetStateStorage returns the SS backend. - GetStateStorage() VersionedWriter - // GetStateCommitment returns the SC backend. 
GetStateCommitment() Committer } diff --git a/tests/integration/accounts/base_account_test.go b/tests/integration/accounts/base_account_test.go index a50975b8ff79..8db4cbf1a9e3 100644 --- a/tests/integration/accounts/base_account_test.go +++ b/tests/integration/accounts/base_account_test.go @@ -51,12 +51,14 @@ func TestBaseAccount(t *testing.T) { } func sendTx(t *testing.T, ctx sdk.Context, app *simapp.SimApp, sender []byte, msg sdk.Msg) { + t.Helper() tx := sign(t, ctx, app, sender, privKey, msg) _, _, err := app.SimDeliver(app.TxEncode, tx) require.NoError(t, err) } func sign(t *testing.T, ctx sdk.Context, app *simapp.SimApp, from sdk.AccAddress, privKey cryptotypes.PrivKey, msg sdk.Msg) sdk.Tx { + t.Helper() r := rand.New(rand.NewSource(0)) accNum, err := app.AccountsKeeper.AccountByNumber.Get(ctx, from) @@ -81,12 +83,14 @@ func sign(t *testing.T, ctx sdk.Context, app *simapp.SimApp, from sdk.AccAddress } func bechify(t *testing.T, app *simapp.SimApp, addr []byte) string { + t.Helper() bech32, err := app.AuthKeeper.AddressCodec().BytesToString(addr) require.NoError(t, err) return bech32 } func fundAccount(t *testing.T, app *simapp.SimApp, ctx sdk.Context, addr sdk.AccAddress, amt string) { + t.Helper() require.NoError(t, testutil.FundAccount(ctx, app.BankKeeper, addr, coins(t, amt))) } diff --git a/tests/integration/accounts/bundler_test.go b/tests/integration/accounts/bundler_test.go index 1b94ddd78fa1..2fb88983ddf6 100644 --- a/tests/integration/accounts/bundler_test.go +++ b/tests/integration/accounts/bundler_test.go @@ -209,6 +209,7 @@ func TestMsgServer_ExecuteBundle(t *testing.T) { } func makeTx(t *testing.T, msg gogoproto.Message, sig []byte, xt *account_abstractionv1.TxExtension) []byte { + t.Helper() anyMsg, err := codectypes.NewAnyWithValue(msg) require.NoError(t, err) diff --git a/tests/integration/v2/auth/app_test.go b/tests/integration/v2/auth/app_test.go index 36326783216f..6331492b014c 100644 --- a/tests/integration/v2/auth/app_test.go +++ 
b/tests/integration/v2/auth/app_test.go @@ -22,7 +22,6 @@ import ( _ "cosmossdk.io/x/consensus" // import as blank for app wiring _ "cosmossdk.io/x/staking" // import as blank for app wirings - "github.com/cosmos/cosmos-sdk/codec" "github.com/cosmos/cosmos-sdk/tests/integration/v2" "github.com/cosmos/cosmos-sdk/testutil/configurator" _ "github.com/cosmos/cosmos-sdk/x/auth" // import as blank for app wiring @@ -35,7 +34,6 @@ import ( type suite struct { app *integration.App - cdc codec.Codec ctx context.Context authKeeper authkeeper.AccountKeeper From af4b7d20c57349bbd52ca73244302b1fbbed7b9f Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Tue, 3 Dec 2024 18:11:19 +0100 Subject: [PATCH 16/17] refactor(server/v2/grpc): simplify node service (#22728) --- proto/cosmos/base/node/v2/query.proto | 22 - scripts/build/protobuf.mk | 2 +- server/v2/api/grpc/nodeservice/query.pb.go | 532 ------------------ server/v2/api/grpc/nodeservice/query.pb.gw.go | 153 ----- server/v2/api/grpc/nodeservice/service.go | 29 - server/v2/api/grpc/server.go | 4 - server/v2/cometbft/grpc.go | 11 +- server/v2/go.mod | 2 +- tools/confix/data/v2-app.toml | 15 +- tools/confix/migrations.go | 2 - 10 files changed, 17 insertions(+), 755 deletions(-) delete mode 100644 proto/cosmos/base/node/v2/query.proto delete mode 100644 server/v2/api/grpc/nodeservice/query.pb.go delete mode 100644 server/v2/api/grpc/nodeservice/query.pb.gw.go delete mode 100644 server/v2/api/grpc/nodeservice/service.go diff --git a/proto/cosmos/base/node/v2/query.proto b/proto/cosmos/base/node/v2/query.proto deleted file mode 100644 index 24de6fd21223..000000000000 --- a/proto/cosmos/base/node/v2/query.proto +++ /dev/null @@ -1,22 +0,0 @@ -syntax = "proto3"; -package cosmos.base.node.v2; - -import "google/api/annotations.proto"; - -option go_package = "cosmossdk.io/server/v2/api/grpc/nodeservice"; - -// Service defines the gRPC querier service for node related queries. 
-service Service { - // Config queries for the operator configuration. - rpc Config(ConfigRequest) returns (ConfigResponse) { - option (google.api.http).get = "/cosmos/base/node/v2/config"; - } -} - -// ConfigRequest defines the request structure for the Config gRPC query. -message ConfigRequest {} - -// ConfigResponse defines the response structure for the Config gRPC query. -message ConfigResponse { - string minimum_gas_price = 1; -} \ No newline at end of file diff --git a/scripts/build/protobuf.mk b/scripts/build/protobuf.mk index d269e624331d..25d98b3f9538 100644 --- a/scripts/build/protobuf.mk +++ b/scripts/build/protobuf.mk @@ -1,4 +1,4 @@ -protoVer=0.15.1 +protoVer=0.15.2 protoImageName=ghcr.io/cosmos/proto-builder:$(protoVer) protoImage=$(DOCKER) run --rm -v $(CURDIR):/workspace --workdir /workspace $(protoImageName) diff --git a/server/v2/api/grpc/nodeservice/query.pb.go b/server/v2/api/grpc/nodeservice/query.pb.go deleted file mode 100644 index 7201c35db4f1..000000000000 --- a/server/v2/api/grpc/nodeservice/query.pb.go +++ /dev/null @@ -1,532 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: cosmos/base/node/v2/query.proto - -package nodeservice - -import ( - context "context" - fmt "fmt" - grpc1 "github.com/cosmos/gogoproto/grpc" - proto "github.com/cosmos/gogoproto/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// ConfigRequest defines the request structure for the Config gRPC query. -type ConfigRequest struct { -} - -func (m *ConfigRequest) Reset() { *m = ConfigRequest{} } -func (m *ConfigRequest) String() string { return proto.CompactTextString(m) } -func (*ConfigRequest) ProtoMessage() {} -func (*ConfigRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_e86a941b0be4e1ff, []int{0} -} -func (m *ConfigRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ConfigRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ConfigRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConfigRequest.Merge(m, src) -} -func (m *ConfigRequest) XXX_Size() int { - return m.Size() -} -func (m *ConfigRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ConfigRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ConfigRequest proto.InternalMessageInfo - -// ConfigResponse defines the response structure for the Config gRPC query. 
-type ConfigResponse struct { - MinimumGasPrice string `protobuf:"bytes,1,opt,name=minimum_gas_price,json=minimumGasPrice,proto3" json:"minimum_gas_price,omitempty"` -} - -func (m *ConfigResponse) Reset() { *m = ConfigResponse{} } -func (m *ConfigResponse) String() string { return proto.CompactTextString(m) } -func (*ConfigResponse) ProtoMessage() {} -func (*ConfigResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_e86a941b0be4e1ff, []int{1} -} -func (m *ConfigResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ConfigResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ConfigResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConfigResponse.Merge(m, src) -} -func (m *ConfigResponse) XXX_Size() int { - return m.Size() -} -func (m *ConfigResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ConfigResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ConfigResponse proto.InternalMessageInfo - -func (m *ConfigResponse) GetMinimumGasPrice() string { - if m != nil { - return m.MinimumGasPrice - } - return "" -} - -func init() { - proto.RegisterType((*ConfigRequest)(nil), "cosmos.base.node.v2.ConfigRequest") - proto.RegisterType((*ConfigResponse)(nil), "cosmos.base.node.v2.ConfigResponse") -} - -func init() { proto.RegisterFile("cosmos/base/node/v2/query.proto", fileDescriptor_e86a941b0be4e1ff) } - -var fileDescriptor_e86a941b0be4e1ff = []byte{ - // 275 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x90, 0xb1, 0x4a, 0xf4, 0x40, - 0x14, 0x85, 0x33, 0x7f, 0xb1, 0x3f, 0x0e, 0xe8, 0x62, 0x6c, 0x64, 0xd5, 0x51, 0xb2, 0x8d, 0x28, - 0xcc, 0x40, 0x6c, 0xad, 0x14, 0xb1, 0x95, 0xb5, 0xb3, 0x59, 0x66, 0xb3, 0xd7, 0x30, 0x68, 0xe6, - 
0x66, 0xe7, 0x26, 0x01, 0x4b, 0x7d, 0x02, 0xc1, 0x97, 0xb2, 0x5c, 0xb0, 0xb1, 0x94, 0xc4, 0x07, - 0x91, 0xc9, 0x68, 0x21, 0x2c, 0xb6, 0xe7, 0x7e, 0xe7, 0xdc, 0xc3, 0xe1, 0xfb, 0x19, 0x52, 0x81, - 0xa4, 0x66, 0x9a, 0x40, 0x59, 0x9c, 0x83, 0x6a, 0x52, 0xb5, 0xa8, 0xc1, 0x3d, 0xc8, 0xd2, 0x61, - 0x85, 0xf1, 0x56, 0x00, 0xa4, 0x07, 0xa4, 0x07, 0x64, 0x93, 0x8e, 0x76, 0x73, 0xc4, 0xfc, 0x1e, - 0x94, 0x2e, 0x8d, 0xd2, 0xd6, 0x62, 0xa5, 0x2b, 0x83, 0x96, 0x82, 0x25, 0x19, 0xf2, 0xf5, 0x73, - 0xb4, 0xb7, 0x26, 0x9f, 0xc0, 0xa2, 0x06, 0xaa, 0x92, 0x53, 0xbe, 0xf1, 0x23, 0x50, 0x89, 0x96, - 0x20, 0x3e, 0xe2, 0x9b, 0x85, 0xb1, 0xa6, 0xa8, 0x8b, 0x69, 0xae, 0x69, 0x5a, 0x3a, 0x93, 0xc1, - 0x36, 0x3b, 0x60, 0x87, 0x6b, 0x93, 0xe1, 0xf7, 0xe1, 0x52, 0xd3, 0x95, 0x97, 0xd3, 0x47, 0xc6, - 0xff, 0x5f, 0x83, 0x6b, 0x4c, 0x06, 0x71, 0xc3, 0x07, 0x21, 0x29, 0x4e, 0xe4, 0x8a, 0x62, 0xf2, - 0xd7, 0xdf, 0xd1, 0xf8, 0x4f, 0x26, 0x54, 0x49, 0xc6, 0x4f, 0x6f, 0x9f, 0x2f, 0xff, 0xf6, 0xe2, - 0x1d, 0xb5, 0x6a, 0x8a, 0xac, 0x87, 0xcf, 0x2e, 0x5e, 0x5b, 0xc1, 0x96, 0xad, 0x60, 0x1f, 0xad, - 0x60, 0xcf, 0x9d, 0x88, 0x96, 0x9d, 0x88, 0xde, 0x3b, 0x11, 0xdd, 0x1c, 0x07, 0x17, 0xcd, 0xef, - 0xa4, 0x41, 0x45, 0xe0, 0x1a, 0x70, 0xde, 0xe8, 0xa7, 0xc9, 0x5d, 0x99, 0xf5, 0x49, 0x14, 0xea, - 0xcf, 0x06, 0xfd, 0x40, 0x27, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbb, 0xb3, 0x6b, 0x93, 0x76, - 0x01, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// ServiceClient is the client API for Service service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type ServiceClient interface { - // Config queries for the operator configuration. 
- Config(ctx context.Context, in *ConfigRequest, opts ...grpc.CallOption) (*ConfigResponse, error) -} - -type serviceClient struct { - cc grpc1.ClientConn -} - -func NewServiceClient(cc grpc1.ClientConn) ServiceClient { - return &serviceClient{cc} -} - -func (c *serviceClient) Config(ctx context.Context, in *ConfigRequest, opts ...grpc.CallOption) (*ConfigResponse, error) { - out := new(ConfigResponse) - err := c.cc.Invoke(ctx, "/cosmos.base.node.v2.Service/Config", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// ServiceServer is the server API for Service service. -type ServiceServer interface { - // Config queries for the operator configuration. - Config(context.Context, *ConfigRequest) (*ConfigResponse, error) -} - -// UnimplementedServiceServer can be embedded to have forward compatible implementations. -type UnimplementedServiceServer struct { -} - -func (*UnimplementedServiceServer) Config(ctx context.Context, req *ConfigRequest) (*ConfigResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Config not implemented") -} - -func RegisterServiceServer(s grpc1.Server, srv ServiceServer) { - s.RegisterService(&_Service_serviceDesc, srv) -} - -func _Service_Config_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ConfigRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ServiceServer).Config(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cosmos.base.node.v2.Service/Config", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ServiceServer).Config(ctx, req.(*ConfigRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var Service_serviceDesc = _Service_serviceDesc -var _Service_serviceDesc = grpc.ServiceDesc{ - ServiceName: "cosmos.base.node.v2.Service", - HandlerType: 
(*ServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Config", - Handler: _Service_Config_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "cosmos/base/node/v2/query.proto", -} - -func (m *ConfigRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ConfigRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ConfigRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *ConfigResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ConfigResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ConfigResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.MinimumGasPrice) > 0 { - i -= len(m.MinimumGasPrice) - copy(dAtA[i:], m.MinimumGasPrice) - i = encodeVarintQuery(dAtA, i, uint64(len(m.MinimumGasPrice))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { - offset -= sovQuery(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ConfigRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *ConfigResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.MinimumGasPrice) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func sovQuery(x uint64) (n int) { - return 
(math_bits.Len64(x|1) + 6) / 7 -} -func sozQuery(x uint64) (n int) { - return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *ConfigRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConfigRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConfigResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConfigResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MinimumGasPrice", wireType) - } - var stringLen uint64 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MinimumGasPrice = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipQuery(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthQuery - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupQuery - } - depth-- - case 5: - 
iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthQuery - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") -) diff --git a/server/v2/api/grpc/nodeservice/query.pb.gw.go b/server/v2/api/grpc/nodeservice/query.pb.gw.go deleted file mode 100644 index b301d29dccfe..000000000000 --- a/server/v2/api/grpc/nodeservice/query.pb.gw.go +++ /dev/null @@ -1,153 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: cosmos/base/node/v2/query.proto - -/* -Package nodeservice is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. -*/ -package nodeservice - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage -var _ = metadata.Join - -func request_Service_Config_0(ctx context.Context, marshaler runtime.Marshaler, client ServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ConfigRequest - var metadata runtime.ServerMetadata - - msg, err := client.Config(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - 
-func local_request_Service_Config_0(ctx context.Context, marshaler runtime.Marshaler, server ServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ConfigRequest - var metadata runtime.ServerMetadata - - msg, err := server.Config(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterServiceHandlerServer registers the http handlers for service Service to "mux". -// UnaryRPC :call ServiceServer directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterServiceHandlerFromEndpoint instead. -func RegisterServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server ServiceServer) error { - - mux.Handle("GET", pattern_Service_Config_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Service_Config_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Service_Config_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - return nil -} - -// RegisterServiceHandlerFromEndpoint is same as RegisterServiceHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterServiceHandler(ctx, mux, conn) -} - -// RegisterServiceHandler registers the http handlers for service Service to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterServiceHandlerClient(ctx, mux, NewServiceClient(conn)) -} - -// RegisterServiceHandlerClient registers the http handlers for service Service -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "ServiceClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ServiceClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "ServiceClient" to call the correct interceptors. 
-func RegisterServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client ServiceClient) error { - - mux.Handle("GET", pattern_Service_Config_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Service_Config_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Service_Config_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_Service_Config_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"cosmos", "base", "node", "v2", "config"}, "", runtime.AssumeColonVerbOpt(false))) -) - -var ( - forward_Service_Config_0 = runtime.ForwardResponseMessage -) diff --git a/server/v2/api/grpc/nodeservice/service.go b/server/v2/api/grpc/nodeservice/service.go deleted file mode 100644 index 51f84d36142f..000000000000 --- a/server/v2/api/grpc/nodeservice/service.go +++ /dev/null @@ -1,29 +0,0 @@ -package nodeservice - -import ( - context "context" - - "cosmossdk.io/core/server" -) - -var _ ServiceServer = queryServer{} - -type queryServer struct { - cfg server.ConfigMap -} - -func NewQueryServer(cfg server.ConfigMap) ServiceServer { - return queryServer{cfg: cfg} -} - -func (s queryServer) Config(ctx context.Context, _ *ConfigRequest) (*ConfigResponse, error) { - minGasPricesStr := "" - minGasPrices, ok := s.cfg["server"].(map[string]interface{})["minimum-gas-prices"] - if ok { - minGasPricesStr = minGasPrices.(string) - } - - return &ConfigResponse{ - 
MinimumGasPrice: minGasPricesStr, - }, nil -} diff --git a/server/v2/api/grpc/server.go b/server/v2/api/grpc/server.go index 9aa06515de33..5775a6fd0e1e 100644 --- a/server/v2/api/grpc/server.go +++ b/server/v2/api/grpc/server.go @@ -26,7 +26,6 @@ import ( "cosmossdk.io/log" serverv2 "cosmossdk.io/server/v2" "cosmossdk.io/server/v2/api/grpc/gogoreflection" - "cosmossdk.io/server/v2/api/grpc/nodeservice" ) const ( @@ -75,9 +74,6 @@ func New[T transaction.Tx]( // register grpc query handler v2 RegisterServiceServer(grpcSrv, &v2Service{queryHandlers, queryable}) - // register node service - nodeservice.RegisterServiceServer(grpcSrv, nodeservice.NewQueryServer(cfg)) - // reflection allows external clients to see what services and methods the gRPC server exposes. gogoreflection.Register(grpcSrv, slices.Collect(maps.Keys(queryHandlers)), logger.With("sub-module", "grpc-reflection")) diff --git a/server/v2/cometbft/grpc.go b/server/v2/cometbft/grpc.go index 5a2d64d7d2a2..66bbc86731f1 100644 --- a/server/v2/cometbft/grpc.go +++ b/server/v2/cometbft/grpc.go @@ -2,6 +2,7 @@ package cometbft import ( "context" + "fmt" abci "github.com/cometbft/cometbft/abci/types" abciproto "github.com/cometbft/cometbft/api/cometbft/abci/v1" @@ -15,6 +16,7 @@ import ( "cosmossdk.io/core/server" corestore "cosmossdk.io/core/store" "cosmossdk.io/core/transaction" + storeserver "cosmossdk.io/server/v2/store" "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/client/grpc/cmtservice" @@ -208,10 +210,15 @@ func (s nodeServer[T]) Config(ctx context.Context, _ *nodeservice.ConfigRequest) minGasPricesStr = minGasPrices.(string) } + storeCfg, err := storeserver.UnmarshalConfig(s.cfg) + if err != nil { + return nil, err + } + return &nodeservice.ConfigResponse{ MinimumGasPrice: minGasPricesStr, - PruningKeepRecent: "ambiguous in v2", - PruningInterval: "ambiguous in v2", + PruningKeepRecent: fmt.Sprintf("%d", storeCfg.Options.SCPruningOption.KeepRecent), + 
PruningInterval: fmt.Sprintf("%d", storeCfg.Options.SCPruningOption.Interval), HaltHeight: s.cometBFTAppConfig.HaltHeight, }, nil } diff --git a/server/v2/go.mod b/server/v2/go.mod index 0f84cf2ef70e..8de9635ee4ec 100644 --- a/server/v2/go.mod +++ b/server/v2/go.mod @@ -35,7 +35,6 @@ require ( github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.19.0 github.com/stretchr/testify v1.10.0 - google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 google.golang.org/grpc v1.68.0 google.golang.org/protobuf v1.35.2 ) @@ -113,6 +112,7 @@ require ( golang.org/x/text v0.20.0 // indirect golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/tools/confix/data/v2-app.toml b/tools/confix/data/v2-app.toml index 394cc41e7b09..2f9ed9e734d3 100644 --- a/tools/confix/data/v2-app.toml +++ b/tools/confix/data/v2-app.toml @@ -41,6 +41,12 @@ max-recv-msg-size = 10485760 # The default value is math.MaxInt32. max-send-msg-size = 2147483647 +[grpc-gateway] +# Enable defines if the gRPC-gateway should be enabled. +enable = true +# Address defines the address the gRPC-gateway server binds to. +address = 'localhost:1317' + [rest] # Enable defines if the REST server should be enabled. enable = true @@ -56,18 +62,9 @@ minimum-gas-prices = '0stake' app-db-backend = 'goleveldb' [store.options] -# State storage database type. Currently we support: "pebble" and "rocksdb" -ss-type = 'pebble' # State commitment database type. Currently we support: "iavl" and "iavl-v2" sc-type = 'iavl' -# Pruning options for state storage -[store.options.ss-pruning-option] -# Number of recent heights to keep on disk. 
-keep-recent = 2 -# Height interval at which pruned heights are removed from disk. -interval = 100 - # Pruning options for state commitment [store.options.sc-pruning-option] # Number of recent heights to keep on disk. diff --git a/tools/confix/migrations.go b/tools/confix/migrations.go index 77fc4a671cb4..3589124c733b 100644 --- a/tools/confix/migrations.go +++ b/tools/confix/migrations.go @@ -46,11 +46,9 @@ var v2KeyChanges = v2KeyChangesMap{ "halt-time": []string{"comet.halt-time"}, "app-db-backend": []string{"store.app-db-backend"}, "pruning-keep-recent": []string{ - "store.options.ss-pruning-option.keep-recent", "store.options.sc-pruning-option.keep-recent", }, "pruning-interval": []string{ - "store.options.ss-pruning-option.interval", "store.options.sc-pruning-option.interval", }, "iavl-cache-size": []string{"store.options.iavl-config.cache-size"}, From 371e05abc43cd16e5793aeeca0996e2db627f1a9 Mon Sep 17 00:00:00 2001 From: Damian Nolan Date: Tue, 3 Dec 2024 22:10:05 +0100 Subject: [PATCH 17/17] fix(baseapp): correctly set header info time on query ctx (#22732) --- baseapp/abci.go | 1 + 1 file changed, 1 insertion(+) diff --git a/baseapp/abci.go b/baseapp/abci.go index d9921a8e13d5..9bcdb2ea4e10 100644 --- a/baseapp/abci.go +++ b/baseapp/abci.go @@ -1327,6 +1327,7 @@ func (app *BaseApp) CreateQueryContextWithCheckHeader(height int64, prove, check WithHeaderInfo(coreheader.Info{ ChainID: app.chainID, Height: height, + Time: header.Time, }). WithBlockHeader(*header). WithBlockHeight(height)