
Don't clear out resolutions from the votestore if the DB is restored using a statesync snapshot (#1312)
charithabandi authored Feb 3, 2025
1 parent 2a7bdce commit bd6ab70
Showing 8 changed files with 357 additions and 129 deletions.
app/node/build.go (11 changes: 6 additions & 5 deletions)
@@ -214,16 +214,15 @@ func initializeStatesyncService(ctx context.Context, d *coreDependencies, p2p *n
func buildDB(ctx context.Context, d *coreDependencies, ss *node.StateSyncService, closers *closeFuncs) *pg.DB {
pg.UseLogger(d.logger.New("PG"))

- fromSnapshot := restoreDB(d, ctx, ss)
+ fromGenesisSnapshot := restoreDB(d, ctx, ss)

db, err := d.dbOpener(ctx, d.cfg.DB.DBName, d.cfg.DB.MaxConns)
if err != nil {
failBuild(err, "failed to open kwild postgres database")
}
closers.addCloser(db.Close, "Closing application DB")

- if fromSnapshot {
- d.logger.Info("DB restored from snapshot", "snapshot", d.cfg.GenesisState)
+ if fromGenesisSnapshot {
// readjust the expiry heights of all the pending resolutions after snapshot restore for Zero-downtime migrations
// snapshot tool handles the migration expiry height readjustment for offline migrations
// adjustExpiration := false
@@ -254,7 +253,7 @@ func buildDB(ctx context.Context, d *coreDependencies, ss *node.StateSyncService
// - If statesync is enabled. Statesync will take care of syncing the database
// to the network state using statesync snapshots.
//
- // returns true if the DB was restored from snapshot, false otherwise.
+ // returns true if the DB was restored from genesis snapshot, false otherwise.
func restoreDB(d *coreDependencies, ctx context.Context, ss *node.StateSyncService) bool {
if isDbInitialized(ctx, d) {
return false
@@ -269,7 +268,7 @@ func restoreDB(d *coreDependencies, ctx context.Context, ss *node.StateSyncServi

if success {
d.logger.Info("DB restored from statesync snapshot")
- return true
+ return false
}

// If statesync is not successful, restore from the genesis snapshot if available
@@ -316,6 +315,8 @@ func restoreDB(d *coreDependencies, ctx context.Context, ss *node.StateSyncServi
if err != nil {
failBuild(err, "failed to restore DB from snapshot")
}

d.logger.Info("DB restored from snapshot", "snapshot", d.cfg.GenesisState)
return true
}

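Taken together, these hunks make restoreDB report a restore only when the database came from the configured genesis snapshot: a successful statesync restore now logs and returns false, so buildDB no longer touches the pending resolutions in the votestore for statesync-restored nodes. Below is a minimal sketch of the resulting flow; the helpers statesyncEnabled, statesyncRestore, genesisRestore and openDB are placeholders for logic the diff only shows in part, not the actual kwild implementation, and the closers handling from buildDB is elided.

func restoreDB(d *coreDependencies, ctx context.Context, ss *node.StateSyncService) bool {
	if isDbInitialized(ctx, d) {
		return false // data dir already has state; nothing to restore
	}

	if statesyncEnabled(d) && statesyncRestore(ctx, d, ss) {
		d.logger.Info("DB restored from statesync snapshot")
		// A statesync snapshot already carries the network's resolution state,
		// so signal "not a genesis restore" and leave the votestore untouched.
		return false
	}

	if !genesisRestore(ctx, d) {
		return false // no genesis snapshot configured
	}

	d.logger.Info("DB restored from snapshot", "snapshot", d.cfg.GenesisState)
	return true
}

func buildDB(ctx context.Context, d *coreDependencies, ss *node.StateSyncService) *pg.DB {
	fromGenesisSnapshot := restoreDB(d, ctx, ss)
	db := openDB(ctx, d) // opens d.cfg.DB.DBName with d.cfg.DB.MaxConns
	if fromGenesisSnapshot {
		// only genesis-snapshot restores readjust the expiry heights of
		// pending resolutions (zero-downtime migrations)
	}
	return db
}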
node/consensus/blocksync.go (4 changes: 2 additions & 2 deletions)
@@ -157,7 +157,7 @@ func (ce *ConsensusEngine) syncBlocksUntilHeight(ctx context.Context, startHeigh
func (ce *ConsensusEngine) syncBlockWithRetry(ctx context.Context, height int64) error {
_, rawblk, ci, err := ce.getBlock(ctx, height)
if err != nil { // all kinds of errors?
- return fmt.Errorf("error requesting block from network: height : %d, error: %w", height, err)
+ return err
}

return ce.applyBlock(ctx, rawblk, ci)
@@ -167,7 +167,7 @@ func (ce *ConsensusEngine) syncBlockWithRetry(ctx context.Context, height int64)
func (ce *ConsensusEngine) syncBlock(ctx context.Context, height int64) error {
_, rawblk, ci, err := ce.blkRequester(ctx, height)
if err != nil { // all kinds of errors?
- return fmt.Errorf("error requesting block from network: height : %d, error: %w", height, err)
+ return err
}

return ce.applyBlock(ctx, rawblk, ci)
node/consensus/engine.go (1 change: 0 additions & 1 deletion)
@@ -797,7 +797,6 @@ func (ce *ConsensusEngine) processCurrentBlock(ctx context.Context) error {
// otherwise rollback.
blkHash, rawBlk, ci, err := ce.getBlock(ctx, ce.state.blkProp.height)
if err != nil {
- ce.log.Debug("Error requesting block from network", "height", ce.state.blkProp.height, "error", err)
return err
}

node/node_live_test.go (10 changes: 5 additions & 5 deletions)
@@ -80,7 +80,7 @@ func TestSingleNodeMocknet(t *testing.T) {
valSetList = append(valSetList, &v)
}

- ss := newSnapshotStore()
+ ss := newSnapshotStore(bs1)

_, vsReal, err := voting.NewResolutionStore(ctx, db1)
require.NoError(t, err)
@@ -103,7 +103,7 @@ func TestSingleNodeMocknet(t *testing.T) {
accounts, err := accounts.InitializeAccountStore(ctx, db1, log.DiscardLogger)
require.NoError(t, err)

- migrator, err := migrations.SetupMigrator(ctx, db1, newSnapshotStore(), accounts, filepath.Join(root1, "migrations"), mparams, vsReal, log.New(log.WithName("MIGRATOR")))
+ migrator, err := migrations.SetupMigrator(ctx, db1, newSnapshotStore(bs1), accounts, filepath.Join(root1, "migrations"), mparams, vsReal, log.New(log.WithName("MIGRATOR")))
require.NoError(t, err)

signer := auth.GetNodeSigner(privKeys[0])
@@ -238,7 +238,7 @@ func TestDualNodeMocknet(t *testing.T) {
for _, v := range valSet {
valSetList = append(valSetList, &v)
}
- ss := newSnapshotStore()
+ ss := newSnapshotStore(bs1)

genCfg := config.DefaultGenesisConfig()
genCfg.Leader = ktypes.PublicKey{PublicKey: privKeys[0].Public()}
@@ -264,7 +264,7 @@
}, accounts1, vstore1)
require.NoError(t, err)

- migrator, err := migrations.SetupMigrator(ctx, db1, newSnapshotStore(), accounts1, filepath.Join(root1, "migrations"), mparams, vstore1, log.New(log.WithName("MIGRATOR")))
+ migrator, err := migrations.SetupMigrator(ctx, db1, newSnapshotStore(bs1), accounts1, filepath.Join(root1, "migrations"), mparams, vstore1, log.New(log.WithName("MIGRATOR")))
require.NoError(t, err)

bpl1 := log.New(log.WithName("BP1"), log.WithWriter(os.Stdout), log.WithLevel(log.LevelDebug), log.WithFormat(log.FormatUnstructured))
@@ -338,7 +338,7 @@ func TestDualNodeMocknet(t *testing.T) {
_, vstore2, err := voting.NewResolutionStore(ctx, db2)
require.NoError(t, err)

- migrator2, err := migrations.SetupMigrator(ctx, db2, newSnapshotStore(), accounts2, filepath.Join(root2, "migrations"), mparams, vstore2, log.New(log.WithName("MIGRATOR")))
+ migrator2, err := migrations.SetupMigrator(ctx, db2, newSnapshotStore(bs2), accounts2, filepath.Join(root2, "migrations"), mparams, vstore2, log.New(log.WithName("MIGRATOR")))
require.NoError(t, err)

signer2 := auth.GetNodeSigner(privKeys[1])
node/node_test.go (2 changes: 1 addition & 1 deletion)
@@ -111,7 +111,7 @@ func makeTestHosts(t *testing.T, nNodes, nExtraHosts int, blockInterval time.Dur
Statesync: &defaultConfigSet.StateSync,
Mempool: mempool.New(),
BlockStore: bs,
- Snapshotter: newSnapshotStore(),
+ Snapshotter: newSnapshotStore(bs),
Consensus: ce,
BlockProc: &dummyBP{},
}
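The test changes above all hand the node's block store (bs, bs1, bs2) to newSnapshotStore instead of constructing the mock store bare. The real helper lives in the node test code and is not shown in this diff; the fragment below is only a hypothetical sketch of its shape, with the blockStore interface and the struct fields assumed from the call sites rather than taken from the repository.

// Hypothetical sketch, not the repository's actual test helper: the mock
// snapshot store is now wired to a block store supplied by the test.
type blockStore interface {
	Best() (height int64) // assumed accessor; the real interface may differ
}

type snapshotStore struct {
	bs blockStore // block store passed in by the test (bs, bs1, bs2 above)
}

func newSnapshotStore(bs blockStore) *snapshotStore {
	return &snapshotStore{bs: bs}
}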
