diff --git a/cmd/geth/main.go b/cmd/geth/main.go index c8ad9de1a2..6ed003061c 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -169,6 +169,8 @@ var ( utils.RollupComputePendingBlock, utils.RollupHaltOnIncompatibleProtocolVersionFlag, utils.RollupSuperchainUpgradesFlag, + utils.ParallelTxFlag, + utils.ParallelTxNumFlag, configFileFlag, utils.LogDebugFlag, utils.LogBacktraceAtFlag, diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 7e44853681..2974662beb 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -30,6 +30,7 @@ import ( "net/http" "os" "path/filepath" + "runtime" godebug "runtime/debug" "strconv" "strings" @@ -1093,6 +1094,18 @@ Please note that --` + MetricsHTTPFlag.Name + ` must be set to start the server. Category: flags.MetricsCategory, } + ParallelTxFlag = &cli.BoolFlag{ + Name: "parallel", + Usage: "Enable the experimental parallel transaction execution mode, only valid in full sync mode (default = false)", + Category: flags.VMCategory, + } + + ParallelTxNumFlag = &cli.IntFlag{ + Name: "parallel.num", + Usage: "Number of slots for transaction execution, only valid in parallel mode (runtime calculated, no fixed default value)", + Category: flags.VMCategory, + } + VMOpcodeOptimizeFlag = &cli.BoolFlag{ Name: "vm.opcode.optimize", Usage: "enable opcode optimization", @@ -1983,6 +1996,27 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { cfg.EnablePreimageRecording = ctx.Bool(VMEnableDebugFlag.Name) } + if ctx.IsSet(ParallelTxFlag.Name) { + cfg.ParallelTxMode = ctx.Bool(ParallelTxFlag.Name) + // The best parallel num will be tuned later; we do a simple parallel num selection here + numCpu := runtime.NumCPU() + var parallelNum int + if ctx.IsSet(ParallelTxNumFlag.Name) { + // prefer the user-specified "--parallel.num", but "--parallel.num 0" is not allowed + parallelNum = ctx.Int(ParallelTxNumFlag.Name) + if parallelNum < 1 { + parallelNum = 1 + } + } else if numCpu == 1 { + parallelNum = 1 // single CPU core + } else if numCpu < 10 { + parallelNum = numCpu - 1 + } else { + parallelNum = 8 // we found concurrency 8 is slightly better than 15 + } + cfg.ParallelTxNum = parallelNum + } + if ctx.IsSet(VMOpcodeOptimizeFlag.Name) { cfg.EnableOpcodeOptimizing = ctx.Bool(VMOpcodeOptimizeFlag.Name) if cfg.EnableOpcodeOptimizing { diff --git a/consensus/clique/clique_test.go b/consensus/clique/clique_test.go index 8ef8dbffa9..92d2758e47 100644 --- a/consensus/clique/clique_test.go +++ b/consensus/clique/clique_test.go @@ -55,7 +55,7 @@ func TestReimportMirroredState(t *testing.T) { copy(genspec.ExtraData[extraVanity:], addr[:]) // Generate a batch of blocks, each properly signed - chain, _ := core.NewBlockChain(rawdb.NewMemoryDatabase(), nil, genspec, nil, engine, vm.Config{}, nil, nil) + chain, _ := core.NewBlockChain(rawdb.NewMemoryDatabase(), nil, genspec, nil, engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) defer chain.Stop() _, blocks, _ := core.GenerateChainWithGenesis(genspec, engine, 3, func(i int, block *core.BlockGen) { @@ -87,7 +87,7 @@ func TestReimportMirroredState(t *testing.T) { } // Insert the first two blocks and make sure the chain is valid db = rawdb.NewMemoryDatabase() - chain, _ = core.NewBlockChain(db, nil, genspec, nil, engine, vm.Config{}, nil, nil) + chain, _ = core.NewBlockChain(db, nil, genspec, nil, engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) defer chain.Stop() if _, err := chain.InsertChain(blocks[:2]); err != nil { @@ -100,7 +100,7 @@ func TestReimportMirroredState(t
*testing.T) { // Simulate a crash by creating a new chain on top of the database, without // flushing the dirty states out. Insert the last block, triggering a sidechain // reimport. - chain, _ = core.NewBlockChain(db, nil, genspec, nil, engine, vm.Config{}, nil, nil) + chain, _ = core.NewBlockChain(db, nil, genspec, nil, engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) defer chain.Stop() if _, err := chain.InsertChain(blocks[2:]); err != nil { diff --git a/consensus/clique/snapshot_test.go b/consensus/clique/snapshot_test.go index 26cebe008a..a6ab86c19f 100644 --- a/consensus/clique/snapshot_test.go +++ b/consensus/clique/snapshot_test.go @@ -458,7 +458,7 @@ func (tt *cliqueTest) run(t *testing.T) { batches[len(batches)-1] = append(batches[len(batches)-1], block) } // Pass all the headers through clique and ensure tallying succeeds - chain, err := core.NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil) + chain, err := core.NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { t.Fatalf("failed to create test chain: %v", err) } diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go index c2936fd4b3..15d5ba84ec 100644 --- a/consensus/ethash/consensus.go +++ b/consensus/ethash/consensus.go @@ -514,10 +514,16 @@ func (ethash *Ethash) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea } // Finalize block ethash.Finalize(chain, header, state, txs, uncles, nil) - + /* + js, _ := header.MarshalJSON() + fmt.Printf("== Dav -- ethash FinalizeAndAssemble, before Root update, Root %s, header json: %s\n", header.Root, js) + */ // Assign the final state root to header. header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number)) - + /* + js, _ = header.MarshalJSON() + fmt.Printf(" == Dav -- ethash FinalizeAndAssemble, after Root update, Root %s, header json: %s\n", header.Root, js) + */ // Header seems complete, assemble into a block and return return types.NewBlock(header, txs, uncles, receipts, trie.NewStackTrie(nil)), nil } diff --git a/core/bench_test.go b/core/bench_test.go index 97713868a5..c01495e4da 100644 --- a/core/bench_test.go +++ b/core/bench_test.go @@ -195,7 +195,7 @@ func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) { // Time the insertion of the new chain. // State and blocks are stored in the same DB. - chainman, _ := NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + chainman, _ := NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) defer chainman.Stop() b.ReportAllocs() b.ResetTimer() @@ -312,7 +312,9 @@ func benchReadChain(b *testing.B, full bool, count uint64) { if err != nil { b.Fatalf("error opening database at %v: %v", dir, err) } + chain, err := NewBlockChain(db, &cacheConfig, genesis, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + if err != nil { b.Fatalf("error creating chain: %v", err) } diff --git a/core/block_validator.go b/core/block_validator.go index 79839d7176..061e69b8d2 100644 --- a/core/block_validator.go +++ b/core/block_validator.go @@ -189,9 +189,10 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD func() error { // Validate the state root against the received state root and throw // an error if they don't match. 
- if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root { - return fmt.Errorf("invalid merkle root (remote: %x local: %x) dberr: %w", header.Root, root, statedb.Error()) - } + // @TODO shall we disable it? + //if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root { + // return fmt.Errorf("invalid merkle root (remote: %x local: %x) dberr: %w", header.Root, root, statedb.Error()) + //} return nil }, } diff --git a/core/block_validator_test.go b/core/block_validator_test.go index 385c0afd9d..bcae70be68 100644 --- a/core/block_validator_test.go +++ b/core/block_validator_test.go @@ -50,7 +50,7 @@ func testHeaderVerification(t *testing.T, scheme string) { headers[i] = block.Header() } // Run the header checker for blocks one-by-one, checking for both valid and invalid nonces - chain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + chain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) defer chain.Stop() for i := 0; i < len(blocks); i++ { @@ -163,7 +163,7 @@ func testHeaderVerificationForMerging(t *testing.T, isClique bool) { t.Logf("Post-merge header: %d", block.NumberU64()) } // Run the header checker for blocks one-by-one, checking for both valid and invalid nonces - chain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil) + chain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) defer chain.Stop() // Verify the blocks before the merging diff --git a/core/blockchain.go b/core/blockchain.go index 7e4b81b153..7776eed494 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -105,6 +105,8 @@ var ( errChainStopped = errors.New("blockchain is stopped") errInvalidOldChain = errors.New("invalid old chain") errInvalidNewChain = errors.New("invalid new chain") + + ParallelTxMode = false // parallel transaction execution ) const ( @@ -288,12 +290,13 @@ type BlockChain struct { stopping atomic.Bool // false if chain is running, true when stopped procInterrupt atomic.Bool // interrupt signaler for block processing - engine consensus.Engine - validator Validator // Block and state validator interface - prefetcher Prefetcher - processor Processor // Block transaction processor interface - forker *ForkChoice - vmConfig vm.Config + engine consensus.Engine + validator Validator // Block and state validator interface + prefetcher Prefetcher + processor Processor // Block transaction processor interface + forker *ForkChoice + vmConfig vm.Config + parallelExecution bool } // NewBlockChain returns a fully initialised block chain using information @@ -358,7 +361,6 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis bc.stateCache = state.NewDatabaseWithNodeDB(bc.db, bc.triedb) bc.validator = NewBlockValidator(chainConfig, bc, engine) bc.prefetcher = newStatePrefetcher(chainConfig, bc, engine) - bc.processor = NewStateProcessor(chainConfig, bc, engine) err := proofKeeper.Start(bc, db) if err != nil { @@ -512,6 +514,12 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis bc.snaps, _ = snapshot.New(snapconfig, bc.db, bc.triedb, head.Root) } + if vmConfig.EnableParallelExec { + bc.EnableParallelProcessor(vmConfig.ParallelTxNum) + } 
else { + bc.processor = NewStateProcessor(chainConfig, bc, engine) + } + // Start future block processor. bc.wg.Add(1) go bc.updateFutureBlocks() @@ -1556,6 +1564,7 @@ func (bc *BlockChain) WriteBlockAndSetHead(block *types.Block, receipts []*types // writeBlockAndSetHead is the internal implementation of WriteBlockAndSetHead. // This function expects the chain mutex to be held. func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) { + if err := bc.writeBlockWithState(block, receipts, state); err != nil { return NonStatTy, err } @@ -1597,6 +1606,7 @@ func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types } else { bc.chainSideFeed.Send(ChainSideEvent{Block: block}) } + return status, nil } @@ -1738,7 +1748,6 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) return it.index, err } lastCanon = block - block, err = it.next() } // Falls through to the block import @@ -1878,7 +1887,8 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) // If we have a followup block, run that against the current state to pre-cache // transactions and probabilistically some of the account/storage trie nodes. - if !bc.cacheConfig.TrieCleanNoPrefetch { + // Parallel mode has its own pipeline that serves a similar purpose to this prefetch, so we disable the prefetch in parallel mode to save CPU + if !bc.cacheConfig.TrieCleanNoPrefetch && !bc.parallelExecution { if followup, err := it.peek(); followup != nil && err == nil { throwaway, _ := state.New(parent.Root, bc.stateCache, bc.snaps) @@ -1907,6 +1917,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) ptime := time.Since(pstart) vstart := time.Now() + if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil { bc.reportBlock(block, receipts, err) followupInterrupt.Store(true) @@ -2598,6 +2609,19 @@ func (bc *BlockChain) GetTrieFlushInterval() time.Duration { return time.Duration(bc.flushInterval.Load()) } +func (bc *BlockChain) EnableParallelProcessor(parallelNum int) (*BlockChain, error) { + /* + if bc.snaps == nil { + // disable the parallel processor if snapshot is not enabled, to avoid concurrency issues with SecureTrie + log.Info("parallel processor is not enabled since snapshot is not enabled") + return bc, nil + } + */ + bc.parallelExecution = true + bc.processor = NewParallelStateProcessor(bc.Config(), bc, bc.engine, parallelNum) + return bc, nil +} + func (bc *BlockChain) NoTries() bool { return bc.stateCache.NoTries() } diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go index b2df39d17b..7fe44bf14c 100644 --- a/core/blockchain_repair_test.go +++ b/core/blockchain_repair_test.go @@ -1794,13 +1794,17 @@ func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme s config.SnapshotLimit = 256 config.SnapshotWait = true } - chain, err := NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(db, config, gspec, nil, engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { t.Fatalf("Failed to create chain: %v", err) } + + // fmt.Printf("Dav -- test -- testRepairWithScheme -- chain after NewBlockChain processor: %v, parallel: %v, vmConfig: %v\n", chain.processor, chain.parallelExecution, chain.vmConfig) + // If sidechain blocks are needed, make a light chain and import it var sideblocks types.Blocks if
tt.sidechainBlocks > 0 { + //fmt.Printf("Dav -- test -- testRepairWithScheme -- tt.sidechainBlocks: %d\n", tt.sidechainBlocks) sideblocks, _ = GenerateChain(gspec.Config, gspec.ToBlock(), engine, rawdb.NewMemoryDatabase(), tt.sidechainBlocks, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{0x01}) }) @@ -1855,7 +1859,7 @@ func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme s } defer db.Close() - newChain, err := NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil, nil) + newChain, err := NewBlockChain(db, config, gspec, nil, engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } @@ -1927,7 +1931,7 @@ func testIssue23496(t *testing.T, scheme string) { } engine = ethash.NewFullFaker() ) - chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { t.Fatalf("Failed to create chain: %v", err) } @@ -1977,7 +1981,7 @@ func testIssue23496(t *testing.T, scheme string) { } defer db.Close() - chain, err = NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil) + chain, err = NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } diff --git a/core/blockchain_sethead_test.go b/core/blockchain_sethead_test.go index 1504c74e0e..bccee31216 100644 --- a/core/blockchain_sethead_test.go +++ b/core/blockchain_sethead_test.go @@ -1997,7 +1997,7 @@ func testSetHeadWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme config.SnapshotLimit = 256 config.SnapshotWait = true } - chain, err := NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(db, config, gspec, nil, engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { t.Fatalf("Failed to create chain: %v", err) } diff --git a/core/blockchain_snapshot_test.go b/core/blockchain_snapshot_test.go index 348cc3f473..a84da19b2b 100644 --- a/core/blockchain_snapshot_test.go +++ b/core/blockchain_snapshot_test.go @@ -81,7 +81,7 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo } engine = ethash.NewFullFaker() ) - chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(basic.scheme), gspec, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(basic.scheme), gspec, nil, engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { t.Fatalf("Failed to create chain: %v", err) } @@ -228,7 +228,7 @@ func (snaptest *snapshotTest) test(t *testing.T) { // Restart the chain normally chain.Stop() - newchain, err := NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) + newchain, err := NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } @@ -270,13 +270,13 @@ func (snaptest *crashSnapshotTest) test(t *testing.T) { // the crash, we do restart twice here: one after the crash and one 
// after the normal stop. It's used to ensure the broken snapshot // can be detected all the time. - newchain, err := NewBlockChain(newdb, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) + newchain, err := NewBlockChain(newdb, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } newchain.Stop() - newchain, err = NewBlockChain(newdb, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) + newchain, err = NewBlockChain(newdb, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } @@ -313,7 +313,7 @@ func (snaptest *gappedSnapshotTest) test(t *testing.T) { SnapshotLimit: 0, StateScheme: snaptest.scheme, } - newchain, err := NewBlockChain(snaptest.db, cacheConfig, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) + newchain, err := NewBlockChain(snaptest.db, cacheConfig, snaptest.gspec, nil, snaptest.engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } @@ -321,7 +321,7 @@ func (snaptest *gappedSnapshotTest) test(t *testing.T) { newchain.Stop() // Restart the chain with enabling the snapshot - newchain, err = NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) + newchain, err = NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } @@ -349,7 +349,7 @@ func (snaptest *setHeadSnapshotTest) test(t *testing.T) { chain.SetHead(snaptest.setHead) chain.Stop() - newchain, err := NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) + newchain, err := NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } @@ -385,7 +385,7 @@ func (snaptest *wipeCrashSnapshotTest) test(t *testing.T) { SnapshotLimit: 0, StateScheme: snaptest.scheme, } - newchain, err := NewBlockChain(snaptest.db, config, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) + newchain, err := NewBlockChain(snaptest.db, config, snaptest.gspec, nil, snaptest.engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } @@ -402,7 +402,7 @@ func (snaptest *wipeCrashSnapshotTest) test(t *testing.T) { SnapshotWait: false, // Don't wait rebuild StateScheme: snaptest.scheme, } - tmp, err := NewBlockChain(snaptest.db, config, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) + tmp, err := NewBlockChain(snaptest.db, config, snaptest.gspec, nil, snaptest.engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } @@ -411,7 +411,7 @@ func (snaptest *wipeCrashSnapshotTest) test(t *testing.T) { tmp.triedb.Close() tmp.stopWithoutSaving() - 
newchain, err = NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) + newchain, err = NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 22db20a23e..9f799181d9 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -61,7 +61,7 @@ func newCanonical(engine consensus.Engine, n int, full bool, scheme string) (eth } ) // Initialize a fresh chain with only a genesis block - blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil) + blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) // Create and inject the requested chain if n == 0 { @@ -164,7 +164,7 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error { return err } statedb.SetExpectedStateRoot(block.Root()) - receipts, _, usedGas, err := blockchain.processor.Process(block, statedb, vm.Config{}) + receipts, _, usedGas, err := blockchain.processor.Process(block, statedb, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}) if err != nil { blockchain.reportBlock(block, receipts, err) return err @@ -741,7 +741,7 @@ func testReorgBadHashes(t *testing.T, full bool, scheme string) { blockchain.Stop() // Create a new BlockChain and check that it rolled back the state. - ncm, err := NewBlockChain(blockchain.db, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + ncm, err := NewBlockChain(blockchain.db, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { t.Fatalf("failed to create new chain manager: %v", err) } @@ -865,7 +865,7 @@ func testFastVsFullChains(t *testing.T, scheme string) { }) // Import the chain as an archive node for the comparison baseline archiveDb := rawdb.NewMemoryDatabase() - archive, _ := NewBlockChain(archiveDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + archive, _ := NewBlockChain(archiveDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) defer archive.Stop() if n, err := archive.InsertChain(blocks); err != nil { @@ -873,7 +873,7 @@ func testFastVsFullChains(t *testing.T, scheme string) { } // Fast import the chain as a non-archive node to test fastDb := rawdb.NewMemoryDatabase() - fast, _ := NewBlockChain(fastDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + fast, _ := NewBlockChain(fastDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) defer fast.Stop() headers := make([]*types.Header, len(blocks)) @@ -893,7 +893,7 @@ func testFastVsFullChains(t *testing.T, scheme string) { } defer ancientDb.Close() - ancient, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + ancient, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, 
ethash.NewFaker(), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) defer ancient.Stop() if n, err := ancient.InsertHeaderChain(headers); err != nil { @@ -1013,7 +1013,7 @@ func testLightVsFastVsFullChainHeads(t *testing.T, scheme string) { archiveCaching.TrieDirtyDisabled = true archiveCaching.StateScheme = scheme - archive, _ := NewBlockChain(archiveDb, &archiveCaching, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + archive, _ := NewBlockChain(archiveDb, &archiveCaching, gspec, nil, ethash.NewFaker(), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if n, err := archive.InsertChain(blocks); err != nil { t.Fatalf("failed to process block %d: %v", n, err) } @@ -1026,7 +1026,7 @@ func testLightVsFastVsFullChainHeads(t *testing.T, scheme string) { // Import the chain as a non-archive node and ensure all pointers are updated fastDb := makeDb() defer fastDb.Close() - fast, _ := NewBlockChain(fastDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + fast, _ := NewBlockChain(fastDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) defer fast.Stop() headers := make([]*types.Header, len(blocks)) @@ -1046,7 +1046,7 @@ func testLightVsFastVsFullChainHeads(t *testing.T, scheme string) { // Import the chain as a ancient-first node and ensure all pointers are updated ancientDb := makeDb() defer ancientDb.Close() - ancient, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + ancient, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) defer ancient.Stop() if n, err := ancient.InsertHeaderChain(headers); err != nil { @@ -1065,7 +1065,7 @@ func testLightVsFastVsFullChainHeads(t *testing.T, scheme string) { // Import the chain as a light node and ensure all pointers are updated lightDb := makeDb() defer lightDb.Close() - light, _ := NewBlockChain(lightDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + light, _ := NewBlockChain(lightDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if n, err := light.InsertHeaderChain(headers); err != nil { t.Fatalf("failed to insert header %d: %v", n, err) } @@ -1138,7 +1138,7 @@ func testChainTxReorgs(t *testing.T, scheme string) { }) // Import the chain. This runs all block validation rules. 
db := rawdb.NewMemoryDatabase() - blockchain, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + blockchain, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if i, err := blockchain.InsertChain(chain); err != nil { t.Fatalf("failed to insert original chain[%d]: %v", i, err) } @@ -1212,7 +1212,7 @@ func testLogReorgs(t *testing.T, scheme string) { signer = types.LatestSigner(gspec.Config) ) - blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) defer blockchain.Stop() rmLogsCh := make(chan RemovedLogsEvent) @@ -1268,7 +1268,7 @@ func testLogRebirth(t *testing.T, scheme string) { gspec = &Genesis{Config: params.TestChainConfig, Alloc: types.GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}} signer = types.LatestSigner(gspec.Config) engine = ethash.NewFaker() - blockchain, _ = NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil) + blockchain, _ = NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) ) defer blockchain.Stop() @@ -1349,7 +1349,7 @@ func testSideLogRebirth(t *testing.T, scheme string) { addr1 = crypto.PubkeyToAddress(key1.PublicKey) gspec = &Genesis{Config: params.TestChainConfig, Alloc: types.GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}} signer = types.LatestSigner(gspec.Config) - blockchain, _ = NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + blockchain, _ = NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) ) defer blockchain.Stop() @@ -1448,7 +1448,7 @@ func testReorgSideEvent(t *testing.T, scheme string) { } signer = types.LatestSigner(gspec.Config) ) - blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) defer blockchain.Stop() _, chain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 3, func(i int, gen *BlockGen) {}) @@ -1631,8 +1631,7 @@ func testEIP155Transition(t *testing.T, scheme string) { block.AddTx(tx) } }) - - blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) defer blockchain.Stop() if _, err := blockchain.InsertChain(blocks); err != nil { @@ -1725,7 +1724,7 @@ func testEIP161AccountRemoval(t *testing.T, scheme string) { block.AddTx(tx) }) // account must exist pre eip 161 - blockchain, _ := 
NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) defer blockchain.Stop() if _, err := blockchain.InsertChain(types.Blocks{blocks[0]}); err != nil { @@ -1740,7 +1739,7 @@ func testEIP161AccountRemoval(t *testing.T, scheme string) { t.Fatal(err) } if st, _ := blockchain.State(); st.Exist(theAddr) { - t.Error("account should not exist") + t.Error("account should not exist", "triExist?", st.TriHasAccount(theAddr), "SnapExist?", st.SnapHasAccount(theAddr)) } // account mustn't be created post eip 161 @@ -1783,7 +1782,7 @@ func testBlockchainHeaderchainReorgConsistency(t *testing.T, scheme string) { } // Import the canonical and fork chain side by side, verifying the current block // and current header consistency - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -1827,7 +1826,7 @@ func TestTrieForkGC(t *testing.T) { forks[i] = fork[0] } // Import the canonical and fork chain side by side, forcing the trie cache to cache both - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -1873,7 +1872,7 @@ func testLargeReorgTrieGC(t *testing.T, scheme string) { db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false) defer db.Close() - chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -1944,7 +1943,7 @@ func testBlockchainRecovery(t *testing.T, scheme string) { t.Fatalf("failed to create temp freezer db: %v", err) } defer ancientDb.Close() - ancient, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + ancient, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) headers := make([]*types.Header, len(blocks)) for i, block := range blocks { @@ -1964,7 +1963,7 @@ func testBlockchainRecovery(t *testing.T, scheme string) { rawdb.WriteHeadFastBlockHash(ancientDb, midBlock.Hash()) // Reopen broken blockchain again - ancient, _ = NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + ancient, _ = NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) defer ancient.Stop() if num := ancient.CurrentBlock().Number.Uint64(); num != 0 { t.Errorf("head block mismatch: have 
#%v, want #%v", num, 0) @@ -2016,7 +2015,7 @@ func testInsertReceiptChainRollback(t *testing.T, scheme string) { } defer ancientDb.Close() - ancientChain, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + ancientChain, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) defer ancientChain.Stop() // Import the canonical header chain. @@ -2083,7 +2082,7 @@ func testLowDiffLongChain(t *testing.T, scheme string) { diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false) defer diskdb.Close() - chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -2145,7 +2144,7 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon mergeBlock = math.MaxInt32 ) // Generate and import the canonical chain - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -2171,6 +2170,7 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon } nonce++ }) + if n, err := chain.InsertChain(blocks); err != nil { t.Fatalf("block %d: failed to insert into chain: %v", n, err) } @@ -2178,7 +2178,6 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon lastPrunedIndex := len(blocks) - TriesInMemory - 1 lastPrunedBlock := blocks[lastPrunedIndex] firstNonPrunedBlock := blocks[len(blocks)-TriesInMemory] - // Verify pruning of lastPrunedBlock if chain.HasBlockAndState(lastPrunedBlock.Hash(), lastPrunedBlock.NumberU64()) { t.Errorf("Block %d not pruned", lastPrunedBlock.NumberU64()) @@ -2187,7 +2186,6 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon if !chain.HasBlockAndState(firstNonPrunedBlock.Hash(), firstNonPrunedBlock.NumberU64()) { t.Errorf("Block %d pruned", firstNonPrunedBlock.NumberU64()) } - // Activate the transition in the middle of the chain if mergePoint == 1 { merger.ReachTTD() @@ -2303,7 +2301,7 @@ func testInsertKnownChainData(t *testing.T, typ string, scheme string) { } defer chaindb.Close() - chain, err := NewBlockChain(chaindb, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(chaindb, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -2474,7 +2472,7 @@ func testInsertKnownChainDataWithMerging(t *testing.T, typ string, mergeHeight i } defer chaindb.Close() - chain, err := NewBlockChain(chaindb, nil, genesis, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(chaindb, nil, genesis, nil, engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -2588,7 +2586,7 @@ func getLongAndShortChains(scheme string) (*BlockChain, 
[]*types.Block, []*types genDb, longChain, _ := GenerateChainWithGenesis(genesis, engine, 80, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) }) - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { return nil, nil, nil, nil, fmt.Errorf("failed to create tester chain: %v", err) } @@ -2724,6 +2722,191 @@ func testReorgToShorterRemovesCanonMappingHeaderChain(t *testing.T, scheme strin } } +func TestTransactionIndices(t *testing.T) { + // Configure and generate a sample block chain + var ( + key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + address = crypto.PubkeyToAddress(key.PublicKey) + funds = big.NewInt(100000000000000000) + gspec = &Genesis{ + Config: params.TestChainConfig, + Alloc: GenesisAlloc{address: {Balance: funds}}, + BaseFee: big.NewInt(params.InitialBaseFee), + } + signer = types.LatestSigner(gspec.Config) + ) + _, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 128, func(i int, block *BlockGen) { + tx, err := types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, block.header.BaseFee, nil), signer, key) + if err != nil { + panic(err) + } + block.AddTx(tx) + }) + + check := func(tail *uint64, chain *BlockChain) { + stored := rawdb.ReadTxIndexTail(chain.db) + if tail == nil && stored != nil { + t.Fatalf("Oldest indexded block mismatch, want nil, have %d", *stored) + } + if tail != nil && *stored != *tail { + t.Fatalf("Oldest indexded block mismatch, want %d, have %d", *tail, *stored) + } + if tail != nil { + for i := *tail; i <= chain.CurrentBlock().Number.Uint64(); i++ { + block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i) + if block.Transactions().Len() == 0 { + continue + } + for _, tx := range block.Transactions() { + if index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()); index == nil { + t.Fatalf("Miss transaction indice, number %d hash %s", i, tx.Hash().Hex()) + } + } + } + for i := uint64(0); i < *tail; i++ { + block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i) + if block.Transactions().Len() == 0 { + continue + } + for _, tx := range block.Transactions() { + if index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()); index != nil { + t.Fatalf("Transaction indice should be deleted, number %d hash %s", i, tx.Hash().Hex()) + } + } + } + } + } + // Init block chain with external ancients, check all needed indices has been indexed. 
+ limit := []uint64{0, 32, 64, 128} + for _, l := range limit { + frdir := t.TempDir() + ancientDb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false) + rawdb.WriteAncientBlocks(ancientDb, append([]*types.Block{gspec.ToBlock()}, blocks...), append([]types.Receipts{{}}, receipts...), big.NewInt(0)) + + l := l + chain, err := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, &l) + if err != nil { + t.Fatalf("failed to create tester chain: %v", err) + } + chain.indexBlocks(rawdb.ReadTxIndexTail(ancientDb), 128, make(chan struct{})) + + var tail uint64 + if l != 0 { + tail = uint64(128) - l + 1 + } + check(&tail, chain) + chain.Stop() + ancientDb.Close() + os.RemoveAll(frdir) + } + + // Reconstruct a block chain which only reserves HEAD-64 tx indices + ancientDb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false) + defer ancientDb.Close() + + rawdb.WriteAncientBlocks(ancientDb, append([]*types.Block{gspec.ToBlock()}, blocks...), append([]types.Receipts{{}}, receipts...), big.NewInt(0)) + limit = []uint64{0, 64 /* drop stale */, 32 /* shorten history */, 64 /* extend history */, 0 /* restore all */} + for _, l := range limit { + l := l + chain, err := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, &l) + if err != nil { + t.Fatalf("failed to create tester chain: %v", err) + } + var tail uint64 + if l != 0 { + tail = uint64(128) - l + 1 + } + chain.indexBlocks(rawdb.ReadTxIndexTail(ancientDb), 128, make(chan struct{})) + check(&tail, chain) + chain.Stop() + } +} + +func TestSkipStaleTxIndicesInSnapSync(t *testing.T) { + testSkipStaleTxIndicesInSnapSync(t, rawdb.HashScheme) + testSkipStaleTxIndicesInSnapSync(t, rawdb.PathScheme) +} + +func testSkipStaleTxIndicesInSnapSync(t *testing.T, scheme string) { + // Configure and generate a sample block chain + var ( + key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + address = crypto.PubkeyToAddress(key.PublicKey) + funds = big.NewInt(100000000000000000) + gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{address: {Balance: funds}}} + signer = types.LatestSigner(gspec.Config) + ) + _, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 128, func(i int, block *BlockGen) { + tx, err := types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, block.header.BaseFee, nil), signer, key) + if err != nil { + panic(err) + } + block.AddTx(tx) + }) + + check := func(tail *uint64, chain *BlockChain) { + stored := rawdb.ReadTxIndexTail(chain.db) + if tail == nil && stored != nil { + t.Fatalf("Oldest indexded block mismatch, want nil, have %d", *stored) + } + if tail != nil && *stored != *tail { + t.Fatalf("Oldest indexded block mismatch, want %d, have %d", *tail, *stored) + } + if tail != nil { + for i := *tail; i <= chain.CurrentBlock().Number.Uint64(); i++ { + block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i) + if block.Transactions().Len() == 0 { + continue + } + for _, tx := range block.Transactions() { + if index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()); index == nil { + t.Fatalf("Miss transaction indice, number %d hash %s", i, tx.Hash().Hex()) + } + } + } + for i := uint64(0); i < *tail; i++ { + block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i) + if 
block.Transactions().Len() == 0 { + continue + } + for _, tx := range block.Transactions() { + if index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()); index != nil { + t.Fatalf("Transaction indice should be deleted, number %d hash %s", i, tx.Hash().Hex()) + } + } + } + } + } + + ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false) + if err != nil { + t.Fatalf("failed to create temp freezer db: %v", err) + } + defer ancientDb.Close() + + // Import all blocks into ancient db, only HEAD-32 indices are kept. + l := uint64(32) + chain, err := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, &l) + if err != nil { + t.Fatalf("failed to create tester chain: %v", err) + } + defer chain.Stop() + + headers := make([]*types.Header, len(blocks)) + for i, block := range blocks { + headers[i] = block.Header() + } + if n, err := chain.InsertHeaderChain(headers); err != nil { + t.Fatalf("failed to insert header %d: %v", n, err) + } + // The indices before ancient-N(32) should be ignored. After that all blocks should be indexed. + if n, err := chain.InsertReceiptChain(blocks, receipts, 64); err != nil { + t.Fatalf("block %d: failed to insert into chain: %v", n, err) + } + tail := uint64(32) + check(&tail, chain) +} + // Benchmarks large blocks with value transfers to non-existing accounts func benchmarkLargeNumberOfValueToNonexisting(b *testing.B, numTxs, numBlocks int, recipientFn func(uint64) common.Address, dataFn func(uint64) []byte) { var ( @@ -2764,7 +2947,7 @@ func benchmarkLargeNumberOfValueToNonexisting(b *testing.B, numTxs, numBlocks in b.ResetTimer() for i := 0; i < b.N; i++ { // Import the shared chain and the original canonical one - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { b.Fatalf("failed to create tester chain: %v", err) } @@ -2851,7 +3034,7 @@ func testSideImportPrunedBlocks(t *testing.T, scheme string) { // Generate and import the canonical chain _, blocks, _ := GenerateChainWithGenesis(genesis, engine, 2*TriesInMemory, nil) - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -2951,7 +3134,7 @@ func testDeleteCreateRevert(t *testing.T, scheme string) { b.AddTx(tx) }) // Import the canonical chain - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -3549,7 +3732,7 @@ func testEIP2718Transition(t *testing.T, scheme string) { }) // Import the canonical chain - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), 
DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -3643,7 +3826,7 @@ func testEIP1559Transition(t *testing.T, scheme string) { b.AddTx(tx) }) - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -3755,7 +3938,8 @@ func testSetCanonical(t *testing.T, scheme string) { diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false) defer diskdb.Close() - chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, + vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -3864,7 +4048,7 @@ func testCanonicalHashMarker(t *testing.T, scheme string) { _, forkB, _ := GenerateChainWithGenesis(gspec, engine, c.forkB, func(i int, gen *BlockGen) {}) // Initialize test chain - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -3920,6 +4104,212 @@ func testCanonicalHashMarker(t *testing.T, scheme string) { } } +// TestTxIndexer tests the tx indexes are updated correctly. +func TestTxIndexer(t *testing.T) { + var ( + testBankKey, _ = crypto.GenerateKey() + testBankAddress = crypto.PubkeyToAddress(testBankKey.PublicKey) + testBankFunds = big.NewInt(1000000000000000000) + + gspec = &Genesis{ + Config: params.TestChainConfig, + Alloc: GenesisAlloc{testBankAddress: {Balance: testBankFunds}}, + BaseFee: big.NewInt(params.InitialBaseFee), + } + engine = ethash.NewFaker() + nonce = uint64(0) + ) + _, blocks, receipts := GenerateChainWithGenesis(gspec, engine, 128, func(i int, gen *BlockGen) { + tx, _ := types.SignTx(types.NewTransaction(nonce, common.HexToAddress("0xdeadbeef"), big.NewInt(1000), params.TxGas, big.NewInt(10*params.InitialBaseFee), nil), types.HomesteadSigner{}, testBankKey) + gen.AddTx(tx) + nonce += 1 + }) + + // verifyIndexes checks if the transaction indexes are present or not + // of the specified block. + verifyIndexes := func(db ethdb.Database, number uint64, exist bool) { + if number == 0 { + return + } + block := blocks[number-1] + for _, tx := range block.Transactions() { + lookup := rawdb.ReadTxLookupEntry(db, tx.Hash()) + if exist && lookup == nil { + t.Fatalf("missing %d %x", number, tx.Hash().Hex()) + } + if !exist && lookup != nil { + t.Fatalf("unexpected %d %x", number, tx.Hash().Hex()) + } + } + } + // verifyRange runs verifyIndexes for a range of blocks, from and to are included. 
+ verifyRange := func(db ethdb.Database, from, to uint64, exist bool) { + for number := from; number <= to; number += 1 { + verifyIndexes(db, number, exist) + } + } + verify := func(db ethdb.Database, expTail uint64) { + tail := rawdb.ReadTxIndexTail(db) + if tail == nil { + t.Fatal("Failed to write tx index tail") + } + if *tail != expTail { + t.Fatalf("Unexpected tx index tail, want %v, got %d", expTail, *tail) + } + if *tail != 0 { + verifyRange(db, 0, *tail-1, false) + } + verifyRange(db, *tail, 128, true) + } + + var cases = []struct { + limitA uint64 + tailA uint64 + limitB uint64 + tailB uint64 + limitC uint64 + tailC uint64 + }{ + { + // LimitA: 0 + // TailA: 0 + // + // all blocks are indexed + limitA: 0, + tailA: 0, + + // LimitB: 1 + // TailB: 128 + // + // block-128 is indexed + limitB: 1, + tailB: 128, + + // LimitB: 64 + // TailB: 65 + // + // block [65, 128] are indexed + limitC: 64, + tailC: 65, + }, + { + // LimitA: 64 + // TailA: 65 + // + // block [65, 128] are indexed + limitA: 64, + tailA: 65, + + // LimitB: 1 + // TailB: 128 + // + // block-128 is indexed + limitB: 1, + tailB: 128, + + // LimitB: 64 + // TailB: 65 + // + // block [65, 128] are indexed + limitC: 64, + tailC: 65, + }, + { + // LimitA: 127 + // TailA: 2 + // + // block [2, 128] are indexed + limitA: 127, + tailA: 2, + + // LimitB: 1 + // TailB: 128 + // + // block-128 is indexed + limitB: 1, + tailB: 128, + + // LimitB: 64 + // TailB: 65 + // + // block [65, 128] are indexed + limitC: 64, + tailC: 65, + }, + { + // LimitA: 128 + // TailA: 1 + // + // block [2, 128] are indexed + limitA: 128, + tailA: 1, + + // LimitB: 1 + // TailB: 128 + // + // block-128 is indexed + limitB: 1, + tailB: 128, + + // LimitB: 64 + // TailB: 65 + // + // block [65, 128] are indexed + limitC: 64, + tailC: 65, + }, + { + // LimitA: 129 + // TailA: 0 + // + // block [0, 128] are indexed + limitA: 129, + tailA: 0, + + // LimitB: 1 + // TailB: 128 + // + // block-128 is indexed + limitB: 1, + tailB: 128, + + // LimitB: 64 + // TailB: 65 + // + // block [65, 128] are indexed + limitC: 64, + tailC: 65, + }, + } + for _, c := range cases { + frdir := t.TempDir() + db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false) + rawdb.WriteAncientBlocks(db, append([]*types.Block{gspec.ToBlock()}, blocks...), append([]types.Receipts{{}}, receipts...), big.NewInt(0)) + + // Index the initial blocks from ancient store + chain, _ := NewBlockChain(db, nil, gspec, nil, engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, &c.limitA) + chain.indexBlocks(nil, 128, make(chan struct{})) + verify(db, c.tailA) + + chain.SetTxLookupLimit(c.limitB) + chain.indexBlocks(rawdb.ReadTxIndexTail(db), 128, make(chan struct{})) + verify(db, c.tailB) + + chain.SetTxLookupLimit(c.limitC) + chain.indexBlocks(rawdb.ReadTxIndexTail(db), 128, make(chan struct{})) + verify(db, c.tailC) + + // Recover all indexes + chain.SetTxLookupLimit(0) + chain.indexBlocks(rawdb.ReadTxIndexTail(db), 128, make(chan struct{})) + verify(db, 0) + + chain.Stop() + db.Close() + os.RemoveAll(frdir) + } +} + func TestCreateThenDeletePreByzantium(t *testing.T) { // We use Ropsten chain config instead of Testchain config, this is // deliberate: we want to use pre-byz rules where we have intermediate state roots @@ -4113,7 +4503,7 @@ func TestDeleteThenCreate(t *testing.T) { } }) // Import the canonical chain - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil) + chain, err := 
NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } diff --git a/core/chain_makers_test.go b/core/chain_makers_test.go index b46b898afb..12a0b00b0e 100644 --- a/core/chain_makers_test.go +++ b/core/chain_makers_test.go @@ -124,7 +124,7 @@ func TestGeneratePOSChain(t *testing.T) { }) // Import the chain. This runs all block validation rules. - blockchain, _ := NewBlockChain(db, nil, gspec, nil, beacon.NewFaker(), vm.Config{}, nil, nil) + blockchain, _ := NewBlockChain(db, nil, gspec, nil, beacon.NewFaker(), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) defer blockchain.Stop() if i, err := blockchain.InsertChain(genchain); err != nil { @@ -198,7 +198,7 @@ func ExampleGenerateChain() { db = rawdb.NewMemoryDatabase() genDb = rawdb.NewMemoryDatabase() ) - + // Ensure that key1 has some funds in the genesis block. gspec := &Genesis{ Config: &params.ChainConfig{HomesteadBlock: new(big.Int)}, @@ -239,7 +239,7 @@ func ExampleGenerateChain() { }) // Import the chain. This runs all block validation rules. - blockchain, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(rawdb.HashScheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + blockchain, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(rawdb.HashScheme), gspec, nil, ethash.NewFaker(), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) defer blockchain.Stop() if i, err := blockchain.InsertChain(chain); err != nil { diff --git a/core/dao_test.go b/core/dao_test.go index b9a899ef2f..3d3192f5bd 100644 --- a/core/dao_test.go +++ b/core/dao_test.go @@ -50,7 +50,7 @@ func TestDAOForkRangeExtradata(t *testing.T) { BaseFee: big.NewInt(params.InitialBaseFee), Config: &proConf, } - proBc, _ := NewBlockChain(proDb, nil, progspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + proBc, _ := NewBlockChain(proDb, nil, progspec, nil, ethash.NewFaker(), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) defer proBc.Stop() conDb := rawdb.NewMemoryDatabase() @@ -62,7 +62,7 @@ func TestDAOForkRangeExtradata(t *testing.T) { BaseFee: big.NewInt(params.InitialBaseFee), Config: &conConf, } - conBc, _ := NewBlockChain(conDb, nil, congspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + conBc, _ := NewBlockChain(conDb, nil, congspec, nil, ethash.NewFaker(), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) defer conBc.Stop() if _, err := proBc.InsertChain(prefix); err != nil { @@ -74,7 +74,7 @@ func TestDAOForkRangeExtradata(t *testing.T) { // Try to expand both pro-fork and non-fork chains iteratively with other camp's blocks for i := int64(0); i < params.DAOForkExtraRange.Int64(); i++ { // Create a pro-fork block, and try to feed into the no-fork chain - bc, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, congspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + bc, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, congspec, nil, ethash.NewFaker(), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().Number.Uint64())) for j := 0; j < len(blocks)/2; j++ { @@ -97,7 +97,7 @@ func TestDAOForkRangeExtradata(t *testing.T) { t.Fatalf("contra-fork chain didn't accepted no-fork block: %v", err) } // Create a no-fork block, and try to feed into the pro-fork chain - bc, _ = NewBlockChain(rawdb.NewMemoryDatabase(), nil, progspec, nil, ethash.NewFaker(),
vm.Config{}, nil, nil) + bc, _ = NewBlockChain(rawdb.NewMemoryDatabase(), nil, progspec, nil, ethash.NewFaker(), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().Number.Uint64())) for j := 0; j < len(blocks)/2; j++ { @@ -121,7 +121,7 @@ func TestDAOForkRangeExtradata(t *testing.T) { } } // Verify that contra-forkers accept pro-fork extra-datas after forking finishes - bc, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, congspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + bc, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, congspec, nil, ethash.NewFaker(), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) defer bc.Stop() blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().Number.Uint64())) @@ -139,7 +139,7 @@ func TestDAOForkRangeExtradata(t *testing.T) { t.Fatalf("contra-fork chain didn't accept pro-fork block post-fork: %v", err) } // Verify that pro-forkers accept contra-fork extra-datas after forking finishes - bc, _ = NewBlockChain(rawdb.NewMemoryDatabase(), nil, progspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + bc, _ = NewBlockChain(rawdb.NewMemoryDatabase(), nil, progspec, nil, ethash.NewFaker(), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) defer bc.Stop() blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().Number.Uint64())) diff --git a/core/error.go b/core/error.go index 8c691b17ff..0e8f8286c2 100644 --- a/core/error.go +++ b/core/error.go @@ -113,4 +113,7 @@ var ( // ErrSystemTxNotSupported is returned for any deposit tx with IsSystemTx=true after the Regolith fork ErrSystemTxNotSupported = errors.New("system tx not supported") + + // ErrParallelUnexpectedConflict is returned when execution finally get conflict error for more than block tx number + ErrParallelUnexpectedConflict = errors.New("parallel execution unexpected conflict") ) diff --git a/core/genesis_test.go b/core/genesis_test.go index 61be0bd252..6b70c2774e 100644 --- a/core/genesis_test.go +++ b/core/genesis_test.go @@ -133,7 +133,7 @@ func testSetupGenesis(t *testing.T, scheme string) { tdb := triedb.NewDatabase(db, newDbConfig(scheme)) oldcustomg.Commit(db, tdb) - bc, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), &oldcustomg, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil) + bc, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), &oldcustomg, nil, ethash.NewFullFaker(), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) defer bc.Stop() _, blocks, _ := GenerateChainWithGenesis(&oldcustomg, ethash.NewFaker(), 4, nil) diff --git a/core/parallel_state_processor.go b/core/parallel_state_processor.go new file mode 100644 index 0000000000..5c47bd1b14 --- /dev/null +++ b/core/parallel_state_processor.go @@ -0,0 +1,822 @@ +package core + +import ( + "errors" + "fmt" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/consensus/misc" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" + "runtime" + "sync" + "sync/atomic" +) + +const ( + parallelPrimarySlot = 0 + parallelShadowSlot = 1 + stage2CheckNumber = 30 // 
ConfirmStage2 will check this number of transaction, to avoid too busy stage2 check + stage2AheadNum = 3 // enter ConfirmStage2 in advance to avoid waiting for Fat Tx +) + +type ParallelStateProcessor struct { + StateProcessor + parallelNum int // leave a CPU to dispatcher + slotState []*SlotState // idle, or pending messages + allTxReqs []*ParallelTxRequest + txResultChan chan *ParallelTxResult // to notify dispatcher that a tx is done + mergedTxIndex int // the latest finalized tx index, fixme: use Atomic + pendingConfirmResults map[int][]*ParallelTxResult // tx could be executed several times, with several result to check + unconfirmedResults *sync.Map // this is for stage2 confirm, since pendingConfirmResults can not be accessed in stage2 loop + unconfirmedDBs *sync.Map + slotDBsToRelease []*state.ParallelStateDB + stopSlotChan chan struct{} + stopConfirmChan chan struct{} + debugConflictRedoNum int + // start for confirm stage2 + confirmStage2Chan chan int + stopConfirmStage2Chan chan struct{} + txReqExecuteRecord map[int]int + txReqExecuteCount int + inConfirmStage2 bool + targetStage2Count int // when executed txNUM reach it, enter stage2 RT confirm + nextStage2TxIndex int +} + +func NewParallelStateProcessor(config *params.ChainConfig, bc *BlockChain, engine consensus.Engine, parallelNum int) *ParallelStateProcessor { + processor := &ParallelStateProcessor{ + StateProcessor: *NewStateProcessor(config, bc, engine), + parallelNum: parallelNum, + } + processor.init() + return processor +} + +type MergedTxInfo struct { + slotDB *state.StateDB // used for SlotDb reuse only, otherwise, it can be discarded + StateObjectSuicided map[common.Address]struct{} + StateChangeSet map[common.Address]state.StateKeys + BalanceChangeSet map[common.Address]struct{} + CodeChangeSet map[common.Address]struct{} + AddrStateChangeSet map[common.Address]struct{} + txIndex int +} + +type SlotState struct { + pendingTxReqList []*ParallelTxRequest + primaryWakeUpChan chan struct{} + shadowWakeUpChan chan struct{} + primaryStopChan chan struct{} + shadowStopChan chan struct{} + activatedType int32 // 0: primary slot, 1: shadow slot +} + +type ParallelTxResult struct { + executedIndex int32 // the TxReq can be executed several time, increase index for each execution + slotIndex int // slot index + txReq *ParallelTxRequest + receipt *types.Receipt + slotDB *state.ParallelStateDB // if updated, it is not equal to txReq.slotDB + gpSlot *GasPool + evm *vm.EVM + result *ExecutionResult + originalNonce *uint64 + err error +} + +type ParallelTxRequest struct { + txIndex int + baseStateDB *state.StateDB + staticSlotIndex int // static dispatched id + tx *types.Transaction + gasLimit uint64 + msg *Message + block *types.Block + vmConfig vm.Config + usedGas *uint64 + curTxChan chan int + systemAddrRedo bool + runnable int32 // 0: not runnable, 1: runnable + executedNum int32 + retryNum int32 +} + +// to create and start the execution slot goroutines +func (p *ParallelStateProcessor) init() { + log.Info("Parallel execution mode is enabled", "Parallel Num", p.parallelNum, + "CPUNum", runtime.NumCPU()) + p.txResultChan = make(chan *ParallelTxResult, 200) + p.stopSlotChan = make(chan struct{}, 1) + p.stopConfirmChan = make(chan struct{}, 1) + p.stopConfirmStage2Chan = make(chan struct{}, 1) + + p.slotState = make([]*SlotState, p.parallelNum) + for i := 0; i < p.parallelNum; i++ { + p.slotState[i] = &SlotState{ + primaryWakeUpChan: make(chan struct{}, 1), + shadowWakeUpChan: make(chan struct{}, 1), + primaryStopChan: make(chan 
struct{}, 1), + shadowStopChan: make(chan struct{}, 1), + } + // start the primary slot's goroutine + go func(slotIndex int) { + p.runSlotLoop(slotIndex, parallelPrimarySlot) // this loop will be permanent live + }(i) + + // start the shadow slot. + // It is back up of the primary slot to make sure transaction can be redone ASAP, + // since the primary slot could be busy at executing another transaction + go func(slotIndex int) { + p.runSlotLoop(slotIndex, parallelShadowSlot) // this loop will be permanent live + }(i) + + } + + p.confirmStage2Chan = make(chan int, 10) + go func() { + p.runConfirmStage2Loop() // this loop will be permanent live + }() +} + +// clear slot state for each block. +func (p *ParallelStateProcessor) resetState(txNum int, statedb *state.StateDB) { + if txNum == 0 { + return + } + p.mergedTxIndex = -1 + p.debugConflictRedoNum = 0 + p.inConfirmStage2 = false + + statedb.PrepareForParallel() + p.allTxReqs = make([]*ParallelTxRequest, 0) + p.slotDBsToRelease = make([]*state.ParallelStateDB, 0, txNum) + + stateDBsToRelease := p.slotDBsToRelease + go func() { + for _, slotDB := range stateDBsToRelease { + slotDB.PutSyncPool() + } + }() + for _, slot := range p.slotState { + slot.pendingTxReqList = make([]*ParallelTxRequest, 0) + slot.activatedType = parallelPrimarySlot + } + p.unconfirmedResults = new(sync.Map) + p.unconfirmedDBs = new(sync.Map) + p.pendingConfirmResults = make(map[int][]*ParallelTxResult, 200) + p.txReqExecuteRecord = make(map[int]int, 200) + p.txReqExecuteCount = 0 + p.nextStage2TxIndex = 0 +} + +// Benefits of StaticDispatch: +// +// ** try best to make Txs with same From() in same slot +// ** reduce IPC cost by dispatch in Unit +// ** make sure same From in same slot +// ** try to make it balanced, queue to the most hungry slot for new Address +func (p *ParallelStateProcessor) doStaticDispatch(txReqs []*ParallelTxRequest) { + fromSlotMap := make(map[common.Address]int, 100) + toSlotMap := make(map[common.Address]int, 100) + for _, txReq := range txReqs { + var slotIndex = -1 + if i, ok := fromSlotMap[txReq.msg.From]; ok { + // first: same From are all in same slot + slotIndex = i + } else if txReq.msg.To != nil { + // To Address, with txIndex sorted, could be in different slot. + if i, ok := toSlotMap[*txReq.msg.To]; ok { + slotIndex = i + } + } + + // not found, dispatch to most hungry slot + if slotIndex == -1 { + var workload = len(p.slotState[0].pendingTxReqList) + slotIndex = 0 + for i, slot := range p.slotState { // can start from index 1 + if len(slot.pendingTxReqList) < workload { + slotIndex = i + workload = len(slot.pendingTxReqList) + } + } + } + // update + fromSlotMap[txReq.msg.From] = slotIndex + if txReq.msg.To != nil { + toSlotMap[*txReq.msg.To] = slotIndex + } + + slot := p.slotState[slotIndex] + txReq.staticSlotIndex = slotIndex // txReq is better to be executed in this slot + slot.pendingTxReqList = append(slot.pendingTxReqList, txReq) + } +} + +// do conflict detect +func (p *ParallelStateProcessor) hasConflict(txResult *ParallelTxResult, isStage2 bool) bool { + slotDB := txResult.slotDB + if txResult.err != nil { + return true + } else if slotDB.NeedsRedo() { + // if this is any reason that indicates this transaction needs to redo, skip the conflict check + return true + } else { + // to check if what the slot db read is correct. 
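The static dispatch above keeps all transactions from one sender in the same slot, falls back to the recipient's slot when the sender is new, and otherwise picks the least-loaded slot. A simplified standalone sketch of that policy follows; txMeta and the string addresses are hypothetical stand-ins for ParallelTxRequest and common.Address.

package main

import "fmt"

// txMeta is a hypothetical, trimmed-down stand-in for ParallelTxRequest.
type txMeta struct {
	index int
	from  string
	to    string // empty for contract creation
}

// dispatch mirrors the static policy: same From -> same slot, then same To -> same slot,
// otherwise the least-loaded ("most hungry") slot.
func dispatch(txs []txMeta, slots int) []int {
	assignment := make([]int, len(txs))
	fromSlot := make(map[string]int)
	toSlot := make(map[string]int)
	load := make([]int, slots)

	for i, tx := range txs {
		slot := -1
		if s, ok := fromSlot[tx.from]; ok {
			slot = s
		} else if tx.to != "" {
			if s, ok := toSlot[tx.to]; ok {
				slot = s
			}
		}
		if slot == -1 { // not seen before: pick the least-loaded slot
			slot = 0
			for s := 1; s < slots; s++ {
				if load[s] < load[slot] {
					slot = s
				}
			}
		}
		fromSlot[tx.from] = slot
		if tx.to != "" {
			toSlot[tx.to] = slot
		}
		load[slot]++
		assignment[i] = slot
	}
	return assignment
}

func main() {
	txs := []txMeta{{0, "A", "X"}, {1, "B", "X"}, {2, "A", "Y"}, {3, "C", ""}}
	fmt.Println(dispatch(txs, 2)) // [0 0 0 1]: A's txs stay together, B follows X's slot, C goes to the idle slot
}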
+ if !slotDB.IsParallelReadsValid(isStage2) { + return true + } + } + return false +} + +func (p *ParallelStateProcessor) switchSlot(slotIndex int) { + slot := p.slotState[slotIndex] + if atomic.CompareAndSwapInt32(&slot.activatedType, parallelPrimarySlot, parallelShadowSlot) { + // switch from normal to shadow slot + if len(slot.shadowWakeUpChan) == 0 { + slot.shadowWakeUpChan <- struct{}{} // only notify when target once + } + } else if atomic.CompareAndSwapInt32(&slot.activatedType, parallelShadowSlot, parallelPrimarySlot) { + // switch from shadow to normal slot + if len(slot.primaryWakeUpChan) == 0 { + slot.primaryWakeUpChan <- struct{}{} // only notify when target once + } + } +} + +func (p *ParallelStateProcessor) executeInSlot(slotIndex int, txReq *ParallelTxRequest) *ParallelTxResult { + atomic.AddInt32(&txReq.executedNum, 1) + slotDB := state.NewSlotDB(txReq.baseStateDB, txReq.txIndex, p.mergedTxIndex, p.unconfirmedDBs) + + blockContext := NewEVMBlockContext(txReq.block.Header(), p.bc, nil, p.config, slotDB) // can share blockContext within a block for efficiency + txContext := NewEVMTxContext(txReq.msg) + vmenv := vm.NewEVM(blockContext, txContext, slotDB, p.config, txReq.vmConfig) + + rules := p.config.Rules(txReq.block.Number(), blockContext.Random != nil, blockContext.Time) + slotDB.Prepare(rules, txReq.msg.From, vmenv.Context.Coinbase, txReq.msg.To, vm.ActivePrecompiles(rules), txReq.msg.AccessList) + + // gasLimit not accurate, but it is ok for block import. + // each slot would use its own gas pool, and will do gas limit check later + gpSlot := new(GasPool).AddGas(txReq.gasLimit) // block.GasLimit() + + on := txReq.tx.Nonce() + if txReq.msg.IsDepositTx && p.config.IsOptimismRegolith(vmenv.Context.Time) { + on = txReq.baseStateDB.GetNonce(txReq.msg.From) + } + + slotDB.SetTxContext(txReq.tx.Hash(), txReq.txIndex) + + evm, result, err := applyTransactionStageExecution(txReq.msg, gpSlot, slotDB, vmenv) + txResult := ParallelTxResult{ + executedIndex: atomic.LoadInt32(&txReq.executedNum), + slotIndex: slotIndex, + txReq: txReq, + receipt: nil, // receipt is generated in finalize stage + slotDB: slotDB, + err: err, + gpSlot: gpSlot, + evm: evm, + result: result, + originalNonce: &on, + } + + if err == nil { + p.unconfirmedDBs.Store(txReq.txIndex, slotDB) + } else { + // the transaction failed at check(nonce or balance), actually it has not been executed yet. + atomic.CompareAndSwapInt32(&txReq.runnable, 0, 1) + // the error could be caused by unconfirmed balance reference, + // the balance could insufficient to pay its gas limit, which cause it preCheck.buyGas() failed + // redo could solve it. + log.Debug("In slot execution error", "error", err, + "slotIndex", slotIndex, "txIndex", txReq.txIndex) + } + p.unconfirmedResults.Store(txReq.txIndex, &txResult) + return &txResult +} + +// to confirm a serial TxResults with same txIndex +func (p *ParallelStateProcessor) toConfirmTxIndex(targetTxIndex int, isStage2 bool) *ParallelTxResult { + if isStage2 { + if targetTxIndex <= p.mergedTxIndex+1 { + // `p.mergedTxIndex+1` is the one to be merged, + // in stage2, we do likely conflict check, for these not their turn. 
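switchSlot flips a slot between its primary and shadow goroutine with a compare-and-swap and a wake-up on a buffered channel, so a redo can start even while the other goroutine is still busy. A minimal sketch of that toggle, using a hypothetical worker type in place of SlotState:

package main

import (
	"fmt"
	"sync/atomic"
)

const (
	primary int32 = 0
	shadow  int32 = 1
)

// worker is a hypothetical pair of goroutines sharing one queue; only the
// activated side makes progress, the other waits on its wake-up channel.
type worker struct {
	activated     int32
	primaryWakeUp chan struct{}
	shadowWakeUp  chan struct{}
}

// switchOver deactivates the currently activated side and wakes the other one.
// The len()==0 check keeps at most one pending wake-up in the buffered channel.
func (w *worker) switchOver() {
	if atomic.CompareAndSwapInt32(&w.activated, primary, shadow) {
		if len(w.shadowWakeUp) == 0 {
			w.shadowWakeUp <- struct{}{}
		}
	} else if atomic.CompareAndSwapInt32(&w.activated, shadow, primary) {
		if len(w.primaryWakeUp) == 0 {
			w.primaryWakeUp <- struct{}{}
		}
	}
}

func main() {
	w := &worker{
		primaryWakeUp: make(chan struct{}, 1),
		shadowWakeUp:  make(chan struct{}, 1),
	}
	w.switchOver() // primary -> shadow
	fmt.Println("activated:", atomic.LoadInt32(&w.activated), "shadow pending:", len(w.shadowWakeUp))
}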
+ return nil + } + } + + for { + // handle a targetTxIndex in a loop + var targetResult *ParallelTxResult + if isStage2 { + result, ok := p.unconfirmedResults.Load(targetTxIndex) + if !ok { + return nil + } + targetResult = result.(*ParallelTxResult) + + // in stage 2, don't schedule a new redo if the TxReq is: + // a.runnable: it will be redone + // b.running: the new result will be more reliable, we skip check right now + if atomic.LoadInt32(&targetResult.txReq.runnable) == 1 { + return nil + } + if targetResult.executedIndex < atomic.LoadInt32(&targetResult.txReq.executedNum) { + // skip the intermediate result that is not the latest. + return nil + } + } else { + // pop one result as target result. + results := p.pendingConfirmResults[targetTxIndex] + resultsLen := len(results) + if resultsLen == 0 { // there is no pending result can be verified, break and wait for incoming results + return nil + } + targetResult = results[len(results)-1] + // last is the freshest, stack based priority + p.pendingConfirmResults[targetTxIndex] = p.pendingConfirmResults[targetTxIndex][:resultsLen-1] // remove from the queue + } + + valid := p.toConfirmTxIndexResult(targetResult, isStage2) + if !valid { + staticSlotIndex := targetResult.txReq.staticSlotIndex // it is better to run the TxReq in its static dispatch slot + if isStage2 { + atomic.CompareAndSwapInt32(&targetResult.txReq.runnable, 0, 1) // needs redo + p.debugConflictRedoNum++ + // interrupt the slot's current routine, and switch to the other routine + p.switchSlot(staticSlotIndex) + return nil + } + + if len(p.pendingConfirmResults[targetTxIndex]) == 0 { // this is the last result to check, and it is not valid + blockTxCount := targetResult.txReq.block.Transactions().Len() + // This means that the tx has been executed more than blockTxCount times, so it exits with the error. + // TODO-dav: p.mergedTxIndex+2 may be more reasonable? - this is buggy for expected exit + if targetResult.txReq.txIndex == p.mergedTxIndex+1 { + // txReq is the next to merge + if atomic.LoadInt32(&targetResult.txReq.retryNum) <= int32(blockTxCount)+3000 { + atomic.AddInt32(&targetResult.txReq.retryNum, 1) + // conflict retry + } else { + // retry 100 times and still conflict, either the tx is expected to be wrong, or something wrong. + if targetResult.err != nil { + fmt.Printf("!!!!!!!!!!! Parallel execution exited with error!!!!!, txIndex:%d, err: %v\n", targetResult.txReq.txIndex, targetResult.err) + return targetResult + } else { + // abnormal exit with conflict error, need check the parallel algorithm + targetResult.err = ErrParallelUnexpectedConflict + + fmt.Printf("!!!!!!!!!!! Parallel execution exited unexpected conflict!!!!!, txIndex:%d\n", targetResult.txReq.txIndex) + + return targetResult + } + } + } + atomic.CompareAndSwapInt32(&targetResult.txReq.runnable, 0, 1) // needs redo + p.debugConflictRedoNum++ + // interrupt its current routine, and switch to the other routine + p.switchSlot(staticSlotIndex) + return nil + } + continue + } + if isStage2 { + // likely valid, but not sure, can not deliver + return nil + } + return targetResult + } +} + +// to confirm one txResult, return true if the result is valid +// if it is in Stage 2 it is a likely result, not 100% sure +func (p *ParallelStateProcessor) toConfirmTxIndexResult(txResult *ParallelTxResult, isStage2 bool) bool { + txReq := txResult.txReq + if p.hasConflict(txResult, isStage2) { + log.Debug("HasConflict!! 
block: %d, txIndex: %d\n", txResult.txReq.block.NumberU64(), txResult.txReq.txIndex) + return false + } + if isStage2 { // not its turn + return true // likely valid, not sure, not finalized right now. + } + + // goroutine unsafe operation will be handled from here for safety + gasConsumed := txReq.gasLimit - txResult.gpSlot.Gas() + if gasConsumed != txResult.result.UsedGas { + log.Error("gasConsumed != result.UsedGas mismatch", + "gasConsumed", gasConsumed, "result.UsedGas", txResult.result.UsedGas) + } + + // ok, time to do finalize, stage2 should not be parallel + header := txReq.block.Header() + txResult.receipt, txResult.err = applyTransactionStageFinalization(txResult.evm, txResult.result, + *txReq.msg, p.config, txResult.slotDB, header, + txReq.tx, txReq.usedGas, txResult.originalNonce) + return true +} + +func (p *ParallelStateProcessor) runSlotLoop(slotIndex int, slotType int32) { + curSlot := p.slotState[slotIndex] + var wakeupChan chan struct{} + var stopChan chan struct{} + + if slotType == parallelPrimarySlot { + wakeupChan = curSlot.primaryWakeUpChan + stopChan = curSlot.primaryStopChan + } else { + wakeupChan = curSlot.shadowWakeUpChan + stopChan = curSlot.shadowStopChan + } + for { + select { + case <-stopChan: + p.stopSlotChan <- struct{}{} + continue + case <-wakeupChan: + } + + interrupted := false + for _, txReq := range curSlot.pendingTxReqList { + if txReq.txIndex <= p.mergedTxIndex { + continue + } + + if atomic.LoadInt32(&curSlot.activatedType) != slotType { + interrupted = true + // fmt.Printf("Dav -- runInLoop, - activatedType - TxREQ: %d\n", txReq.txIndex) + + break + } + if !atomic.CompareAndSwapInt32(&txReq.runnable, 1, 0) { + // not swapped: txReq.runnable == 0 + //fmt.Printf("Dav -- runInLoop, - not runnable - TxREQ: %d\n", txReq.txIndex) + continue + } + // fmt.Printf("Dav -- runInLoop, - executeInSlot - TxREQ: %d\n", txReq.txIndex) + p.txResultChan <- p.executeInSlot(slotIndex, txReq) + // fmt.Printf("Dav -- runInLoop, - loopbody tail - TxREQ: %d\n", txReq.txIndex) + } + // switched to the other slot. + if interrupted { + continue + } + + // txReq in this Slot have all been executed, try steal one from other slot. 
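The execute and steal loops both claim a request by atomically flipping its runnable flag from 1 to 0, which guarantees that at most one slot runs a given transaction at a time. A small sketch of that claim step, with a hypothetical task type in place of ParallelTxRequest:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// task is a hypothetical stand-in for ParallelTxRequest: runnable==1 means "free to take".
type task struct {
	id       int
	runnable int32
}

// claim succeeds for exactly one caller per runnable task.
func claim(t *task) bool {
	return atomic.CompareAndSwapInt32(&t.runnable, 1, 0)
}

func main() {
	tasks := []*task{{0, 1}, {1, 1}, {2, 1}, {3, 1}}
	var wg sync.WaitGroup
	var executed int64
	for worker := 0; worker < 3; worker++ { // three "slots" stealing from the same list
		wg.Add(1)
		go func() {
			defer wg.Done()
			for _, t := range tasks {
				if claim(t) {
					atomic.AddInt64(&executed, 1) // stand-in for executeInSlot
				}
			}
		}()
	}
	wg.Wait()
	fmt.Println("executed:", executed) // always 4: each task is claimed exactly once
}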
+ // as long as the TxReq is runnable, we steal it, mark it as stolen + for _, stealTxReq := range p.allTxReqs { + // fmt.Printf("Dav -- stealLoop, handle TxREQ: %d\n", stealTxReq.txIndex) + if stealTxReq.txIndex <= p.mergedTxIndex { + // fmt.Printf("Dav -- stealLoop, - txReq.txIndex <= p.mergedTxIndex - TxREQ: %d\n", stealTxReq.txIndex) + continue + } + if atomic.LoadInt32(&curSlot.activatedType) != slotType { + interrupted = true + // fmt.Printf("Dav -- stealLoop, - activatedType - TxREQ: %d\n", stealTxReq.txIndex) + + break + } + + if !atomic.CompareAndSwapInt32(&stealTxReq.runnable, 1, 0) { + // not swapped: txReq.runnable == 0 + // fmt.Printf("Dav -- stealLoop, - not runnable - TxREQ: %d\n", stealTxReq.txIndex) + + continue + } + // fmt.Printf("Dav -- stealLoop, - executeInSlot - TxREQ: %d\n", stealTxReq.txIndex) + p.txResultChan <- p.executeInSlot(slotIndex, stealTxReq) + // fmt.Printf("Dav -- stealLoop, - loopbody tail - TxREQ: %d\n", stealTxReq.txIndex) + } + } +} + +func (p *ParallelStateProcessor) runConfirmStage2Loop() { + for { + // var mergedTxIndex int + select { + case <-p.stopConfirmStage2Chan: + for len(p.confirmStage2Chan) > 0 { + <-p.confirmStage2Chan + } + p.stopSlotChan <- struct{}{} + continue + case <-p.confirmStage2Chan: + for len(p.confirmStage2Chan) > 0 { + <-p.confirmStage2Chan // drain the chan to get the latest merged txIndex + } + } + // stage 2,if all tx have been executed at least once, and its result has been received. + // in Stage 2, we will run check when merge is advanced. + // more aggressive tx result confirm, even for these Txs not in turn + // now we will be more aggressive: + // do conflict check , as long as tx result is generated, + // if lucky, it is the Tx's turn, we will do conflict check with WBNB makeup + // otherwise, do conflict check without WBNB makeup, but we will ignore WBNB's balance conflict. + // throw these likely conflicted tx back to re-execute + startTxIndex := p.mergedTxIndex + 2 // stage 2's will start from the next target merge index + endTxIndex := startTxIndex + stage2CheckNumber + txSize := len(p.allTxReqs) + if endTxIndex > (txSize - 1) { + endTxIndex = txSize - 1 + } + log.Debug("runConfirmStage2Loop", "startTxIndex", startTxIndex, "endTxIndex", endTxIndex) + // conflictNumMark := p.debugConflictRedoNum + for txIndex := startTxIndex; txIndex < endTxIndex; txIndex++ { + p.toConfirmTxIndex(txIndex, true) + } + // make sure all slots are wake up + for i := 0; i < p.parallelNum; i++ { + p.switchSlot(i) + } + } + +} + +func (p *ParallelStateProcessor) handleTxResults() *ParallelTxResult { + confirmedResult := p.toConfirmTxIndex(p.mergedTxIndex+1, false) + if confirmedResult == nil { + return nil + } + // schedule stage 2 when new Tx has been merged, schedule once and ASAP + // stage 2,if all tx have been executed at least once, and its result has been received. + // in Stage 2, we will run check when main DB is advanced, i.e., new Tx result has been merged. 
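runConfirmStage2Loop drains its notification channel before each pass, so a burst of merge notifications collapses into one check against the newest merged index. A tiny sketch of that drain idiom with a hypothetical notify channel:

package main

import "fmt"

// drain empties a buffered notification channel and returns the last value seen,
// so several queued notifications collapse into a single pass over the newest state.
func drain(notify chan int, latest int) int {
	for {
		select {
		case v := <-notify:
			latest = v
		default:
			return latest
		}
	}
}

func main() {
	notify := make(chan int, 10)
	notify <- 3
	notify <- 7
	notify <- 9
	fmt.Println(drain(notify, -1)) // 9: only the newest merged index is acted on
}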
+ if p.inConfirmStage2 && p.mergedTxIndex >= p.nextStage2TxIndex { + p.nextStage2TxIndex = p.mergedTxIndex + stage2CheckNumber + p.confirmStage2Chan <- p.mergedTxIndex + } + return confirmedResult +} + +// wait until the next Tx is executed and its result is merged to the main stateDB +func (p *ParallelStateProcessor) confirmTxResults(statedb *state.StateDB, gp *GasPool) *ParallelTxResult { + result := p.handleTxResults() + if result == nil { + return nil + } + // ok, the tx result is valid and can be merged + if result.err != nil { + return result + } + + if err := gp.SubGas(result.receipt.GasUsed); err != nil { + log.Error("gas limit reached", "block", result.txReq.block.Number(), + "txIndex", result.txReq.txIndex, "GasUsed", result.receipt.GasUsed, "gp.Gas", gp.Gas()) + } + + resultTxIndex := result.txReq.txIndex + + var root []byte + header := result.txReq.block.Header() + if p.config.IsByzantium(header.Number) { + result.slotDB.FinaliseForParallel(true, statedb) + } else { + root = result.slotDB.IntermediateRootForSlotDB(p.config.IsEIP158(header.Number), statedb).Bytes() + } + result.receipt.PostState = root + // merge slotDB into mainDB + statedb.MergeSlotDB(result.slotDB, result.receipt, resultTxIndex) + + if resultTxIndex != p.mergedTxIndex+1 { + log.Error("ProcessParallel tx result out of order", "resultTxIndex", resultTxIndex, + "p.mergedTxIndex", p.mergedTxIndex) + } + p.mergedTxIndex = resultTxIndex + + return result +} + +func (p *ParallelStateProcessor) doCleanUp() { + // 1.clean up all slot: primary and shadow, to make sure they are stopped + for _, slot := range p.slotState { + slot.primaryStopChan <- struct{}{} + slot.shadowStopChan <- struct{}{} + <-p.stopSlotChan + <-p.stopSlotChan + } + // 2.discard delayed txResults if any + for { + if len(p.txResultChan) > 0 { // drop prefetch addr? + <-p.txResultChan + continue + } + break + } + // 3.make sure the confirmation routine is stopped + p.stopConfirmStage2Chan <- struct{}{} + <-p.stopSlotChan +} + +// Implement BEP-130: Parallel Transaction Execution. 
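Although slots execute out of order, confirmTxResults only ever merges the result whose index is exactly mergedTxIndex+1, so state is committed strictly in transaction order. A condensed sketch of that in-order drain over buffered results, with hypothetical names in place of ParallelTxResult and the pending map:

package main

import "fmt"

// mergeInOrder consumes pending results keyed by tx index and commits them
// strictly in index order, the way confirmTxResults advances mergedTxIndex.
func mergeInOrder(pending map[int]string, merged int) (int, []string) {
	var committed []string
	for {
		next, ok := pending[merged+1]
		if !ok {
			return merged, committed // the next index is not ready yet; wait for more results
		}
		committed = append(committed, next)
		delete(pending, merged+1)
		merged++
	}
}

func main() {
	pending := map[int]string{2: "tx2", 0: "tx0", 1: "tx1", 4: "tx4"}
	merged, committed := mergeInOrder(pending, -1)
	fmt.Println(merged, committed) // 2 [tx0 tx1 tx2]; tx4 stays buffered until tx3 arrives
}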
+func (p *ParallelStateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (types.Receipts, []*types.Log, uint64, error) { + var ( + receipts types.Receipts + usedGas = new(uint64) + header = block.Header() + gp = new(GasPool).AddGas(block.GasLimit()) + ) + + // Mutate the block and state according to any hard-fork specs + if p.config.DAOForkSupport && p.config.DAOForkBlock != nil && p.config.DAOForkBlock.Cmp(block.Number()) == 0 { + misc.ApplyDAOHardFork(statedb) + } + if p.config.PreContractForkBlock != nil && p.config.PreContractForkBlock.Cmp(block.Number()) == 0 { + misc.ApplyPreContractHardFork(statedb) + } + + txNum := len(block.Transactions()) + p.resetState(txNum, statedb) + + // Iterate over and process the individual transactions + commonTxs := make([]*types.Transaction, 0, txNum) + + var ( + // with parallel mode, vmenv will be created inside of slot + blockContext = NewEVMBlockContext(block.Header(), p.bc, nil, p.config, statedb) + vmenv = vm.NewEVM(blockContext, vm.TxContext{}, statedb, p.config, cfg) + signer = types.MakeSigner(p.bc.chainConfig, block.Number(), block.Time()) + ) + + if beaconRoot := block.BeaconRoot(); beaconRoot != nil { + ProcessBeaconBlockRoot(*beaconRoot, vmenv, statedb) + } + + // var txReqs []*ParallelTxRequest + for i, tx := range block.Transactions() { + // can be moved it into slot for efficiency, but signer is not concurrent safe + // Parallel Execution 1.0&2.0 is for full sync mode, Nonce PreCheck is not necessary + // And since we will do out-of-order execution, the Nonce PreCheck could fail. + // We will disable it and leave it to Parallel 3.0 which is for validator mode + msg, err := TransactionToMessage(tx, signer, header.BaseFee) + if err != nil { + return nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", i, tx.Hash().Hex(), err) + } + + // parallel start, wrap an exec message, which will be dispatched to a slot + txReq := &ParallelTxRequest{ + txIndex: i, + baseStateDB: statedb, + staticSlotIndex: -1, + tx: tx, + gasLimit: block.GasLimit(), // gp.Gas(). + msg: msg, + block: block, + vmConfig: cfg, + usedGas: usedGas, + curTxChan: make(chan int, 1), + systemAddrRedo: false, // set to true, when systemAddr access is detected. + runnable: 1, // 0: not runnable, 1: runnable + executedNum: 0, + retryNum: 0, + } + p.allTxReqs = append(p.allTxReqs, txReq) + } + // set up stage2 enter criteria + p.targetStage2Count = len(p.allTxReqs) + if p.targetStage2Count > 50 { + // usually, the last Tx could be the bottleneck it could be very slow, + // so it is better for us to enter stage 2 a bit earlier + p.targetStage2Count = p.targetStage2Count - stage2AheadNum + } + + p.doStaticDispatch(p.allTxReqs) // todo: put txReqs in unit? + + // after static dispatch, we notify the slot to work. + for _, slot := range p.slotState { + slot.primaryWakeUpChan <- struct{}{} + } + + // wait until all Txs have processed. 
+ for { + if len(commonTxs) == txNum { + // put it ahead of chan receive to avoid waiting for empty block + break + } + unconfirmedResult := <-p.txResultChan + unconfirmedTxIndex := unconfirmedResult.txReq.txIndex + if unconfirmedTxIndex <= p.mergedTxIndex { + // log.Warn("drop merged txReq", "unconfirmedTxIndex", unconfirmedTxIndex, "p.mergedTxIndex", p.mergedTxIndex) + continue + } + p.pendingConfirmResults[unconfirmedTxIndex] = append(p.pendingConfirmResults[unconfirmedTxIndex], unconfirmedResult) + + // schedule prefetch once only when unconfirmedResult is valid + if unconfirmedResult.err == nil { + if _, ok := p.txReqExecuteRecord[unconfirmedTxIndex]; !ok { + p.txReqExecuteRecord[unconfirmedTxIndex] = 0 + p.txReqExecuteCount++ + statedb.AddrPrefetch(unconfirmedResult.slotDB) // todo: prefetch when it is not merged + // enter stage2, RT confirm + if !p.inConfirmStage2 && p.txReqExecuteCount == p.targetStage2Count { + p.inConfirmStage2 = true + } + } + p.txReqExecuteRecord[unconfirmedTxIndex]++ + } + + for { + result := p.confirmTxResults(statedb, gp) + if result == nil { + break + } + // update tx result + if result.err != nil { + log.Error("ProcessParallel a failed tx", "resultSlotIndex", result.slotIndex, + "resultTxIndex", result.txReq.txIndex, "result.err", result.err) + p.doCleanUp() + return nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", result.txReq.txIndex, result.txReq.tx.Hash().Hex(), result.err) + } + commonTxs = append(commonTxs, result.txReq.tx) + receipts = append(receipts, result.receipt) + } + } + + // to do clean up when the block is processed + p.doCleanUp() + + // len(commonTxs) could be 0, such as: https://bscscan.com/block/14580486 + if len(commonTxs) > 0 { + log.Info("ProcessParallel tx all done", "block", header.Number, "usedGas", *usedGas, + "txNum", txNum, + "len(commonTxs)", len(commonTxs), + "conflictNum", p.debugConflictRedoNum, + "redoRate(%)", 100*(p.debugConflictRedoNum)/len(commonTxs)) + } + + // Fail if Shanghai not enabled and len(withdrawals) is non-zero. + withdrawals := block.Withdrawals() + if len(withdrawals) > 0 && !p.config.IsShanghai(block.Number(), block.Time()) { + return nil, nil, 0, errors.New("withdrawals before shanghai") + } + // Finalize the block, applying any consensus engine specific extras (e.g. block rewards) + p.engine.Finalize(p.bc, header, statedb, commonTxs, block.Uncles(), withdrawals) + + var allLogs []*types.Log + for _, receipt := range receipts { + allLogs = append(allLogs, receipt.Logs...) + } + return receipts, allLogs, *usedGas, nil +} + +func applyTransactionStageExecution(msg *Message, gp *GasPool, statedb *state.ParallelStateDB, evm *vm.EVM) (*vm.EVM, *ExecutionResult, error) { + // Create a new context to be used in the EVM environment. + txContext := NewEVMTxContext(msg) + evm.Reset(txContext, statedb) + + // Apply the transaction to the current state (included in the env). + result, err := ApplyMessage(evm, msg, gp) + + if err != nil { + return nil, nil, err + } + + return evm, result, err +} + +func applyTransactionStageFinalization(evm *vm.EVM, result *ExecutionResult, msg Message, config *params.ChainConfig, + statedb *state.ParallelStateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, nonce *uint64) (*types.Receipt, error) { + + *usedGas += result.UsedGas + + // Create a new receipt for the transaction, storing the intermediate root and gas used + // by the tx. 
+ receipt := &types.Receipt{Type: tx.Type(), PostState: nil, CumulativeGasUsed: *usedGas} + if result.Failed() { + receipt.Status = types.ReceiptStatusFailed + } else { + receipt.Status = types.ReceiptStatusSuccessful + } + receipt.TxHash = tx.Hash() + receipt.GasUsed = result.UsedGas + + if msg.IsDepositTx && config.IsOptimismRegolith(evm.Context.Time) { + // The actual nonce for deposit transactions is only recorded from Regolith onwards and + // otherwise must be nil. + receipt.DepositNonce = nonce + // The DepositReceiptVersion for deposit transactions is only recorded from Canyon onwards + // and otherwise must be nil. + if config.IsOptimismCanyon(evm.Context.Time) { + receipt.DepositReceiptVersion = new(uint64) + *receipt.DepositReceiptVersion = types.CanyonDepositReceiptVersion + } + } + if tx.Type() == types.BlobTxType { + receipt.BlobGasUsed = uint64(len(tx.BlobHashes()) * params.BlobTxBlobGasPerBlob) + receipt.BlobGasPrice = evm.Context.BlobBaseFee + } + // If the transaction created a contract, store the creation address in the receipt. + if msg.To == nil { + receipt.ContractAddress = crypto.CreateAddress(evm.TxContext.Origin, *nonce) + } + // Set the receipt logs and create the bloom filter. + receipt.Logs = statedb.GetLogs(tx.Hash(), header.Number.Uint64(), header.Hash()) + receipt.Bloom = types.CreateBloom(types.Receipts{receipt}) + receipt.BlockHash = header.Hash() + receipt.BlockNumber = header.Number + receipt.TransactionIndex = uint(statedb.TxIndex()) + return receipt, nil +} diff --git a/core/state/dump.go b/core/state/dump.go index 55abb50f1c..1b0c0c0dae 100644 --- a/core/state/dump.go +++ b/core/state/dump.go @@ -160,7 +160,7 @@ func (s *StateDB) DumpToCollector(c DumpCollector, conf *DumpConfig) (nextKey [] address = &addr account.Address = address } - obj := newObject(s, addr, &data) + obj := newObject(s, s.isParallel, addr, &data) if !conf.SkipCode { account.Code = obj.Code() } diff --git a/core/state/interface.go b/core/state/interface.go new file mode 100644 index 0000000000..3e808aa82e --- /dev/null +++ b/core/state/interface.go @@ -0,0 +1,81 @@ +// Copyright 2016 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package state + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/holiman/uint256" +) + +// StateDBer is copied from vm/interface.go +// It is used by StateObject & Journal right now, to abstract StateDB & ParallelStateDB +type StateDBer interface { + getBaseStateDB() *StateDB + getStateObject(common.Address) *stateObject // only accessible for journal + storeStateObj(common.Address, *stateObject) // only accessible for journal + + CreateAccount(common.Address) + + SubBalance(common.Address, *uint256.Int) + AddBalance(common.Address, *uint256.Int) + GetBalance(common.Address) *uint256.Int + + GetNonce(common.Address) uint64 + SetNonce(common.Address, uint64) + + GetCodeHash(common.Address) common.Hash + GetCode(common.Address) []byte + SetCode(common.Address, []byte) + GetCodeSize(common.Address) int + + AddRefund(uint64) + SubRefund(uint64) + GetRefund() uint64 + + GetCommittedState(common.Address, common.Hash) common.Hash + GetState(common.Address, common.Hash) common.Hash + SetState(common.Address, common.Hash, common.Hash) + + SelfDestruct(common.Address) + HasSelfDestructed(common.Address) bool + + // Exist reports whether the given account exists in state. + // Notably this should also return true for suicided accounts. + Exist(common.Address) bool + // Empty returns whether the given account is empty. Empty + // is defined according to EIP161 (balance = nonce = code = 0). + Empty(common.Address) bool + + //PrepareAccessList(sender common.Address, dest *common.Address, precompiles []common.Address, txAccesses types.AccessList) + AddressInAccessList(addr common.Address) bool + SlotInAccessList(addr common.Address, slot common.Hash) (addressOk bool, slotOk bool) + // AddAddressToAccessList adds the given address to the access list. This operation is safe to perform + // even if the feature/fork is not active yet + AddAddressToAccessList(addr common.Address) + // AddSlotToAccessList adds the given (address,slot) to the access list. This operation is safe to perform + // even if the feature/fork is not active yet + AddSlotToAccessList(addr common.Address, slot common.Hash) + + RevertToSnapshot(int) + Snapshot() int + + AddLog(*types.Log) + AddPreimage(common.Hash, []byte) + + GetStateObjectFromUnconfirmedDB(addr common.Address) (*stateObject, bool) +} diff --git a/core/state/journal.go b/core/state/journal.go index 6cdc1fc868..635d516d49 100644 --- a/core/state/journal.go +++ b/core/state/journal.go @@ -17,6 +17,7 @@ package state import ( + "fmt" "github.com/ethereum/go-ethereum/common" "github.com/holiman/uint256" ) @@ -25,7 +26,7 @@ import ( // reverted on demand. type journalEntry interface { // revert undoes the changes introduced by this journal entry. - revert(*StateDB) + revert(StateDBer) // dirtied returns the Ethereum address modified by this journal entry. dirtied() *common.Address @@ -49,6 +50,7 @@ func newJournal() *journal { // append inserts a new modification entry to the end of the change journal. func (j *journal) append(entry journalEntry) { j.entries = append(j.entries, entry) + if addr := entry.dirtied(); addr != nil { j.dirties[*addr]++ } @@ -56,10 +58,10 @@ func (j *journal) append(entry journalEntry) { // revert undoes a batch of journalled modifications along with any reverted // dirty handling too. 
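The journal.revert change that follows only widens the receiver from *StateDB to the StateDBer interface; each entry still undoes itself in reverse order back to a snapshot. A small self-contained sketch of that snapshot-and-revert pattern, with a hypothetical key/value store in place of the state objects:

package main

import "fmt"

// store is a hypothetical key/value state with an undo journal, mirroring the
// snapshot/revert shape of the state journal.
type store struct {
	data    map[string]int
	journal []func() // each entry undoes exactly one write
}

func (s *store) set(k string, v int) {
	prev, existed := s.data[k]
	s.journal = append(s.journal, func() {
		if existed {
			s.data[k] = prev
		} else {
			delete(s.data, k)
		}
	})
	s.data[k] = v
}

// snapshot returns the current journal length; revert undoes entries in reverse
// order down to that length, like journal.revert(dber, snapshot).
func (s *store) snapshot() int { return len(s.journal) }

func (s *store) revert(snap int) {
	for i := len(s.journal) - 1; i >= snap; i-- {
		s.journal[i]()
	}
	s.journal = s.journal[:snap]
}

func main() {
	s := &store{data: map[string]int{}}
	s.set("a", 1)
	snap := s.snapshot()
	s.set("a", 2)
	s.set("b", 3)
	s.revert(snap)
	fmt.Println(s.data) // map[a:1]: everything after the snapshot is undone
}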
-func (j *journal) revert(statedb *StateDB, snapshot int) { +func (j *journal) revert(dber StateDBer, snapshot int) { for i := len(j.entries) - 1; i >= snapshot; i-- { // Undo the changes made by the operation - j.entries[i].revert(statedb) + j.entries[i].revert(dber) // Drop any dirty tracking induced by the change if addr := j.entries[i].dirtied(); addr != nil { @@ -151,8 +153,18 @@ type ( } ) -func (ch createObjectChange) revert(s *StateDB) { - delete(s.stateObjects, *ch.account) +func (ch createObjectChange) revert(dber StateDBer) { + s := dber.getBaseStateDB() + if s.parallel.isSlotDB { + delete(s.parallel.dirtiedStateObjectsInSlot, *ch.account) + delete(s.parallel.addrStateChangesInSlot, *ch.account) + delete(s.parallel.nonceChangesInSlot, *ch.account) + delete(s.parallel.balanceChangesInSlot, *ch.account) + delete(s.parallel.codeChangesInSlot, *ch.account) + delete(s.parallel.kvChangesInSlot, *ch.account) + } else { + s.deleteStateObj(*ch.account) + } delete(s.stateObjectsDirty, *ch.account) } @@ -160,10 +172,25 @@ func (ch createObjectChange) dirtied() *common.Address { return ch.account } -func (ch resetObjectChange) revert(s *StateDB) { - s.setStateObject(ch.prev) +func (ch resetObjectChange) revert(dber StateDBer) { + s := dber.getBaseStateDB() + if s.parallel.isSlotDB { + + if ch.prev.address.Hex() == "0x6295eE1B4F6dD65047762F924Ecd367c17eaBf8f" { + fmt.Printf("Dav - revert() - set dirtiedStateObjectsInSlot[%s] = obj, obj.codehash: %s\n", + ch.prev.address, common.Bytes2Hex(ch.prev.CodeHash())) + } + // ch.prev must be from dirtiedStateObjectsInSlot, put it back + s.parallel.dirtiedStateObjectsInSlot[ch.prev.address] = ch.prev + } else { + // ch.prev was got from main DB, put it back to main DB. + s.storeStateObj(ch.prev.address, ch.prev) + } + if !ch.prevdestruct { + s.snapParallelLock.Lock() delete(s.stateObjectsDestruct, ch.prev.address) + s.snapParallelLock.Unlock() } if ch.prevAccount != nil { s.accounts[ch.prev.addrHash] = ch.prevAccount @@ -183,8 +210,8 @@ func (ch resetObjectChange) dirtied() *common.Address { return ch.account } -func (ch selfDestructChange) revert(s *StateDB) { - obj := s.getStateObject(*ch.account) +func (ch selfDestructChange) revert(dber StateDBer) { + obj := dber.getStateObject(*ch.account) if obj != nil { obj.selfDestructed = ch.prev obj.setBalance(ch.prevbalance) @@ -197,46 +224,47 @@ func (ch selfDestructChange) dirtied() *common.Address { var ripemd = common.HexToAddress("0000000000000000000000000000000000000003") -func (ch touchChange) revert(s *StateDB) { +func (ch touchChange) revert(dber StateDBer) { } func (ch touchChange) dirtied() *common.Address { return ch.account } -func (ch balanceChange) revert(s *StateDB) { - s.getStateObject(*ch.account).setBalance(ch.prev) +func (ch balanceChange) revert(dber StateDBer) { + dber.getStateObject(*ch.account).setBalance(ch.prev) } func (ch balanceChange) dirtied() *common.Address { return ch.account } -func (ch nonceChange) revert(s *StateDB) { - s.getStateObject(*ch.account).setNonce(ch.prev) +func (ch nonceChange) revert(dber StateDBer) { + dber.getStateObject(*ch.account).setNonce(ch.prev) } func (ch nonceChange) dirtied() *common.Address { return ch.account } -func (ch codeChange) revert(s *StateDB) { - s.getStateObject(*ch.account).setCode(common.BytesToHash(ch.prevhash), ch.prevcode) +func (ch codeChange) revert(dber StateDBer) { + dber.getStateObject(*ch.account).setCode(common.BytesToHash(ch.prevhash), ch.prevcode) } func (ch codeChange) dirtied() *common.Address { return ch.account } -func 
(ch storageChange) revert(s *StateDB) { - s.getStateObject(*ch.account).setState(ch.key, ch.prevalue) +func (ch storageChange) revert(dber StateDBer) { + dber.getStateObject(*ch.account).setState(ch.key, ch.prevalue) } func (ch storageChange) dirtied() *common.Address { return ch.account } -func (ch transientStorageChange) revert(s *StateDB) { +func (ch transientStorageChange) revert(dber StateDBer) { + s := dber.getBaseStateDB() s.setTransientState(*ch.account, ch.key, ch.prevalue) } @@ -244,7 +272,8 @@ func (ch transientStorageChange) dirtied() *common.Address { return nil } -func (ch refundChange) revert(s *StateDB) { +func (ch refundChange) revert(dber StateDBer) { + s := dber.getBaseStateDB() s.refund = ch.prev } @@ -252,7 +281,8 @@ func (ch refundChange) dirtied() *common.Address { return nil } -func (ch addLogChange) revert(s *StateDB) { +func (ch addLogChange) revert(dber StateDBer) { + s := dber.getBaseStateDB() logs := s.logs[ch.txhash] if len(logs) == 1 { delete(s.logs, ch.txhash) @@ -266,7 +296,8 @@ func (ch addLogChange) dirtied() *common.Address { return nil } -func (ch addPreimageChange) revert(s *StateDB) { +func (ch addPreimageChange) revert(dber StateDBer) { + s := dber.getBaseStateDB() delete(s.preimages, ch.hash) } @@ -274,7 +305,7 @@ func (ch addPreimageChange) dirtied() *common.Address { return nil } -func (ch accessListAddAccountChange) revert(s *StateDB) { +func (ch accessListAddAccountChange) revert(dber StateDBer) { /* One important invariant here, is that whenever a (addr, slot) is added, if the addr is not already present, the add causes two journal entries: @@ -284,6 +315,7 @@ func (ch accessListAddAccountChange) revert(s *StateDB) { (addr) at this point, since no storage adds can remain when come upon a single (addr) change. 
*/ + s := dber.getBaseStateDB() s.accessList.DeleteAddress(*ch.address) } @@ -291,7 +323,8 @@ func (ch accessListAddAccountChange) dirtied() *common.Address { return nil } -func (ch accessListAddSlotChange) revert(s *StateDB) { +func (ch accessListAddSlotChange) revert(dber StateDBer) { + s := dber.getBaseStateDB() s.accessList.DeleteSlot(*ch.address, *ch.slot) } diff --git a/core/state/parallel_statedb.go b/core/state/parallel_statedb.go new file mode 100644 index 0000000000..34f0b95a6a --- /dev/null +++ b/core/state/parallel_statedb.go @@ -0,0 +1,1735 @@ +package state + +import ( + "bytes" + "fmt" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" + "github.com/holiman/uint256" + "runtime" + "sort" + "sync" + "time" +) + +const defaultNumOfSlots = 100 + +var parallelKvOnce sync.Once + +type ParallelKvCheckUnit struct { + addr common.Address + key common.Hash + val common.Hash +} + +type ParallelKvCheckMessage struct { + slotDB *ParallelStateDB + isStage2 bool + kvUnit ParallelKvCheckUnit +} + +var parallelKvCheckReqCh chan ParallelKvCheckMessage +var parallelKvCheckResCh chan bool + +type ParallelStateDB struct { + StateDB +} + +func (s *ParallelStateDB) GetRefund() uint64 { + return s.refund +} + +func (s *ParallelStateDB) AddressInAccessList(addr common.Address) bool { + return s.accessList.ContainsAddress(addr) +} + +func (s *ParallelStateDB) SlotInAccessList(addr common.Address, slot common.Hash) (addressOk bool, slotOk bool) { + return s.accessList.Contains(addr, slot) +} + +func (s *ParallelStateDB) AddAddressToAccessList(addr common.Address) { + if s.accessList.AddAddress(addr) { + s.journal.append(accessListAddAccountChange{&addr}) + } +} + +func (s *ParallelStateDB) AddSlotToAccessList(addr common.Address, slot common.Hash) { + addrMod, slotMod := s.accessList.AddSlot(addr, slot) + if addrMod { + // In practice, this should not happen, since there is no way to enter the + // scope of 'address' without having the 'address' become already added + // to the access list (via call-variant, create, etc). 
+ // Better safe than sorry, though + s.journal.append(accessListAddAccountChange{&addr}) + } + if slotMod { + s.journal.append(accessListAddSlotChange{ + address: &addr, + slot: &slot, + }) + } +} + +func (s *ParallelStateDB) Snapshot() int { + id := s.nextRevisionId + s.nextRevisionId++ + s.validRevisions = append(s.validRevisions, revision{id, s.journal.length()}) + return id +} + +func hasKvConflict(slotDB *ParallelStateDB, addr common.Address, key common.Hash, val common.Hash, isStage2 bool) bool { + mainDB := slotDB.parallel.baseStateDB + + if isStage2 { // update slotDB's unconfirmed DB list and try + if valUnconfirm, ok := slotDB.getKVFromUnconfirmedDB(addr, key); ok { + if !bytes.Equal(val.Bytes(), valUnconfirm.Bytes()) { + log.Debug("IsSlotDBReadsValid KV read is invalid in unconfirmed", "addr", addr, + "valSlot", val, "valUnconfirm", valUnconfirm, + "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return true + } + } + } + valMain := mainDB.GetState(addr, key) + if !bytes.Equal(val.Bytes(), valMain.Bytes()) { + log.Debug("hasKvConflict is invalid", "addr", addr, + "key", key, "valSlot", val, + "valMain", valMain, "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return true // return false, Range will be terminated. + } + return false +} + +// StartKvCheckLoop start several routines to do conflict check +func StartKvCheckLoop() { + parallelKvCheckReqCh = make(chan ParallelKvCheckMessage, 200) + parallelKvCheckResCh = make(chan bool, 10) + for i := 0; i < runtime.NumCPU(); i++ { + go func() { + for { + kvEle1 := <-parallelKvCheckReqCh + parallelKvCheckResCh <- hasKvConflict(kvEle1.slotDB, kvEle1.kvUnit.addr, + kvEle1.kvUnit.key, kvEle1.kvUnit.val, kvEle1.isStage2) + } + }() + } +} + +// NewSlotDB creates a new State DB based on the provided StateDB. +// With parallel, each execution slot would have its own StateDB. 
+// This method must be called after the baseDB call PrepareParallel() +func NewSlotDB(db *StateDB, txIndex int, baseTxIndex int, unconfirmedDBs *sync.Map /*map[int]*ParallelStateDB*/) *ParallelStateDB { + slotDB := db.CopyForSlot() + slotDB.txIndex = txIndex + slotDB.originalRoot = db.originalRoot + slotDB.parallel.baseStateDB = db + slotDB.parallel.baseTxIndex = baseTxIndex + slotDB.parallel.unconfirmedDBs = unconfirmedDBs + + return slotDB +} + +// RevertSlotDB keep the Read list for conflict detect, +// discard all state changes except: +// - nonce and balance of from address +// - balance of system address: will be used on merge to update SystemAddress's balance +func (s *ParallelStateDB) RevertSlotDB(from common.Address) { + s.parallel.kvChangesInSlot = make(map[common.Address]StateKeys) + s.parallel.nonceChangesInSlot = make(map[common.Address]struct{}) + s.parallel.balanceChangesInSlot = make(map[common.Address]struct{}, 1) + s.parallel.addrStateChangesInSlot = make(map[common.Address]bool) // 0: created, 1: deleted + + selfStateObject := s.parallel.dirtiedStateObjectsInSlot[from] + s.parallel.dirtiedStateObjectsInSlot = make(map[common.Address]*stateObject, 2) + // keep these elements + if from.Hex() == "0x6295eE1B4F6dD65047762F924Ecd367c17eaBf8f" { + fmt.Printf("Dav - RevertSlotDB - set dirtiedStateObjectsInSlot[%s] = obj, obj.codehash: %s\n", + from, common.Bytes2Hex(selfStateObject.CodeHash())) + } + s.parallel.dirtiedStateObjectsInSlot[from] = selfStateObject + s.parallel.balanceChangesInSlot[from] = struct{}{} + s.parallel.nonceChangesInSlot[from] = struct{}{} +} + +func (s *ParallelStateDB) getBaseStateDB() *StateDB { + return &s.StateDB +} + +func (s *ParallelStateDB) SetSlotIndex(index int) { + s.parallel.SlotIndex = index +} + +// for parallel execution mode, try to get dirty StateObject in slot first. +// it is mainly used by journal revert right now. +func (s *ParallelStateDB) getStateObject(addr common.Address) *stateObject { + var ret *stateObject + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + if obj.deleted { + return nil + } + ret = obj + } else { + // can not call s.StateDB.getStateObject(), since `newObject` need ParallelStateDB as the interface + ret = s.getStateObjectNoSlot(addr) + } + return ret +} + +func (s *ParallelStateDB) storeStateObj(addr common.Address, stateObject *stateObject) { + // When a state object is stored into s.parallel.stateObjects, + // it belongs to base StateDB, it is confirmed and valid. + // todo Dav: why need change this? -- delete me ! + // stateObject.db = s.parallel.baseStateDB + // stateObject.dbItf = s.parallel.baseStateDB + + // the object could be created in SlotDB, if it got the object from DB and + // update it to the shared `s.parallel.stateObjects`` + stateObject.db.storeParallelLock.Lock() + if _, ok := s.parallel.stateObjects.Load(addr); !ok { + s.parallel.stateObjects.Store(addr, stateObject) + } + stateObject.db.storeParallelLock.Unlock() +} + +func (s *ParallelStateDB) getStateObjectNoSlot(addr common.Address) *stateObject { + if obj := s.getDeletedStateObject(addr); obj != nil && !obj.deleted { + return obj + } + return nil +} + +// createObject creates a new state object. If there is an existing account with +// the given address, it is overwritten and returned as the second return value. 
+ +// prev is used for CreateAccount to get its balance +// Parallel mode: +// if prev in dirty: revert is ok +// if prev in unconfirmed DB: addr state read record, revert should not put it back +// if prev in main DB: addr state read record, revert should not put it back +// if pre no exist: addr state read record, + +// `prev` is used to handle revert, to recover with the `prev` object +// In Parallel mode, we only need to recover to `prev` in SlotDB, +// +// a.if it is not in SlotDB, `revert` will remove it from the SlotDB +// b.if it is existed in SlotDB, `revert` will recover to the `prev` in SlotDB +// c.as `snapDestructs` it is the same +func (s *ParallelStateDB) createObject(addr common.Address) (newobj *stateObject) { + prev := s.parallel.dirtiedStateObjectsInSlot[addr] + // TODO-dav: check + // There can be tx0 create an obj at addr0, tx1 destruct it, and tx2 recreate it use create2. + // so if tx0 is finalized, and tx1 is unconfirmed, we have to check the states of unconfirmed, otherwise there + // will be wrong behavior that we recreate an object that is already there. see. test "TestDeleteThenCreate" + var prevdestruct bool + + if s.snap != nil && prev != nil { + s.snapParallelLock.Lock() + _, prevdestruct = s.snapDestructs[prev.address] + s.parallel.addrSnapDestructsReadsInSlot[addr] = prevdestruct + if !prevdestruct { + // To destroy the previous trie node first and update the trie tree + // with the new object on block commit. + s.snapDestructs[prev.address] = struct{}{} + } + s.snapParallelLock.Unlock() + } + newobj = newObject(s, s.isParallel, addr, nil) + newobj.setNonce(0) // sets the object to dirty + if prev == nil { + s.journal.append(createObjectChange{account: &addr}) + } else { + s.journal.append(resetObjectChange{prev: prev, prevdestruct: prevdestruct}) + } + + s.parallel.addrStateChangesInSlot[addr] = true // the object is created + s.parallel.nonceChangesInSlot[addr] = struct{}{} + s.parallel.balanceChangesInSlot[addr] = struct{}{} + s.parallel.codeChangesInSlot[addr] = struct{}{} + // notice: all the KVs are cleared if any + s.parallel.kvChangesInSlot[addr] = make(StateKeys) + newobj.created = true + s.parallel.dirtiedStateObjectsInSlot[addr] = newobj + return newobj +} + +// getDeletedStateObject is similar to getStateObject, but instead of returning +// nil for a deleted state object, it returns the actual object with the deleted +// flag set. This is needed by the state journal to revert to the correct s- +// destructed object instead of wiping all knowledge about the state object. +func (s *ParallelStateDB) getDeletedStateObject(addr common.Address) *stateObject { + + // Prefer live objects if any is available + if obj, _ := s.getStateObjectFromStateObjects(addr); obj != nil { + return obj + } + + data, ok := s.getStateObjectFromSnapshotOrTrie(addr) + if !ok { + return nil + } + + // this is why we have to use a separate getDeletedStateObject for ParallelStateDB + // `s` has to be the ParallelStateDB + obj := newObject(s, s.isParallel, addr, data) + s.storeStateObj(addr, obj) + return obj +} + +// GetOrNewStateObject retrieves a state object or create a new state object if nil. +// dirtyInSlot -> Unconfirmed DB -> main DB -> snapshot, no? 
create one +func (s *ParallelStateDB) GetOrNewStateObject(addr common.Address) *stateObject { + var object *stateObject + var ok bool + if object, ok = s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + return object + } + + // try unconfirmedDB + object, _ = s.getStateObjectFromUnconfirmedDB(addr) + if object != nil { + // object found in unconfirmedDB, check existence + if object.deleted || object.selfDestructed { + object = s.createObject(addr) + s.parallel.addrStateReadsInSlot[addr] = false + return object + } + } else { + object = s.getStateObjectNoSlot(addr) // try to get from base db + } + // not found, or found in NoSlot or found in unconfirmed. + exist := true + // TODO-dav: the check of nil and delete already done by NoSlot and unconfirmedDB, may optimize it for dirty only. + if object == nil || object.deleted { + object = s.createObject(addr) + exist = false + } + s.parallel.addrStateReadsInSlot[addr] = exist // true: exist, false: not exist + return object +} + +// Exist reports whether the given account address exists in the state. +// Notably this also returns true for suicided accounts. +func (s *ParallelStateDB) Exist(addr common.Address) bool { + // 1.Try to get from dirty + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + if obj.deleted { + log.Error("Exist in dirty, but marked as deleted or suicided", + "txIndex", s.txIndex, "baseTxIndex:", s.parallel.baseTxIndex) + return false + } + return true + } + // 2.Try to get from unconfirmed & main DB + // 2.1 Already read before + if exist, ok := s.parallel.addrStateReadsInSlot[addr]; ok { + return exist + } + + // 2.2 Try to get from unconfirmed DB if exist + if exist, ok := s.getAddrStateFromUnconfirmedDB(addr, false); ok { + s.parallel.addrStateReadsInSlot[addr] = exist // update and cache + return exist + } + + // 3.Try to get from main StateDB + exist := s.getStateObjectNoSlot(addr) != nil + s.parallel.addrStateReadsInSlot[addr] = exist // update and cache + return exist +} + +// Empty returns whether the state object is either non-existent +// or empty according to the EIP161 specification (balance = nonce = code = 0) +func (s *ParallelStateDB) Empty(addr common.Address) bool { + // 1.Try to get from dirty + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + // dirty object is light copied and fixup on need, + // empty could be wrong, except it is created with this TX + if _, ok := s.parallel.addrStateChangesInSlot[addr]; ok { + return obj.empty() + } + // so we have to check it manually + // empty means: Nonce == 0 && Balance == 0 && CodeHash == emptyCodeHash + if s.GetBalance(addr).Sign() != 0 { // check balance first, since it is most likely not zero + return false + } + if s.GetNonce(addr) != 0 { + return false + } + codeHash := s.GetCodeHash(addr) + return bytes.Equal(codeHash.Bytes(), emptyCodeHash) // code is empty, the object is empty + } + // 2.Try to get from unconfirmed & main DB + // 2.1 Already read before + if exist, ok := s.parallel.addrStateReadsInSlot[addr]; ok { + // exist means not empty + return !exist + } + // 2.2 Try to get from unconfirmed DB if exist + if exist, ok := s.getAddrStateFromUnconfirmedDB(addr, true); ok { + s.parallel.addrStateReadsInSlot[addr] = exist // update and cache + return !exist + } + // 2.3 Try to get from NoSlot. 
+ so := s.getStateObjectNoSlot(addr) + exist := so != nil + empty := (!exist) || so.empty() + + s.parallel.addrStateReadsInSlot[addr] = exist // update read cache + return empty +} + +// GetBalance retrieves the balance from the given address or 0 if object not found +// GetFrom the dirty list => from unconfirmed DB => get from main stateDB +func (s *ParallelStateDB) GetBalance(addr common.Address) *uint256.Int { + var dirtyObj *stateObject + // 0. Test whether it is deleted in dirty. + if o, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + if o.deleted { + return common.U2560 + } + dirtyObj = o + } + + // 1.Try to get from dirty + if _, ok := s.parallel.balanceChangesInSlot[addr]; ok { + // on balance fixup, addr may not exist in dirtiedStateObjectsInSlot + // we intend to fixup balance based on unconfirmed DB or main DB + return dirtyObj.Balance() + } + // 2.Try to get from unconfirmed DB or main DB + // 2.1 Already read before + if balance, ok := s.parallel.balanceReadsInSlot[addr]; ok { + return balance + } + + balance := common.U2560 + // 2.2 Try to get from unconfirmed DB if exist + if blc := s.getBalanceFromUnconfirmedDB(addr); blc != nil { + balance = blc + } else { + // 3. Try to get from main StateObject + blc = common.U2560 + object := s.getStateObjectNoSlot(addr) + if object != nil { + blc = object.Balance() + } + balance = blc + } + s.parallel.balanceReadsInSlot[addr] = balance + + // fixup dirties + if dirtyObj != nil && dirtyObj.Balance() != balance { + dirtyObj.setBalance(balance) + } + + return balance +} + +func (s *ParallelStateDB) GetNonce(addr common.Address) uint64 { + var dirtyObj *stateObject + // 0. Test whether it is deleted in dirty. + if o, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + if o.deleted { + return 0 + } + dirtyObj = o + } + + // 1.Try to get from dirty + if _, ok := s.parallel.nonceChangesInSlot[addr]; ok { + // on nonce fixup, addr may not exist in dirtiedStateObjectsInSlot + // we intend to fixup nonce based on unconfirmed DB or main DB + return dirtyObj.Nonce() + } + // 2.Try to get from unconfirmed DB or main DB + // 2.1 Already read before + if nonce, ok := s.parallel.nonceReadsInSlot[addr]; ok { + return nonce + } + + var nonce uint64 = 0 + // 2.2 Try to get from unconfirmed DB if exist + if nc, ok := s.getNonceFromUnconfirmedDB(addr); ok { + nonce = nc + } else { + // 3.Try to get from main StateDB + nc = 0 + object := s.getStateObjectNoSlot(addr) + if object != nil { + nc = object.Nonce() + } + nonce = nc + } + s.parallel.nonceReadsInSlot[addr] = nonce + + // fixup dirties + if dirtyObj != nil && dirtyObj.Nonce() < nonce { + dirtyObj.setNonce(nonce) + } + return nonce +} + +func (s *ParallelStateDB) GetCode(addr common.Address) []byte { + var dirtyObj *stateObject + // 0. Test whether it is deleted in dirty. + if o, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + if o.deleted { + return nil + } + dirtyObj = o + } + + // 1.Try to get from dirty + if _, ok := s.parallel.codeChangesInSlot[addr]; ok { + // on code fixup, addr may not exist in dirtiedStateObjectsInSlot + // we intend to fixup code based on unconfirmed DB or main DB + return dirtyObj.Code() + } + // 2.Try to get from unconfirmed DB or main DB + // 2.1 Already read before + if code, ok := s.parallel.codeReadsInSlot[addr]; ok { + return code + } + var code []byte + // 2.2 Try to get from unconfirmed DB if exist + if cd, ok := s.getCodeFromUnconfirmedDB(addr); ok { + code = cd + } else { + // 3. 
Try to get from main StateObject + object := s.getStateObjectNoSlot(addr) + if object != nil { + code = object.Code() + } + } + s.parallel.codeReadsInSlot[addr] = code + + // fixup dirties + if dirtyObj != nil && !bytes.Equal(dirtyObj.code, code) { + dirtyObj.code = code + } + return code +} + +func (s *ParallelStateDB) GetCodeSize(addr common.Address) int { + var dirtyObj *stateObject + // 0. Test whether it is deleted in dirty. + if o, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + if o.deleted { + return 0 + } + dirtyObj = o + } + // 1.Try to get from dirty + if _, ok := s.parallel.codeChangesInSlot[addr]; ok { + // on code fixup, addr may not exist in dirtiedStateObjectsInSlot + // we intend to fixup code based on unconfirmed DB or main DB + return dirtyObj.CodeSize() + } + // 2.Try to get from unconfirmed DB or main DB + // 2.1 Already read before + if code, ok := s.parallel.codeReadsInSlot[addr]; ok { + return len(code) // len(nil) is 0 too + } + + cs := 0 + var code []byte + // 2.2 Try to get from unconfirmed DB if exist + if cd, ok := s.getCodeFromUnconfirmedDB(addr); ok { + cs = len(cd) // len(nil) is 0 too + code = cd + } else { + // 3. Try to get from main StateObject + var cc []byte + object := s.getStateObjectNoSlot(addr) + if object != nil { + // This is where we update the code from possible db.ContractCode if the original object.code is nil. + cc = object.Code() + cs = object.CodeSize() + } + code = cc + } + s.parallel.codeReadsInSlot[addr] = code + // fixup dirties + if dirtyObj != nil { + if !bytes.Equal(dirtyObj.code, code) { + dirtyObj.code = code + } + } + return cs +} + +// GetCodeHash return: +// - common.Hash{}: the address does not exist +// - emptyCodeHash: the address exist, but code is empty +// - others: the address exist, and code is not empty +func (s *ParallelStateDB) GetCodeHash(addr common.Address) common.Hash { + var dirtyObj *stateObject + // 0. Test whether it is deleted in dirty. + if o, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + if o.deleted { + return common.Hash{} + } + dirtyObj = o + } + + // 1.Try to get from dirty + if _, ok := s.parallel.codeChangesInSlot[addr]; ok { + // on code fixup, addr may not exist in dirtiedStateObjectsInSlot + // we intend to fixup balance based on unconfirmed DB or main DB + return common.BytesToHash(dirtyObj.CodeHash()) + } + // 2.Try to get from unconfirmed DB or main DB + // 2.1 Already read before + if codeHash, ok := s.parallel.codeHashReadsInSlot[addr]; ok { + return codeHash + } + codeHash := common.Hash{} + // 2.2 Try to get from unconfirmed DB if exist + if cHash, ok := s.getCodeHashFromUnconfirmedDB(addr); ok { + codeHash = cHash + } else { + // 3. Try to get from main StateObject + object := s.getStateObjectNoSlot(addr) + + if object != nil { + codeHash = common.BytesToHash(object.CodeHash()) + } + } + s.parallel.codeHashReadsInSlot[addr] = codeHash + + // fill slots in dirty if exist. + // A case for this: + // TX0: createAccount at addr 0x123, set code and codehash + // TX1: AddBalance - now an obj in dirty with empty codehash, and codeChangesInSlot is false (not changed) + // GetCodeHash - get from unconfirmedDB or mainDB, set codeHashReadsInSlot to the new val. + // SELFDESTRUCT - set codeChangesInSlot, but the obj in dirty is with Empty codehash. + // obj marked selfdestructed but not deleted. so CodeHash is not empty. + // GetCodeHash - since the codeChangesInslot is marked, get the object from dirty, and get the + // wrong 'empty' hash. 
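+ // To guard against that, the dirty object's code hash is only backfilled below while it is still nil or + // empty; a non-empty hash already present in dirty is left untouched.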
+ if dirtyObj != nil { + // found one + if dirtyObj.CodeHash() == nil || bytes.Equal(dirtyObj.CodeHash(), emptyCodeHash) { + if bytes.Equal(codeHash.Bytes(), emptyCodeHash) { + fmt.Printf("Dav -- update codehash to empty in dirty - addr: %s\n", addr) + } + dirtyObj.data.CodeHash = codeHash.Bytes() + } + } + return codeHash +} + +// GetState retrieves a value from the given account's storage trie. +// In parallel mode, the state is looked up in the following order: +// +// -> self dirty, both Slot & MainProcessor +// -> pending of self: Slot on merge +// -> pending of unconfirmed DB +// -> pending of main StateDB +// -> origin +func (s *ParallelStateDB) GetState(addr common.Address, hash common.Hash) common.Hash { + var dirtyObj *stateObject + // 0. Test whether it is deleted in dirty. + if o, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + if o == nil || o.deleted { + return common.Hash{} + } + dirtyObj = o + } + // 1.Try to get from dirty + if exist, ok := s.parallel.addrStateChangesInSlot[addr]; ok { + if !exist { + // the address could have been self-destructed within this SlotDB; + // it should still be possible to read state from a self-destructed address within the same Tx, + // e.g. within a transaction: call addr:selfdestruct -> get state: should be ok + // return common.Hash{} + log.Info("ParallelStateDB GetState suicided", "addr", addr, "hash", hash) + } else { + // It is possible that an object gets created but not dirtied since there is no state set, such as a recreate. + // In this case, simply return common.Hash{}. + // This covers the corner case: + // B0: TX0 --> createAccount @addr1 -- merged into DB + // B1: Tx1 and Tx2 + // Tx1 account@addr1 selfDestruct -- unconfirmed + // Tx2 recreate account@addr1 -- executing + // Since any state change and self-destruct is recorded in s.parallel.addrStateChangesInSlot, it is safe to simply + // return common.Hash{} for this case, as the previous TX must have destructed the object. + // P.S. if Tx2 both destructs and recreates the object, it will not fall into this logic, as the change + // will be recorded in dirtiedStateObjectsInSlot. + + if dirtyObj == nil { + log.Error("ParallelStateDB GetState access untouched object after create, may check create2") + return common.Hash{} + } + return dirtyObj.GetState(hash) + } + } + + if keys, ok := s.parallel.kvChangesInSlot[addr]; ok { + if _, ok := keys[hash]; ok { + return dirtyObj.GetState(hash) + } + } + // 2.Try to get from unconfirmed DB or main DB + // 2.1 Already read before + if storage, ok := s.parallel.kvReadsInSlot[addr]; ok { + if val, ok := storage.GetValue(hash); ok { + return val + } + } + + value := common.Hash{} + // 2.2 Try to get from unconfirmed DB if exist + if val, ok := s.getKVFromUnconfirmedDB(addr, hash); ok { + value = val + } else { + // 3.Get from main StateDB + object := s.getStateObjectNoSlot(addr) + val = common.Hash{} + if object != nil { + val = object.GetState(hash) + // TODO-dav: delete the following originStorage change, as lightCopy now copies originStorage. + // Check dirty: the object may have been saved into dirty by other changes such as SetBalance, while + // addrStateChangesInSlot[addr] does not record it. A later load from the dirties would then be flawed, because + // the first value loaded from the main stateDB was never written back to the object in dirties. + // Moreover, a KV that a previous tx in the same block read from snap or trie, but that the current tx does not + // touch, is missing from the dirty object, which may cause issues when calculating the root. + _, recorded := s.parallel.addrStateChangesInSlot[addr] + obj, isDirty := s.parallel.dirtiedStateObjectsInSlot[addr] + if !recorded && isDirty { + v, ok := obj.originStorage.GetValue(hash) + + if !(ok && v.Cmp(val) == 0) { + obj.originStorage.StoreValue(hash, val) + } + } + } + value = val + } + if s.parallel.kvReadsInSlot[addr] == nil { + s.parallel.kvReadsInSlot[addr] = newStorage(false) + } + s.parallel.kvReadsInSlot[addr].StoreValue(hash, value) // update cache + + // fixup Dirty + if dirtyObj != nil { + old := dirtyObj.GetState(hash) + if old.Cmp(value) != 0 { + dirtyObj.setState(hash, value) + } + } + return value +} + +// GetCommittedState retrieves a value from the given account's committed storage trie. +func (s *ParallelStateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash { + // 0. Test whether it is deleted. + var dirtyObj *stateObject + if o, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + if o.deleted { + return common.Hash{} + } + dirtyObj = o + } + // 2.Try to get from unconfirmed DB or main DB + // KVs in the unconfirmed DB can be seen as pending storage. + // KVs in the main DB were merged from SlotDBs and have gone through finalise() on merge, so they can be seen as pending storage too. + // 2.1 Already read before + if storage, ok := s.parallel.kvReadsInSlot[addr]; ok { + if val, ok := storage.GetValue(hash); ok { + return val + } + } + value := common.Hash{} + // 2.2 Try to get from unconfirmed DB if exist + if val, ok := s.getKVFromUnconfirmedDB(addr, hash); ok { + value = val + } else { + // 3. Try to get from main DB + val = common.Hash{} + object := s.getStateObjectNoSlot(addr) + if object != nil { + val = object.GetCommittedState(hash) + } + value = val + } + if s.parallel.kvReadsInSlot[addr] == nil { + s.parallel.kvReadsInSlot[addr] = newStorage(false) + } + s.parallel.kvReadsInSlot[addr].StoreValue(hash, value) // update cache + + // fixup Dirty + if dirtyObj != nil { + old := dirtyObj.GetState(hash) + if old.Cmp(value) != 0 { + dirtyObj.setState(hash, value) + } + } + + return value +} + +func (s *ParallelStateDB) HasSelfDestructed(addr common.Address) bool { + // 1.Try to get from dirty + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + if obj == nil || obj.deleted { + return false + } + return obj.selfDestructed + } + // 2.Try to get from unconfirmed + if exist, ok := s.getAddrStateFromUnconfirmedDB(addr, false); ok { + return !exist + } + + object := s.getDeletedStateObject(addr) + if object != nil { + return object.selfDestructed + } + return false +} + +// AddBalance adds amount to the account associated with addr. +func (s *ParallelStateDB) AddBalance(addr common.Address, amount *uint256.Int) { + // AddBalance will perform a read operation first; + // if amount == 0 there is no balance change, but an empty check is still required.
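+ // Copy-on-write: the first write to an address in this slot lightCopies the base object into + // dirtiedStateObjectsInSlot and fixes up its balance via GetBalance (which also records the read for + // the later conflict check); subsequent writes mutate the dirty copy in place.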
+ object := s.GetOrNewStateObject(addr) + if object != nil { + if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + newStateObject := object.lightCopy(s) // light copy from main DB + // do balance fixup from the confirmed DB, it could be more reliable than main DB + balance := s.GetBalance(addr) // it will record the balance read operation + newStateObject.setBalance(balance) + newStateObject.AddBalance(amount) + s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject + s.parallel.balanceChangesInSlot[addr] = struct{}{} + return + } + // already dirty, make sure the balance is fixed up since it could be previously dirtied by nonce or KV... + balance := s.GetBalance(addr) + if object.Balance().Cmp(balance) != 0 { + log.Warn("AddBalance in dirty, but balance has not do fixup", "txIndex", s.txIndex, "addr", addr, + "stateObject.Balance()", object.Balance(), "s.GetBalance(addr)", balance) + object.setBalance(balance) + } + + object.AddBalance(amount) + s.parallel.balanceChangesInSlot[addr] = struct{}{} + } +} + +// SubBalance subtracts amount from the account associated with addr. +func (s *ParallelStateDB) SubBalance(addr common.Address, amount *uint256.Int) { + // unlike add, sub 0 balance will not touch empty object + object := s.GetOrNewStateObject(addr) + if object != nil { + if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + newStateObject := object.lightCopy(s) // light copy from main DB + // do balance fixup from the confirmed DB, it could be more reliable than main DB + balance := s.GetBalance(addr) + newStateObject.setBalance(balance) + newStateObject.SubBalance(amount) + s.parallel.balanceChangesInSlot[addr] = struct{}{} + s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject + return + } + // already dirty, make sure the balance is fixed up since it could be previously dirtied by nonce or KV... 
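+ // (for example, an earlier SetNonce in this tx may have created the dirty copy before any balance + // read, leaving its Balance field at the stale value copied from the main DB)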
+ balance := s.GetBalance(addr) + if object.Balance().Cmp(balance) != 0 { + log.Warn("SubBalance in dirty, but balance is incorrect", "txIndex", s.txIndex, "addr", addr, + "stateObject.Balance()", object.Balance(), "s.GetBalance(addr)", balance) + object.setBalance(balance) + } + object.SubBalance(amount) + s.parallel.balanceChangesInSlot[addr] = struct{}{} + } +} + +func (s *ParallelStateDB) SetBalance(addr common.Address, amount *uint256.Int) { + object := s.GetOrNewStateObject(addr) + if object != nil { + if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + newStateObject := object.lightCopy(s) + // update balance for revert, in case child contract is reverted, + // it should revert to the previous balance + balance := s.GetBalance(addr) + newStateObject.setBalance(balance) + newStateObject.SetBalance(amount) + s.parallel.balanceChangesInSlot[addr] = struct{}{} + s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject + return + } + + balance := s.GetBalance(addr) + object.setBalance(balance) + object.SetBalance(amount) + s.parallel.balanceChangesInSlot[addr] = struct{}{} + } +} + +func (s *ParallelStateDB) SetNonce(addr common.Address, nonce uint64) { + object := s.GetOrNewStateObject(addr) + if object != nil { + if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + newStateObject := object.lightCopy(s) + noncePre := s.GetNonce(addr) + newStateObject.setNonce(noncePre) // nonce fixup + newStateObject.SetNonce(nonce) + s.parallel.nonceChangesInSlot[addr] = struct{}{} + s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject + return + } + noncePre := s.GetNonce(addr) + object.setNonce(noncePre) // nonce fixup + object.SetNonce(nonce) + s.parallel.nonceChangesInSlot[addr] = struct{}{} + } +} + +func (s *ParallelStateDB) SetCode(addr common.Address, code []byte) { + object := s.GetOrNewStateObject(addr) + if object != nil { + codeHash := crypto.Keccak256Hash(code) + if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + newStateObject := object.lightCopy(s) + codePre := s.GetCode(addr) // code fixup + codeHashPre := crypto.Keccak256Hash(codePre) + newStateObject.setCode(codeHashPre, codePre) + newStateObject.SetCode(codeHash, code) + s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject + s.parallel.codeChangesInSlot[addr] = struct{}{} + return + } + codePre := s.GetCode(addr) // code fixup + codeHashPre := crypto.Keccak256Hash(codePre) + object.setCode(codeHashPre, codePre) + object.SetCode(codeHash, code) + s.parallel.codeChangesInSlot[addr] = struct{}{} + } +} + +func (s *ParallelStateDB) SetState(addr common.Address, key, value common.Hash) { + object := s.GetOrNewStateObject(addr) // attention: if StateObject's lightCopy, its storage is only a part of the full storage, + if object != nil { + if s.parallel.baseTxIndex+1 == s.txIndex { + // we check if state is unchanged + // only when current transaction is the next transaction to be committed + // fixme: there is a bug, block: 14,962,284, + // stateObject is in dirty (light copy), but the key is in mainStateDB + // stateObject dirty -> committed, will skip mainStateDB dirty + if s.GetState(addr, key) == value { + log.Debug("Skip set same state", "baseTxIndex", s.parallel.baseTxIndex, + "txIndex", s.txIndex, "addr", addr, + "key", key, "value", value) + return + } + } + + if s.parallel.kvChangesInSlot[addr] == nil { + s.parallel.kvChangesInSlot[addr] = make(StateKeys) // make(Storage, defaultNumOfSlots) + } + + if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + newStateObject := 
object.lightCopy(s) + newStateObject.SetState(key, value) + s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject + s.parallel.addrStateChangesInSlot[addr] = true + return + } + // do State Update + object.SetState(key, value) + s.parallel.addrStateChangesInSlot[addr] = true + } +} + +// SelfDestruct marks the given account as suicided. +// This clears the account balance. +// +// The account's state object is still available until the state is committed, +// getStateObject will return a non-nil account after Suicide. +func (s *ParallelStateDB) SelfDestruct(addr common.Address) { + var object *stateObject + // 1.Try to get from dirty, it could be suicided inside of contract call + object = s.parallel.dirtiedStateObjectsInSlot[addr] + + if object != nil && object.deleted { + return + } + + if object == nil { + // 2.Try to get from unconfirmed, if deleted return false, since the address does not exist + if obj, ok := s.getStateObjectFromUnconfirmedDB(addr); ok { + object = obj + // Treat selfDestructed in unconfirmedDB as deleted since it will be finalised at merge phase. + deleted := object.deleted || object.selfDestructed + s.parallel.addrStateReadsInSlot[addr] = !deleted // true: exist, false: deleted + if deleted { + return + } + } + } + + if object == nil { + // 3.Try to get from main StateDB + object = s.getStateObjectNoSlot(addr) + if object == nil || object.deleted { + s.parallel.addrStateReadsInSlot[addr] = false // true: exist, false: deleted + return + } + s.parallel.addrStateReadsInSlot[addr] = true // true: exist, false: deleted + } + + s.journal.append(selfDestructChange{ + account: &addr, + prev: object.selfDestructed, // todo: must be false? + prevbalance: new(uint256.Int).Set(s.GetBalance(addr)), + }) + + if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + // do copy-on-write for suicide "write" + newStateObject := object.lightCopy(s) + newStateObject.markSelfdestructed() + newStateObject.data.Balance = new(uint256.Int) + s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject + s.parallel.addrStateChangesInSlot[addr] = false // false: the address does not exist any more, + // s.parallel.nonceChangesInSlot[addr] = struct{}{} + s.parallel.balanceChangesInSlot[addr] = struct{}{} + s.parallel.codeChangesInSlot[addr] = struct{}{} + // s.parallel.kvChangesInSlot[addr] = make(StateKeys) // all key changes are discarded + return + } + + s.parallel.addrStateChangesInSlot[addr] = false // false: the address does not exist anymore + s.parallel.balanceChangesInSlot[addr] = struct{}{} + s.parallel.codeChangesInSlot[addr] = struct{}{} + object.markSelfdestructed() + object.data.Balance = new(uint256.Int) +} + +func (s *ParallelStateDB) Selfdestruct6780(addr common.Address) { + object := s.getStateObject(addr) + if object == nil { + return + } + if object.created { + s.SelfDestruct(addr) + } +} + +// CreateAccount explicitly creates a state object. If a state object with the address +// already exists the balance is carried over to the new account. +// +// CreateAccount is called during the EVM CREATE operation. The situation might arise that +// a contract does the following: +// +// 1. sends funds to sha(account ++ (nonce + 1)) +// 2. tx_create(sha(account ++ nonce)) (note that this gets the address of 1) +// +// Carrying over the balance ensures that Ether doesn't disappear. 
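+// In the parallel implementation the previous balance is fetched via GetBalance, so the balance read is
+// recorded for conflict detection before the object is re-created.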
+func (s *ParallelStateDB) CreateAccount(addr common.Address) { + // no matter it is got from dirty, unconfirmed or main DB + // if addr not exist, preBalance will be common.U2560, it is same as new(uint256.Int) which + // is the value newObject(), + preBalance := s.GetBalance(addr) // parallel balance read will be recorded inside GetBalance + newObj := s.createObject(addr) + newObj.setBalance(new(uint256.Int).Set(preBalance)) // new uint256.Int for newObj +} + +// RevertToSnapshot reverts all state changes made since the given revision. +func (s *ParallelStateDB) RevertToSnapshot(revid int) { + // Find the snapshot in the stack of valid snapshots. + idx := sort.Search(len(s.validRevisions), func(i int) bool { + return s.validRevisions[i].id >= revid + }) + if idx == len(s.validRevisions) || s.validRevisions[idx].id != revid { + panic(fmt.Errorf("revision id %v cannot be reverted", revid)) + } + snapshot := s.validRevisions[idx].journalIndex + + // Replay the journal to undo changes and remove invalidated snapshots + s.journal.revert(s, snapshot) + s.validRevisions = s.validRevisions[:idx] +} + +// AddRefund adds gas to the refund counter +// journal.append will use ParallelState for revert +func (s *ParallelStateDB) AddRefund(gas uint64) { // todo: not needed, can be deleted + s.journal.append(refundChange{prev: s.refund}) + s.refund += gas +} + +// SubRefund removes gas from the refund counter. +// This method will panic if the refund counter goes below zero +func (s *ParallelStateDB) SubRefund(gas uint64) { + s.journal.append(refundChange{prev: s.refund}) + if gas > s.refund { + // we don't need to panic here if we read the wrong state in parallel mode + // we just need to redo this transaction + log.Info(fmt.Sprintf("Refund counter below zero (gas: %d > refund: %d)", gas, s.refund), "tx", s.thash.String()) + s.parallel.needsRedo = true + return + } + s.refund -= gas +} + +// For Parallel Execution Mode, it can be seen as Penetrated Access: +// +// ------------------------------------------------------- +// | BaseTxIndex | Unconfirmed Txs... 
| Current TxIndex | +// ------------------------------------------------------- +// +// Access from the unconfirmed DB with range&priority: txIndex -1(previous tx) -> baseTxIndex + 1 +func (s *ParallelStateDB) getBalanceFromUnconfirmedDB(addr common.Address) *uint256.Int { + for i := s.txIndex - 1; i >= 0 && i > s.BaseTxIndex(); i-- { + db_, ok := s.parallel.unconfirmedDBs.Load(i) + if !ok { + continue + } + db := db_.(*ParallelStateDB) + // 1.Refer the state of address, exist or not in dirtiedStateObjectsInSlot + balanceHit := false + if _, exist := db.parallel.addrStateChangesInSlot[addr]; exist { + balanceHit = true + } + if _, exist := db.parallel.balanceChangesInSlot[addr]; exist { // only changed balance is reliable + balanceHit = true + } + if !balanceHit { + continue + } + obj := db.parallel.dirtiedStateObjectsInSlot[addr] + balance := obj.Balance() + if obj.deleted { + balance = common.U2560 + } + return balance + + } + return nil +} + +// Similar to getBalanceFromUnconfirmedDB +func (s *ParallelStateDB) getNonceFromUnconfirmedDB(addr common.Address) (uint64, bool) { + for i := s.txIndex - 1; i > s.BaseTxIndex(); i-- { + db_, ok := s.parallel.unconfirmedDBs.Load(i) + if !ok { + continue + } + db := db_.(*ParallelStateDB) + + nonceHit := false + if _, ok := db.parallel.addrStateChangesInSlot[addr]; ok { + nonceHit = true + } else if _, ok := db.parallel.nonceChangesInSlot[addr]; ok { + nonceHit = true + } + if !nonceHit { + // nonce refer not hit, try next unconfirmedDb + continue + } + // nonce hit, return the nonce + obj := db.parallel.dirtiedStateObjectsInSlot[addr] + if obj == nil { + // could not exist, if it is changed but reverted + // fixme: revert should remove the change record + log.Debug("Get nonce from UnconfirmedDB, changed but object not exist, ", + "txIndex", s.txIndex, "referred txIndex", i, "addr", addr) + continue + } + // deleted object with nonce == 0 + if obj.deleted || obj.selfDestructed { + return 0, true + } + nonce := obj.Nonce() + return nonce, true + } + return 0, false +} + +// Similar to getBalanceFromUnconfirmedDB +// It is not only for code, but also codeHash and codeSize, we return the *stateObject for convenience. 
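+// The walk order below (txIndex-1 down to baseTxIndex+1) means the nearest preceding unconfirmed result
+// wins; if no unconfirmed DB has a record, the caller falls back to the main StateDB.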
+func (s *ParallelStateDB) getCodeFromUnconfirmedDB(addr common.Address) ([]byte, bool) { + for i := s.txIndex - 1; i > s.BaseTxIndex(); i-- { + db_, ok := s.parallel.unconfirmedDBs.Load(i) + if !ok { + continue + } + db := db_.(*ParallelStateDB) + + codeHit := false + if _, exist := db.parallel.addrStateChangesInSlot[addr]; exist { + codeHit = true + } + if _, exist := db.parallel.codeChangesInSlot[addr]; exist { + codeHit = true + } + if !codeHit { + // try next unconfirmedDb + continue + } + obj := db.parallel.dirtiedStateObjectsInSlot[addr] + if obj == nil { + // could not exist, if it is changed but reverted + // fixme: revert should remove the change record + log.Debug("Get code from UnconfirmedDB, changed but object not exist, ", + "txIndex", s.txIndex, "referred txIndex", i, "addr", addr) + continue + } + if obj.deleted || obj.selfDestructed { + return nil, true + } + code := obj.Code() + return code, true + } + return nil, false +} + +// Similar to getCodeFromUnconfirmedDB +// but differ when address is deleted or not exist +func (s *ParallelStateDB) getCodeHashFromUnconfirmedDB(addr common.Address) (common.Hash, bool) { + for i := s.txIndex - 1; i > s.BaseTxIndex(); i-- { + db_, ok := s.parallel.unconfirmedDBs.Load(i) + if !ok { + continue + } + db := db_.(*ParallelStateDB) + + hashHit := false + if _, exist := db.parallel.addrStateChangesInSlot[addr]; exist { + hashHit = true + } + if _, exist := db.parallel.codeChangesInSlot[addr]; exist { + hashHit = true + } + if !hashHit { + // try next unconfirmedDb + continue + } + obj := db.parallel.dirtiedStateObjectsInSlot[addr] + if obj == nil { + // could not exist, if it is changed but reverted + // fixme: revert should remove the change record + log.Debug("Get codeHash from UnconfirmedDB, changed but object not exist, ", + "txIndex", s.txIndex, "referred txIndex", i, "addr", addr) + continue + } + if obj.deleted || obj.selfDestructed { + return common.Hash{}, true + } + codeHash := common.BytesToHash(obj.CodeHash()) + return codeHash, true + } + return common.Hash{}, false +} + +// Similar to getCodeFromUnconfirmedDB +// It is for address state check of: Exist(), Empty() and HasSuicided() +// Since the unconfirmed DB should have done Finalise() with `deleteEmptyObjects = true` +// If the dirty address is empty or suicided, it will be marked as deleted, so we only need to return `deleted` or not. 
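+// The first returned bool is the address state (true: the address exists and, when testEmpty is set, is
+// non-empty); the second reports whether any unconfirmed DB in range had a record for the address at all.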
+func (s *ParallelStateDB) getAddrStateFromUnconfirmedDB(addr common.Address, testEmpty bool) (bool, bool) { + // check the unconfirmed DB with range: baseTxIndex -> txIndex -1(previous tx) + for i := s.txIndex - 1; i > s.BaseTxIndex(); i-- { + db_, ok := s.parallel.unconfirmedDBs.Load(i) + if !ok { + continue + } + db := db_.(*ParallelStateDB) + if exist, ok := db.parallel.addrStateChangesInSlot[addr]; ok { + if obj, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + // could not exist, if it is changed but reverted + // fixme: revert should remove the change record + log.Debug("Get addr State from UnconfirmedDB, changed but object not exist, ", + "txIndex", s.txIndex, "referred txIndex", i, "addr", addr) + continue + } else { + if obj.selfDestructed || obj.deleted { + return false, true + } + if testEmpty && obj.empty() { + return false, true + } + } + return exist, true + } + } + return false, false +} + +func (s *ParallelStateDB) getKVFromUnconfirmedDB(addr common.Address, key common.Hash) (common.Hash, bool) { + // check the unconfirmed DB with range: baseTxIndex -> txIndex -1(previous tx) + for i := s.txIndex - 1; i > s.BaseTxIndex(); i-- { + db_, ok := s.parallel.unconfirmedDBs.Load(i) + if !ok { + continue + } + db := db_.(*ParallelStateDB) + if _, ok := db.parallel.kvChangesInSlot[addr]; ok { + obj := db.parallel.dirtiedStateObjectsInSlot[addr] + if obj.deleted || obj.selfDestructed { + return common.Hash{}, true + } + if val, exist := obj.dirtyStorage.GetValue(key); exist { + return val, true + } + } + } + return common.Hash{}, false +} + +func (s *ParallelStateDB) GetStateObjectFromUnconfirmedDB(addr common.Address) (*stateObject, bool) { + return s.getStateObjectFromUnconfirmedDB(addr) +} + +func (s *ParallelStateDB) getStateObjectFromUnconfirmedDB(addr common.Address) (*stateObject, bool) { + // check the unconfirmed DB with range: baseTxIndex -> txIndex -1(previous tx) + for i := s.txIndex - 1; i > s.BaseTxIndex(); i-- { + db_, ok := s.parallel.unconfirmedDBs.Load(i) + if !ok { + continue + } + db := db_.(*ParallelStateDB) + if obj, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; ok { + return obj, true + } + } + return nil, false +} + +// IsParallelReadsValid If stage2 is true, it is a likely conflict check, +// to detect these potential conflict results in advance and schedule redo ASAP. 
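+// When isStage2 is false this is the final validation before merge: every recorded read (nonce, balance,
+// KV, code, code hash, address state and snapshot destructs) is compared against the main StateDB, and any
+// mismatch invalidates the slot execution so the transaction can be redone.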
+func (slotDB *ParallelStateDB) IsParallelReadsValid(isStage2 bool) bool { + parallelKvOnce.Do(func() { + StartKvCheckLoop() + }) + + mainDB := slotDB.parallel.baseStateDB + // for nonce + for addr, nonceSlot := range slotDB.parallel.nonceReadsInSlot { + if isStage2 { // update slotDB's unconfirmed DB list and try + if nonceUnconfirm, ok := slotDB.getNonceFromUnconfirmedDB(addr); ok { + if nonceSlot != nonceUnconfirm { + log.Debug("IsSlotDBReadsValid nonce read is invalid in unconfirmed", "addr", addr, + "nonceSlot", nonceSlot, "nonceUnconfirm", nonceUnconfirm, "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + } + } + nonceMain := mainDB.GetNonce(addr) + if nonceSlot != nonceMain { + log.Debug("IsSlotDBReadsValid nonce read is invalid", "addr", addr, + "nonceSlot", nonceSlot, "nonceMain", nonceMain, "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + } + // balance + for addr, balanceSlot := range slotDB.parallel.balanceReadsInSlot { + if isStage2 { // update slotDB's unconfirmed DB list and try + if balanceUnconfirm := slotDB.getBalanceFromUnconfirmedDB(addr); balanceUnconfirm != nil { + if balanceSlot.Cmp(balanceUnconfirm) == 0 { + continue + } + return false + } + } + + balanceMain := mainDB.GetBalance(addr) + if balanceSlot.Cmp(balanceMain) != 0 { + log.Debug("IsSlotDBReadsValid balance read is invalid", "addr", addr, + "balanceSlot", balanceSlot, "balanceMain", balanceMain, "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + } + // check KV + var units []ParallelKvCheckUnit // todo: pre-allocate to make it faster + for addr, read := range slotDB.parallel.kvReadsInSlot { + read.Range(func(keySlot, valSlot interface{}) bool { + units = append(units, ParallelKvCheckUnit{addr, keySlot.(common.Hash), valSlot.(common.Hash)}) + return true + }) + } + readLen := len(units) + // TODO-dav: change back to 8 or 1? + if readLen < 80000 || isStage2 { + for _, unit := range units { + if hasKvConflict(slotDB, unit.addr, unit.key, unit.val, isStage2) { + return false + } + } + } else { + msgHandledNum := 0 + msgSendNum := 0 + for _, unit := range units { + for { // make sure the unit is consumed + consumed := false + select { + case conflict := <-parallelKvCheckResCh: + msgHandledNum++ + if conflict { + // make sure all request are handled or discarded + for { + if msgHandledNum == msgSendNum { + break + } + select { + case <-parallelKvCheckReqCh: + msgHandledNum++ + case <-parallelKvCheckResCh: + msgHandledNum++ + } + } + return false + } + case parallelKvCheckReqCh <- ParallelKvCheckMessage{slotDB, isStage2, unit}: + msgSendNum++ + consumed = true + } + if consumed { + break + } + } + } + for { + if msgHandledNum == readLen { + break + } + conflict := <-parallelKvCheckResCh + msgHandledNum++ + if conflict { + // make sure all request are handled or discarded + for { + if msgHandledNum == msgSendNum { + break + } + select { + case <-parallelKvCheckReqCh: + msgHandledNum++ + case <-parallelKvCheckResCh: + msgHandledNum++ + } + } + return false + } + } + } + if isStage2 { // stage2 skip check code, or state, since they are likely unchanged. 
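+ // only the nonce, balance and KV reads have been validated at this point; the code, code-hash, + // address-state and snapshot-destruct checks below run only in the final (non-stage2) pass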
+ return true + } + + // check code + for addr, codeSlot := range slotDB.parallel.codeReadsInSlot { + codeMain := mainDB.GetCode(addr) + if !bytes.Equal(codeSlot, codeMain) { + log.Debug("IsSlotDBReadsValid code read is invalid", "addr", addr, + "len codeSlot", len(codeSlot), "len codeMain", len(codeMain), "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + } + // check codeHash + for addr, codeHashSlot := range slotDB.parallel.codeHashReadsInSlot { + codeHashMain := mainDB.GetCodeHash(addr) + if !bytes.Equal(codeHashSlot.Bytes(), codeHashMain.Bytes()) { + log.Debug("IsSlotDBReadsValid codehash read is invalid", "addr", addr, + "codeHashSlot", codeHashSlot, "codeHashMain", codeHashMain, "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + } + // addr state check + for addr, stateSlot := range slotDB.parallel.addrStateReadsInSlot { + stateMain := false // addr not exist + if mainDB.getStateObject(addr) != nil { + stateMain = true // addr exist in main DB + } + if stateSlot != stateMain { + log.Debug("IsSlotDBReadsValid addrState read invalid(true: exist, false: not exist)", + "addr", addr, "stateSlot", stateSlot, "stateMain", stateMain, + "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + } + // snapshot destructs check + for addr, destructRead := range slotDB.parallel.addrSnapDestructsReadsInSlot { + mainObj := mainDB.getStateObject(addr) + if mainObj == nil { + log.Debug("IsSlotDBReadsValid snapshot destructs read invalid, address should exist", + "addr", addr, "destruct", destructRead, + "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + slotDB.snapParallelLock.RLock() // fixme: this lock is not needed + _, destructMain := mainDB.snapDestructs[addr] // addr not exist + slotDB.snapParallelLock.RUnlock() + if destructRead != destructMain { + log.Debug("IsSlotDBReadsValid snapshot destructs read invalid", + "addr", addr, "destructRead", destructRead, "destructMain", destructMain, + "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + } + return true +} + +// NeedsRedo returns true if there is any clear reason that we need to redo this transaction +func (s *ParallelStateDB) NeedsRedo() bool { + return s.parallel.needsRedo +} + +// FinaliseForParallel finalises the state by removing the destructed objects and clears +// the journal as well as the refunds. Finalise, however, will not push any updates +// into the tries just yet. Only IntermediateRoot or Commit will do that. +// It also handle the mainDB dirties for the first TX. +func (s *ParallelStateDB) FinaliseForParallel(deleteEmptyObjects bool, mainDB *StateDB) { + addressesToPrefetch := make([][]byte, 0, len(s.journal.dirties)) + + if s.TxIndex() == 0 && len(mainDB.journal.dirties) > 0 { + for addr := range mainDB.journal.dirties { + var obj *stateObject + var exist bool + + obj, exist = mainDB.getStateObjectFromStateObjects(addr) + if !exist { + // ripeMD is 'touched' at block 1714175, in tx 0x1237f737031e40bcde4a8b7e717b2d15e3ecadfe49bb1bbc71ee9deb09c6fcf2 + // That tx goes out of gas, and although the notion of 'touched' does not exist there, the + // touch-event will still be recorded in the journal. 
Since ripeMD is a special snowflake, + // it will persist in the journal even though the journal is reverted. In this special circumstance, + // it may exist in `s.journal.dirties` but not in `s.stateObjects`. + // Thus, we can safely ignore it here + + continue + } + + if obj.selfDestructed || (deleteEmptyObjects && obj.empty()) { + obj.deleted = true + + // We need to maintain account deletions explicitly (will remain + // set indefinitely). Note only the first occurred self-destruct + // event is tracked. + if _, ok := mainDB.stateObjectsDestruct[obj.address]; !ok { + mainDB.stateObjectsDestruct[obj.address] = obj.origin + } + // Note, we can't do this only at the end of a block because multiple + // transactions within the same block might self destruct and then + // resurrect an account; but the snapshotter needs both events. + delete(mainDB.accounts, obj.addrHash) // Clear out any previously updated account data (may be recreated via a resurrect) + delete(mainDB.storages, obj.addrHash) // Clear out any previously updated storage data (may be recreated via a resurrect) + delete(mainDB.accountsOrigin, obj.address) // Clear out any previously updated account data (may be recreated via a resurrect) + delete(mainDB.storagesOrigin, obj.address) // Clear out any previously updated storage data (may be recreated via a resurrect) + } else { + obj.finalise(true) // Prefetch slots in the background + } + + obj.created = false + mainDB.stateObjectsPending[addr] = struct{}{} + mainDB.stateObjectsDirty[addr] = struct{}{} + + // At this point, also ship the address off to the precacher. The precacher + // will start loading tries, and when the change is eventually committed, + // the commit-phase will be a lot faster + addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure + } + mainDB.clearJournalAndRefund() + } + + for addr := range s.journal.dirties { + var obj *stateObject + var exist bool + if s.parallel.isSlotDB { + obj = s.parallel.dirtiedStateObjectsInSlot[addr] + if obj != nil { + exist = true + } else { + log.Error("StateDB Finalise dirty addr not in dirtiedStateObjectsInSlot", + "addr", addr) + } + } else { + obj, exist = s.getStateObjectFromStateObjects(addr) + } + if !exist { + // ripeMD is 'touched' at block 1714175, in tx 0x1237f737031e40bcde4a8b7e717b2d15e3ecadfe49bb1bbc71ee9deb09c6fcf2 + // That tx goes out of gas, and although the notion of 'touched' does not exist there, the + // touch-event will still be recorded in the journal. Since ripeMD is a special snowflake, + // it will persist in the journal even though the journal is reverted. In this special circumstance, + // it may exist in `s.journal.dirties` but not in `s.stateObjects`. + // Thus, we can safely ignore it here + continue + } + + if obj.selfDestructed || (deleteEmptyObjects && obj.empty()) { + obj.deleted = true + + // We need to maintain account deletions explicitly (will remain + // set indefinitely). Note only the first occurred self-destruct + // event is tracked. + if _, ok := s.stateObjectsDestruct[obj.address]; !ok { + s.stateObjectsDestruct[obj.address] = obj.origin + } + // Note, we can't do this only at the end of a block because multiple + // transactions within the same block might self destruct and then + // resurrect an account; but the snapshotter needs both events. 
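+ // For a slot DB the deletions below are additionally recorded in the *DeleteRecord slices, so the same + // deletions can later be applied to the main DB when this slot's results are merged.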
+ delete(s.accounts, obj.addrHash) // Clear out any previously updated account data (may be recreated via a resurrect) + delete(s.storages, obj.addrHash) // Clear out any previously updated storage data (may be recreated via a resurrect) + delete(s.accountsOrigin, obj.address) // Clear out any previously updated account data (may be recreated via a resurrect) + delete(s.storagesOrigin, obj.address) // Clear out any previously updated storage data (may be recreated via a resurrect) + + if s.parallel.isSlotDB { + s.parallel.accountsDeletedRecord = append(s.parallel.accountsDeletedRecord, obj.addrHash) + s.parallel.storagesDeleteRecord = append(s.parallel.storagesDeleteRecord, obj.addrHash) + s.parallel.accountsOriginDeleteRecord = append(s.parallel.accountsOriginDeleteRecord, obj.address) + s.parallel.storagesOriginDeleteRecord = append(s.parallel.storagesOriginDeleteRecord, obj.address) + } + } else { + // 1.none parallel mode, we do obj.finalise(true) as normal + // 2.with parallel mode, we do obj.finalise(true) on dispatcher, not on slot routine + // obj.finalise(true) will clear its dirtyStorage, will make prefetch broken. + if !s.isParallel || !s.parallel.isSlotDB { + obj.finalise(true) // Prefetch slots in the background + } + } + + obj.created = false + s.stateObjectsPending[addr] = struct{}{} + s.stateObjectsDirty[addr] = struct{}{} + // At this point, also ship the address off to the precacher. The precacher + // will start loading tries, and when the change is eventually committed, + // the commit-phase will be a lot faster + addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure + } + + if mainDB.prefetcher != nil && len(addressesToPrefetch) > 0 { + mainDB.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, addressesToPrefetch) + } + // Invalidate journal because reverting across transactions is not allowed. + s.clearJournalAndRefund() +} + +// IntermediateRootForSlotDB computes the current root hash of the state trie. +// It is called in between transactions to get the root hash that +// goes into transaction receipts. +// For parallel SlotDB, the intermediateRoot can be used to calculate the temporary root after executing single tx. +func (s *ParallelStateDB) IntermediateRootForSlotDB(deleteEmptyObjects bool, mainDB *StateDB) common.Hash { + // Finalise all the dirty storage states and write them into the tries + s.FinaliseForParallel(deleteEmptyObjects, mainDB) + // If there was a trie prefetcher operating, it gets aborted and irrevocably + // modified after we start retrieving tries. Remove it from the statedb after + // this round of use. + // + // This is weird pre-byzantium since the first tx runs with a prefetcher and + // the remainder without, but pre-byzantium even the initial prefetcher is + // useless, so no sleep lost. + prefetcher := mainDB.prefetcher + if mainDB.prefetcher != nil { + defer func() { + mainDB.prefetcher.close() + mainDB.prefetcher = nil + }() + } + + if s.TxIndex() == 0 && len(mainDB.stateObjectsPending) > 0 { + for addr := range mainDB.stateObjectsPending { + var obj *stateObject + if obj, _ = mainDB.getStateObjectFromStateObjects(addr); !obj.deleted { + obj.updateRoot() + } + } + } + + // Although naively it makes sense to retrieve the account trie and then do + // the contract storage and account updates sequentially, that short circuits + // the account prefetcher. 
Instead, let's process all the storage updates + // first, giving the account prefetches just a few more milliseconds of time + // to pull useful data from disk. + for addr := range s.stateObjectsPending { + var obj *stateObject + if s.parallel.isSlotDB { + if obj = s.parallel.dirtiedStateObjectsInSlot[addr]; !obj.deleted { + obj.updateRoot() + } + } else { + if obj, _ = s.getStateObjectFromStateObjects(addr); !obj.deleted { + obj.updateRoot() + } + } + } + // Now we're about to start to write changes to the trie. The trie is so far + // _untouched_. We can check with the prefetcher, if it can give us a trie + // which has the same root, but also has some content loaded into it. + // The parallel execution do the change incrementally, so can not check the prefetcher here + if prefetcher != nil { + if trie := prefetcher.trie(common.Hash{}, mainDB.originalRoot); trie != nil { + mainDB.trie = trie + } + } + + usedAddrs := make([][]byte, 0, len(s.stateObjectsPending)) + + if s.TxIndex() == 0 && len(mainDB.stateObjectsPending) > 0 { + usedAddrs = make([][]byte, 0, len(s.stateObjectsPending)+len(mainDB.stateObjectsPending)) + for addr := range mainDB.stateObjectsPending { + if obj, _ := s.getStateObjectFromStateObjects(addr); obj.deleted { + mainDB.deleteStateObject(obj) + mainDB.AccountDeleted += 1 + } else { + mainDB.updateStateObject(obj) + mainDB.AccountUpdated += 1 + } + usedAddrs = append(usedAddrs, common.CopyBytes(addr[:])) // Copy needed for closure + } + } + + for addr := range s.stateObjectsPending { + if s.parallel.isSlotDB { + if obj := s.parallel.dirtiedStateObjectsInSlot[addr]; obj.deleted { + mainDB.deleteStateObject(obj) + mainDB.AccountDeleted += 1 + } else { + mainDB.updateStateObject(obj) + mainDB.AccountUpdated += 1 + } + } else if obj, _ := s.getStateObjectFromStateObjects(addr); obj.deleted { + mainDB.deleteStateObject(obj) + mainDB.AccountDeleted += 1 + } else { + mainDB.updateStateObject(obj) + mainDB.AccountUpdated += 1 + } + usedAddrs = append(usedAddrs, common.CopyBytes(addr[:])) // Copy needed for closure + } + + if prefetcher != nil { + prefetcher.used(common.Hash{}, mainDB.originalRoot, usedAddrs) + } + // parallel slotDB trie will be updated to mainDB since intermediateRoot happens after conflict check. + // so it should be save to clear pending here. + // otherwise there can be a case that the deleted object get ignored and processes as live object in verify phase. + + if s.TxIndex() == 0 && len(mainDB.stateObjectsPending) > 0 { + mainDB.stateObjectsPending = make(map[common.Address]struct{}) + } + + if /*s.isParallel == false &&*/ len(s.stateObjectsPending) > 0 { + s.stateObjectsPending = make(map[common.Address]struct{}) + } + // Track the amount of time wasted on hashing the account trie + if metrics.EnabledExpensive { + defer func(start time.Time) { mainDB.AccountHashes += time.Since(start) }(time.Now()) + } + ret := mainDB.trie.Hash() + return ret +} diff --git a/core/state/snapshot/conversion.go b/core/state/snapshot/conversion.go index 8a0fd1989a..65d3af7525 100644 --- a/core/state/snapshot/conversion.go +++ b/core/state/snapshot/conversion.go @@ -243,7 +243,7 @@ func runReport(stats *generateStats, stop chan bool) { // generateTrieRoot generates the trie hash based on the snapshot iterator. // It can be used for generating account trie, storage trie or even the // whole state which connects the accounts and the corresponding storages. 
-func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, account common.Hash, generatorFn trieGeneratorFn, leafCallback leafCallbackFn, stats *generateStats, report bool) (common.Hash, error) { +func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, accountExt common.Hash, generatorFn trieGeneratorFn, leafCallback leafCallbackFn, stats *generateStats, report bool) (common.Hash, error) { var ( in = make(chan trieKV) // chan to pass leaves out = make(chan common.Hash, 1) // chan to collect result @@ -254,7 +254,7 @@ func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, accou wg.Add(1) go func() { defer wg.Done() - generatorFn(db, scheme, account, in, out) + generatorFn(db, scheme, accountExt, in, out) }() // Spin up a go-routine for progress logging if report && stats != nil { @@ -294,12 +294,13 @@ func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, accou ) // Start to feed leaves for it.Next() { - if account == (common.Hash{}) { + if accountExt == (common.Hash{}) { var ( err error fullData []byte ) if leafCallback == nil { + fullData, err = types.FullAccountRLP(it.(AccountIterator).Account()) if err != nil { return stop(err) @@ -323,7 +324,12 @@ func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, accou return } if account.Root != subroot { - results <- fmt.Errorf("invalid subroot(path %x), want %x, have %x", hash, account.Root, subroot) + + // results <- fmt.Errorf("invalid subroot(path %x), want %x, have %x", hash, account.Root, subroot) + + results <- fmt.Errorf("invalid subroot(path %x), want %x, have %x\n accountEXT: %s, account.ROOT: %v, codehash: %s\n", + hash, account.Root, subroot, accountExt.Hex(), account.Root, common.Bytes2Hex(account.CodeHash)) + return } results <- nil @@ -342,20 +348,20 @@ func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, accou // Accumulate the generation statistic if it's required. processed++ if time.Since(logged) > 3*time.Second && stats != nil { - if account == (common.Hash{}) { + if accountExt == (common.Hash{}) { stats.progressAccounts(it.Hash(), processed) } else { - stats.progressContract(account, it.Hash(), processed) + stats.progressContract(accountExt, it.Hash(), processed) } logged, processed = time.Now(), 0 } } // Commit the last part statistic. 
if processed > 0 && stats != nil { - if account == (common.Hash{}) { + if accountExt == (common.Hash{}) { stats.finishAccounts(processed) } else { - stats.finishContract(account, processed) + stats.finishContract(accountExt, processed) } } return stop(nil) diff --git a/core/state/snapshot/difflayer.go b/core/state/snapshot/difflayer.go index 70c9f44189..8bd863b350 100644 --- a/core/state/snapshot/difflayer.go +++ b/core/state/snapshot/difflayer.go @@ -458,6 +458,7 @@ func (dl *diffLayer) flatten() snapshot { comboData[storageHash] = data } } + // Return the combo parent return &diffLayer{ parent: parent.parent, diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go index 3077468b48..807a10c35f 100644 --- a/core/state/snapshot/snapshot.go +++ b/core/state/snapshot/snapshot.go @@ -369,7 +369,6 @@ func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs m // Save the new snapshot for later t.lock.Lock() defer t.lock.Unlock() - t.layers[snap.root] = snap return nil } @@ -412,7 +411,6 @@ func (t *Tree) Cap(root common.Hash, layers int) error { diff.lock.RLock() base := diffToDisk(diff.flatten().(*diffLayer)) diff.lock.RUnlock() - // Replace the entire snapshot tree with the flat base t.layers = map[common.Hash]snapshot{base.root: base} return nil @@ -519,7 +517,6 @@ func (t *Tree) cap(diff *diffLayer, layers int) *diskLayer { bottom.lock.RLock() base := diffToDisk(bottom) bottom.lock.RUnlock() - t.layers[base.root] = base diff.parent = base return base @@ -752,6 +749,7 @@ func (t *Tree) Rebuild(root common.Hash) { // Start generating a new snapshot from scratch on a background thread. The // generator will run a wiper first if there's not one running right now. log.Info("Rebuilding state snapshot") + t.layers = map[common.Hash]snapshot{ root: generateSnapshot(t.diskdb, t.triedb, t.config.CacheSize, root), } @@ -798,7 +796,6 @@ func (t *Tree) Verify(root common.Hash) error { return common.Hash{}, err } defer storageIt.Release() - hash, err := generateTrieRoot(nil, "", storageIt, accountHash, stackTrieGenerate, nil, stat, false) if err != nil { return common.Hash{}, err diff --git a/core/state/state_object.go b/core/state/state_object.go index 8696557845..e12c274b4b 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -20,6 +20,7 @@ import ( "bytes" "fmt" "io" + "math/big" "sync" "time" @@ -34,29 +35,114 @@ import ( "github.com/holiman/uint256" ) +var emptyCodeHash = crypto.Keccak256(nil) + type Code []byte func (c Code) String() string { return string(c) //strings.Join(Disassemble(c), " ") } -type Storage map[common.Hash]common.Hash +type Storage interface { + String() string + GetValue(hash common.Hash) (common.Hash, bool) + StoreValue(hash common.Hash, value common.Hash) + Length() (length int) + Copy() Storage + Range(func(key, value interface{}) bool) +} + +type StorageMap map[common.Hash]common.Hash -func (s Storage) String() (str string) { +func (s StorageMap) String() (str string) { for key, value := range s { str += fmt.Sprintf("%X : %X\n", key, value) } return } -func (s Storage) Copy() Storage { - cpy := make(Storage, len(s)) +func (s StorageMap) Copy() Storage { + cpy := make(StorageMap, len(s)) for key, value := range s { cpy[key] = value } + return cpy } +func (s StorageMap) GetValue(hash common.Hash) (common.Hash, bool) { + value, ok := s[hash] + return value, ok +} + +func (s StorageMap) StoreValue(hash common.Hash, value common.Hash) { + s[hash] = value +} + +func (s StorageMap) Length() int { + return 
len(s) +} + +func (s StorageMap) Range(f func(hash, value interface{}) bool) { + for k, v := range s { + result := f(k, v) + if !result { + return + } + } +} + +type StorageSyncMap struct { + sync.Map +} + +func (s *StorageSyncMap) String() (str string) { + s.Range(func(key, value interface{}) bool { + str += fmt.Sprintf("%X : %X\n", key, value) + return true + }) + + return +} + +func (s *StorageSyncMap) GetValue(hash common.Hash) (common.Hash, bool) { + value, ok := s.Load(hash) + if !ok { + return common.Hash{}, ok + } + + return value.(common.Hash), ok +} + +func (s *StorageSyncMap) StoreValue(hash common.Hash, value common.Hash) { + s.Store(hash, value) +} + +func (s *StorageSyncMap) Length() (length int) { + s.Range(func(key, value interface{}) bool { + length++ + return true + }) + return length +} + +func (s *StorageSyncMap) Copy() Storage { + cpy := StorageSyncMap{} + s.Range(func(key, value interface{}) bool { + cpy.Store(key, value) + return true + }) + + return &cpy +} + +func newStorage(isParallel bool) Storage { + if isParallel { + return &StorageSyncMap{} + } + return make(StorageMap) +} + // stateObject represents an Ethereum account which is being modified. // // The usage pattern is as follows: @@ -64,7 +150,8 @@ func (s Storage) Copy() Storage { // - Account values as well as storages can be accessed and modified through the object. // - Finally, call commit to return the changes of storage trie and update account data. type stateObject struct { - db *StateDB + db *StateDB // The baseDB for parallel. + dbItf StateDBer // The slotDB for parallel. address common.Address // address of ethereum account addrHash common.Hash // hash of ethereum address of the account origin *types.StateAccount // Account original data without any change applied, nil means it was not existent @@ -74,6 +161,10 @@ type stateObject struct { trie Trie // storage trie, which becomes non-nil on first access code Code // contract bytecode, which gets set when code is loaded + // isParallel indicates this state object is used in parallel mode, in which mode the + // storage would be sync.Map instead of map + isParallel bool + originStorage Storage // Storage cache of original entries to dedup rewrites pendingStorage Storage // Storage entries that need to be flushed to disk, at the end of an entire block dirtyStorage Storage // Storage entries that have been modified in the current transaction execution, reset for every transaction @@ -96,11 +187,55 @@ type stateObject struct { // empty returns whether the account is considered empty. func (s *stateObject) empty() bool { - return s.data.Nonce == 0 && s.data.Balance.IsZero() && bytes.Equal(s.data.CodeHash, types.EmptyCodeHash.Bytes()) + // return s.data.Nongn() == 0 && bytes.ce == 0 && s.data.Balance.SiEqual(s.data.CodeHash, types.EmptyCodeHash.Bytes()) + // return s.data.Nonce == 0 && s.data.Balance.Sign() == 0 && bytes.Equal(s.data.CodeHash, emptyCodeHash) + + // empty() has 3 use cases: + // 1.StateDB.Empty(), to empty check + // A: It is ok, we have handled it in Empty(), to make sure nonce, balance, codeHash are solid + // 2:AddBalance 0, empty check for touch event + // empty() will add a touch event. + // if we misjudge it, the touch event could be lost, which make address not deleted. 
// fixme + // 3.Finalise(), to do empty delete + // the address should be dirtied or touched + // if it nonce dirtied, it is ok, since nonce is monotonically increasing, won't be zero + // if balance is dirtied, balance could be zero, we should refer solid nonce & codeHash // fixme + // if codeHash is dirtied, it is ok, since code will not be updated. + // if suicide, it is ok + // if object is new created, it is ok + // if CreateAccount, recreate the address, it is ok. + + // Slot 0 tx 0: AddBalance(100) to addr_1, => addr_1: balance = 100, nonce = 0, code is empty + // Slot 1 tx 1: addr_1 Transfer 99.9979 with GasFee 0.0021, => addr_1: balance = 0, nonce = 1, code is empty + // notice: balance transfer cost 21,000 gas, with gasPrice = 100Gwei, GasFee will be 0.0021 + // Slot 0 tx 2: add balance 0 to addr_1(empty check for touch event), + // the object was lightCopied from tx 0, + + // in parallel mode, we should not check empty by raw nonce, balance, codeHash anymore, + // since it could be invalid. + // e.g., AddBalance() to an address, we will do lightCopy to get a new StateObject, we did balance fixup to + // make sure object's Balance is reliable. But we did not fixup nonce or code, we only do nonce or codehash + // fixup on need, that's when we wanna to update the nonce or codehash. + // So nonce, balance + // Before the block is processed, addr_1 account: nonce = 0, emptyCodeHash, balance = 100 + // Slot 0 tx 0: no access to addr_1 + // Slot 1 tx 1: sub balance 100, it is empty and deleted + // Slot 0 tx 2: GetNonce, lightCopy based on main DB(balance = 100) , not empty + + if s.dbItf.GetBalance(s.address).Sign() != 0 { // check balance first, since it is most likely not zero + return false + } + if s.dbItf.GetNonce(s.address) != 0 { + return false + } + codeHash := s.dbItf.GetCodeHash(s.address) + return bytes.Equal(codeHash.Bytes(), emptyCodeHash) // code is empty, the object is empty + } // newObject creates a state object. -func newObject(db *StateDB, address common.Address, acct *types.StateAccount) *stateObject { +func newObject(dbItf StateDBer, isParallel bool, address common.Address, acct *types.StateAccount) *stateObject { + db := dbItf.getBaseStateDB() var ( origin = acct created = acct == nil // true if the account was not existent @@ -110,13 +245,15 @@ func newObject(db *StateDB, address common.Address, acct *types.StateAccount) *s } return &stateObject{ db: db, + dbItf: dbItf, address: address, addrHash: crypto.Keccak256Hash(address[:]), origin: origin, data: *acct, - originStorage: make(Storage), - pendingStorage: make(Storage), - dirtyStorage: make(Storage), + isParallel: isParallel, + originStorage: newStorage(isParallel), + pendingStorage: newStorage(isParallel), + dirtyStorage: newStorage(isParallel), created: created, } } @@ -165,7 +302,7 @@ func (s *stateObject) getTrie() (Trie, error) { // GetState retrieves a value from the account storage trie. func (s *stateObject) GetState(key common.Hash) common.Hash { // If we have a dirty value for this state entry, return it - value, dirty := s.dirtyStorage[key] + value, dirty := s.dirtyStorage.GetValue(key) if dirty { return value } @@ -176,21 +313,45 @@ func (s *stateObject) GetState(key common.Hash) common.Hash { // GetCommittedState retrieves a value from the committed account storage trie. 
func (s *stateObject) GetCommittedState(key common.Hash) common.Hash { // If we have a pending write or clean cached, return that - if value, pending := s.pendingStorage[key]; pending { + // if value, pending := s.pendingStorage[key]; pending { + if value, pending := s.pendingStorage.GetValue(key); pending { return value } - if value, cached := s.originStorage[key]; cached { + if value, cached := s.originStorage.GetValue(key); cached { return value } + + // Add-Dav: + // Need to confirm the object is not destructed in unconfirmed db and resurrected in this tx. + // otherwise there is an issue for cases like: + // B0: TX0 --> createAccount @addr1 -- merged into DB + // B1: Tx1 and Tx2 + // Tx1 account@addr1, setState(key0), setState(key1) selfDestruct -- unconfirmed + // Tx2 recreate account@addr2, setState(key0) -- executing + // TX2 GetState(addr2, key1) --- + // key1 is never set after recurrsect, and should not return state in trie as it destructed in unconfirmed + // TODO - dav: do we need try storages from unconfirmedDB? - currently not because conflict detection need it for get from mainDB. + obj, exist := s.dbItf.GetStateObjectFromUnconfirmedDB(s.address) + if exist { + if obj.deleted || obj.selfDestructed { + return common.Hash{} + } + } + // If the object was destructed in *this* block (and potentially resurrected), // the storage has been cleared out, and we should *not* consult the previous // database about any storage values. The only possible alternatives are: // 1) resurrect happened, and new slot values were set -- those should // have been handles via pendingStorage above. // 2) we don't have new values, and can deliver empty response back - if _, destructed := s.db.stateObjectsDestruct[s.address]; destructed { + //if _, destructed := s.db.stateObjectsDestruct[s.address]; destructed { + s.db.snapParallelLock.RLock() + if _, destructed := s.db.stateObjectsDestruct[s.address]; destructed { // fixme: use sync.Map, instead of RWMutex? + s.db.snapParallelLock.RUnlock() return common.Hash{} } + s.db.snapParallelLock.RUnlock() + // If no live objects are available, attempt to use snapshots var ( enc []byte @@ -229,14 +390,22 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash { } value.SetBytes(val) } - s.originStorage[key] = value + s.originStorage.StoreValue(key, value) + return value } // SetState updates a value in account storage. func (s *stateObject) SetState(key, value common.Hash) { // If the new value is the same as old, don't set - prev := s.GetState(key) + // In parallel mode, it has to get from StateDB, in case: + // a.the Slot did not set the key before and try to set it to `val_1` + // b.Unconfirmed DB has set the key to `val_2` + // c.if we use StateObject.GetState, and the key load from the main DB is `val_1` + // this `SetState could be skipped` + // d.Finally, the key's value will be `val_2`, while it should be `val_1` + // such as: https://bscscan.com/txs?block=2491181 + prev := s.dbItf.GetState(s.address, key) if prev == value { return } @@ -246,28 +415,35 @@ func (s *stateObject) SetState(key, value common.Hash) { key: key, prevalue: prev, }) + + if s.db.parallel.isSlotDB { + s.db.parallel.kvChangesInSlot[s.address][key] = struct{}{} // should be moved to here, after `s.db.GetState()` + } s.setState(key, value) } func (s *stateObject) setState(key, value common.Hash) { - s.dirtyStorage[key] = value + s.dirtyStorage.StoreValue(key, value) } // finalise moves all dirty storage slots into the pending area to be hashed or // committed later. 
It is invoked at the end of every transaction. func (s *stateObject) finalise(prefetch bool) { - slotsToPrefetch := make([][]byte, 0, len(s.dirtyStorage)) - for key, value := range s.dirtyStorage { - s.pendingStorage[key] = value - if value != s.originStorage[key] { - slotsToPrefetch = append(slotsToPrefetch, common.CopyBytes(key[:])) // Copy needed for closure + slotsToPrefetch := make([][]byte, 0, s.dirtyStorage.Length()) + s.dirtyStorage.Range(func(key, value interface{}) bool { + s.pendingStorage.StoreValue(key.(common.Hash), value.(common.Hash)) + originalValue, _ := s.originStorage.GetValue(key.(common.Hash)) + if value.(common.Hash) != originalValue { + originalKey := key.(common.Hash) + slotsToPrefetch = append(slotsToPrefetch, common.CopyBytes(originalKey[:])) // Copy needed for closure } - } + return true + }) if s.db.prefetcher != nil && prefetch && len(slotsToPrefetch) > 0 && s.data.Root != types.EmptyRootHash { s.db.prefetcher.prefetch(s.addrHash, s.data.Root, s.address, slotsToPrefetch) } - if len(s.dirtyStorage) > 0 { - s.dirtyStorage = make(Storage) + if s.dirtyStorage.Length() > 0 { + s.dirtyStorage = newStorage(s.isParallel) } } @@ -282,7 +458,7 @@ func (s *stateObject) updateTrie() (Trie, error) { s.finalise(false) // Short circuit if nothing changed, don't bother with hashing anything - if len(s.pendingStorage) == 0 { + if s.pendingStorage.Length() == 0 { return s.trie, nil } // Track the amount of time wasted on updating the storage trie @@ -300,14 +476,18 @@ func (s *stateObject) updateTrie() (Trie, error) { s.db.setError(err) return nil, err } + // Insert all the pending storage updates into the trie - usedStorage := make([][]byte, 0, len(s.pendingStorage)) + usedStorage := make([][]byte, 0, s.pendingStorage.Length()) dirtyStorage := make(map[common.Hash][]byte) - for key, value := range s.pendingStorage { + s.pendingStorage.Range(func(keyItf, valueItf interface{}) bool { + key := keyItf.(common.Hash) + value := valueItf.(common.Hash) // Skip noop changes, persist actual changes - if value == s.originStorage[key] { - continue + originalValue, _ := s.originStorage.GetValue(key) + if value == originalValue { + return true } var v []byte if value != (common.Hash{}) { @@ -315,7 +495,8 @@ func (s *stateObject) updateTrie() (Trie, error) { v = common.TrimLeftZeroes(value[:]) } dirtyStorage[key] = v - } + return true + }) var wg sync.WaitGroup wg.Add(1) go func() { @@ -365,8 +546,8 @@ func (s *stateObject) updateTrie() (Trie, error) { storage[khash] = encoded // encoded will be nil if it's deleted // Track the original value of slot only if it's mutated first time - prev := s.originStorage[key] - s.originStorage[key] = common.BytesToHash(value) // fill back left zeroes by BytesToHash + prev, _ := s.originStorage.GetValue(key) + s.originStorage.StoreValue(key, common.BytesToHash(value)) // fill back left zeroes by BytesToHash if _, ok := origin[khash]; !ok { if prev == (common.Hash{}) { origin[khash] = nil // nil if it was not present previously @@ -383,7 +564,7 @@ func (s *stateObject) updateTrie() (Trie, error) { if s.db.prefetcher != nil { s.db.prefetcher.used(s.addrHash, s.data.Root, usedStorage) } - s.pendingStorage = make(Storage) // reset pending map + s.pendingStorage = newStorage(s.isParallel) // reset pending map return tr, nil } @@ -434,6 +615,7 @@ func (s *stateObject) commit() (*trienode.NodeSet, error) { // Update original account data after commit s.origin = s.data.Copy() + return nodes, nil } @@ -472,13 +654,57 @@ func (s *stateObject) setBalance(amount 
*uint256.Int) { s.data.Balance = amount } +// ReturnGas Return the gas back to the origin. Used by the Virtual machine or Closures +func (s *stateObject) ReturnGas(gas *big.Int) {} + +func (s *stateObject) lightCopy(db *ParallelStateDB) *stateObject { + object := newObject(db, s.isParallel, s.address, &s.data) + object.code = s.code + object.selfDestructed = s.selfDestructed // should be false + object.dirtyCode = s.dirtyCode // it is not used in slot, but keep it is ok + object.deleted = s.deleted // should be false + + // we must copy because it is possible that s comes from unconfirmedDB and hence storage is necessary. + // otherwise there is problem that the light copied obj is in dirty and addrStateChangeInSlot is marked, but + // GetState get empty from storages and load from mainDB, which is inconsistent with real execution. + // Moreover, as the wrong object already in dirty, no KVStateRead recorded. and hence can not identified in + // conflict detection. + // example: block contains tx1 tx2 + // after execution of tx1, it store object@theAddr in unconfirmedDB. + // at tx2, it first AddBalance of theAddr, which cause lightcopy and store in dirty, and mark the addrStateChangeInSlot + // Then the GetState(theAddr) find addrStateChangeInSlot and get obj in dirty, but the slot is empty so it load from + // mainDB, and return is inconsistent with unconfirmedDB of Tx1 result. (and as object in dirty, it doesn't mark KVStateRead.) + if object.address.Hex() == "0x864BbDA5C698aC34b47a9ea3BD4228802cC5ce3b" { + fmt.Printf("Dav -- ligthCopy -- update storage :%s\n storages:\ndirty:", object.address.Hex()) + s.dirtyStorage.Range(func(key, value interface{}) bool { + fmt.Printf("key: %s, value: %s\n", + key.(common.Hash), value.(common.Hash)) + return true + }) + fmt.Printf("\npending:\n") + s.pendingStorage.Range(func(key, value interface{}) bool { + fmt.Printf("key: %s, value: %s\n", + key.(common.Hash), value.(common.Hash)) + return true + }) + } + + object.dirtyStorage = s.dirtyStorage.Copy() + object.originStorage = s.originStorage.Copy() + object.pendingStorage = s.pendingStorage.Copy() + + return object +} + func (s *stateObject) deepCopy(db *StateDB) *stateObject { obj := &stateObject{ - db: db, - address: s.address, - addrHash: s.addrHash, - origin: s.origin, - data: s.data, + db: db.getBaseStateDB(), + dbItf: db, + address: s.address, + addrHash: s.addrHash, + origin: s.origin, + data: s.data, + isParallel: s.isParallel, } if s.trie != nil { obj.trie = db.db.CopyTrie(s.trie) @@ -493,6 +719,15 @@ func (s *stateObject) deepCopy(db *StateDB) *stateObject { return obj } +func (s *stateObject) MergeSlotObject(db Database, dirtyObjs *stateObject, keys StateKeys) { + for key := range keys { + // In parallel mode, always GetState by StateDB, not by StateObject directly, + // since it the KV could exist in unconfirmed DB. 
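The lightCopy above deliberately clones dirtyStorage, originStorage and pendingStorage via Copy() instead of sharing them with the source object. A toy illustration of the aliasing that the copy avoids, using plain Go maps rather than the PR's Storage type:

package main

import "fmt"

func main() {
	base := map[string]string{"slot0": "A"}

	aliased := base                      // reference copy: both names point at the same map
	cloned := make(map[string]string, 1) // deep copy, as Storage.Copy() does
	for k, v := range base {
		cloned[k] = v
	}

	base["slot0"] = "B" // a later write in the source object

	fmt.Println(aliased["slot0"]) // "B" -- the shared map leaks the write
	fmt.Println(cloned["slot0"])  // "A" -- the copied map stays consistent with what the slot read
}

Sharing the maps would let one slot observe half-finished writes from another, which is exactly the inconsistency the comment above describes.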
+ // But here, it should be ok, since the KV should be changed and valid in the SlotDB, + s.setState(key, dirtyObjs.GetState(key)) + } +} + // // Attribute accessors // @@ -507,13 +742,16 @@ func (s *stateObject) Code() []byte { if s.code != nil { return s.code } + if bytes.Equal(s.CodeHash(), types.EmptyCodeHash.Bytes()) { return nil } + code, err := s.db.db.ContractCode(s.address, common.BytesToHash(s.CodeHash())) if err != nil { s.db.setError(fmt.Errorf("can't load code hash %x: %v", s.CodeHash(), err)) } + s.code = code return code } @@ -536,7 +774,7 @@ func (s *stateObject) CodeSize() int { } func (s *stateObject) SetCode(codeHash common.Hash, code []byte) { - prevcode := s.Code() + prevcode := s.dbItf.GetCode(s.address) s.db.journal.append(codeChange{ account: &s.address, prevhash: s.CodeHash(), @@ -553,9 +791,10 @@ func (s *stateObject) setCode(codeHash common.Hash, code []byte) { } func (s *stateObject) SetNonce(nonce uint64) { + prevNonce := s.dbItf.GetNonce(s.address) s.db.journal.append(nonceChange{ account: &s.address, - prev: s.data.Nonce, + prev: prevNonce, }) s.setNonce(nonce) } diff --git a/core/state/state_test.go b/core/state/state_test.go index 9be610f962..063d4b5567 100644 --- a/core/state/state_test.go +++ b/core/state/state_test.go @@ -269,30 +269,46 @@ func compareStateObjects(so0, so1 *stateObject, t *testing.T) { t.Fatalf("Code mismatch: have %v, want %v", so0.code, so1.code) } - if len(so1.dirtyStorage) != len(so0.dirtyStorage) { - t.Errorf("Dirty storage size mismatch: have %d, want %d", len(so1.dirtyStorage), len(so0.dirtyStorage)) + if so1.dirtyStorage.Length() != so0.dirtyStorage.Length() { + t.Errorf("Dirty storage size mismatch: have %d, want %d", so1.dirtyStorage.Length(), so0.dirtyStorage.Length()) } - for k, v := range so1.dirtyStorage { - if so0.dirtyStorage[k] != v { - t.Errorf("Dirty storage key %x mismatch: have %v, want %v", k, so0.dirtyStorage[k], v) + so1.dirtyStorage.Range(func(key, value interface{}) bool { + k, v := key.(common.Hash), value.(common.Hash) + + if tmpV, _ := so0.dirtyStorage.GetValue(k); tmpV != v { + t.Errorf("Dirty storage key %x mismatch: have %v, want %v", k, tmpV.String(), v) } - } - for k, v := range so0.dirtyStorage { - if so1.dirtyStorage[k] != v { + return true + }) + + so0.dirtyStorage.Range(func(key, value interface{}) bool { + k, v := key.(common.Hash), value.(common.Hash) + + if tmpV, _ := so1.dirtyStorage.GetValue(k); tmpV != v { t.Errorf("Dirty storage key %x mismatch: have %v, want none.", k, v) } + return true + }) + + if so1.originStorage.Length() != so0.originStorage.Length() { + t.Errorf("Origin storage size mismatch: have %d, want %d", so1.originStorage.Length(), so0.originStorage.Length()) } - if len(so1.originStorage) != len(so0.originStorage) { - t.Errorf("Origin storage size mismatch: have %d, want %d", len(so1.originStorage), len(so0.originStorage)) - } - for k, v := range so1.originStorage { - if so0.originStorage[k] != v { - t.Errorf("Origin storage key %x mismatch: have %v, want %v", k, so0.originStorage[k], v) + + so1.originStorage.Range(func(key, value interface{}) bool { + k, v := key.(common.Hash), value.(common.Hash) + + if tmpV, _ := so0.originStorage.GetValue(k); tmpV != v { + t.Errorf("Origin storage key %x mismatch: have %v, want %v", k, tmpV, v) } - } - for k, v := range so0.originStorage { - if so1.originStorage[k] != v { + return true + }) + + so0.originStorage.Range(func(key, value interface{}) bool { + k, v := key.(common.Hash), value.(common.Hash) + + if tmpV, _ := 
so1.originStorage.GetValue(k); tmpV != v { t.Errorf("Origin storage key %x mismatch: have %v, want none.", k, v) } - } + return true + }) } diff --git a/core/state/statedb.go b/core/state/statedb.go index f5464eb23c..a2780c2a03 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -18,6 +18,7 @@ package state import ( + "bytes" "fmt" "runtime" "sort" @@ -51,6 +52,116 @@ type revision struct { journalIndex int } +var emptyAddr = common.Address{} + +type StateKeys map[common.Hash]struct{} + +type StateObjectSyncMap struct { + sync.Map +} + +func (s *StateObjectSyncMap) LoadStateObject(addr common.Address) (*stateObject, bool) { + so, ok := s.Load(addr) + if !ok { + return nil, ok + } + return so.(*stateObject), ok +} + +func (s *StateObjectSyncMap) StoreStateObject(addr common.Address, stateObject *stateObject) { + s.Store(addr, stateObject) +} + +// loadStateObj is the entry for loading state object from stateObjects in StateDB or stateObjects in parallel +func (s *StateDB) loadStateObj(addr common.Address) (*stateObject, bool) { + + if s.isParallel { + ret, ok := s.parallel.stateObjects.LoadStateObject(addr) + return ret, ok + } + + obj, ok := s.stateObjects[addr] + return obj, ok +} + +// storeStateObj is the entry for storing state object to stateObjects in StateDB or stateObjects in parallel +func (s *StateDB) storeStateObj(addr common.Address, stateObject *stateObject) { + if s.isParallel { + // When a state object is stored into s.parallel.stateObjects, + // it belongs to base StateDB, it is confirmed and valid. + // TODO-dav: remove the lock/unlock? + stateObject.db.storeParallelLock.Lock() + s.parallel.stateObjects.Store(addr, stateObject) + stateObject.db.storeParallelLock.Unlock() + } else { + s.stateObjects[addr] = stateObject + } +} + +// deleteStateObj is the entry for deleting state object to stateObjects in StateDB or stateObjects in parallel +func (s *StateDB) deleteStateObj(addr common.Address) { + if s.isParallel { + s.parallel.stateObjects.Delete(addr) + } else { + delete(s.stateObjects, addr) + } +} + +// ParallelState is for parallel mode only +type ParallelState struct { + isSlotDB bool // denotes StateDB is used in slot, we will try to remove it + SlotIndex int // for debug, to be removed + // stateObjects holds the state objects in the base slot db + // the reason for using stateObjects instead of stateObjects on the outside is + // we need a thread safe map to hold state objects since there are many slots will read + // state objects from it; + // And we will merge all the changes made by the concurrent slot into it. + stateObjects *StateObjectSyncMap + + baseStateDB *StateDB // for parallel mode, there will be a base StateDB in dispatcher routine. + baseTxIndex int // slotDB is created base on this tx index. + dirtiedStateObjectsInSlot map[common.Address]*stateObject + unconfirmedDBs *sync.Map /*map[int]*ParallelStateDB*/ // do unconfirmed reference in same slot. + + // we will record the read detail for conflict check and + // the changed addr or key for object merge, the changed detail can be achieved from the dirty object + nonceChangesInSlot map[common.Address]struct{} + nonceReadsInSlot map[common.Address]uint64 + balanceChangesInSlot map[common.Address]struct{} // the address's balance has been changed + balanceReadsInSlot map[common.Address]*uint256.Int // the address's balance has been read and used. 
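The *ReadsInSlot / *ChangesInSlot maps declared here are the per-slot read and write records that later drive conflict detection and the merge of dirty objects back into the main StateDB. A minimal, hypothetical sketch of how such records can be checked before merging a slot's result (not the real detector, just the shape of the idea):

package main

import "fmt"

type readSet map[string]uint64  // e.g. address -> balance the slot observed
type writeSet map[string]uint64 // e.g. address -> balance already committed by earlier txs

// hasConflict reports whether any value this slot based its execution on
// has since been changed by a transaction that was merged before it.
func hasConflict(reads readSet, committed writeSet) bool {
	for addr, seen := range reads {
		if now, ok := committed[addr]; ok && now != seen {
			return true
		}
	}
	return false
}

func main() {
	// The slot executed tx2 having read addr_1's balance as 100 ...
	reads := readSet{"addr_1": 100}
	// ... but tx1, merged first, left addr_1 at 0.
	committed := writeSet{"addr_1": 0}
	fmt.Println(hasConflict(reads, committed)) // true -> tx2's result cannot be merged as-is; the tx must be re-executed
}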
+ // codeSize can be derived based on code, but codeHash can not be directly derived based on code + // - codeSize is 0 for address not exist or empty code + // - codeHash is `common.Hash{}` for address not exist, emptyCodeHash(`Keccak256Hash(nil)`) for empty code, + // so we use codeReadsInSlot & codeHashReadsInSlot to keep code and codeHash, codeSize is derived from code + codeReadsInSlot map[common.Address][]byte // empty if address not exist or no code in this address + codeHashReadsInSlot map[common.Address]common.Hash + codeChangesInSlot map[common.Address]struct{} + kvReadsInSlot map[common.Address]Storage + kvChangesInSlot map[common.Address]StateKeys // value will be kept in dirtiedStateObjectsInSlot + // Actions such as SetCode, Suicide will change address's state. + // Later call like Exist(), Empty(), HasSuicided() depend on the address's state. + addrStateReadsInSlot map[common.Address]bool // true: exist, false: not exist or deleted + addrStateChangesInSlot map[common.Address]bool // true: created, false: deleted + + addrSnapDestructsReadsInSlot map[common.Address]bool + + accountsDeletedRecord []common.Hash + storagesDeleteRecord []common.Hash + accountsOriginDeleteRecord []common.Address + storagesOriginDeleteRecord []common.Address + + // Transaction will pay gas fee to system address. + // Parallel execution will clear system address's balance at first, in order to maintain transaction's + // gas fee value. Normal transaction will access system address twice, otherwise it means the transaction + // needs real system address's balance, the transaction will be marked redo with keepSystemAddressBalance = true + // systemAddress common.Address + // systemAddressOpsCount int + // keepSystemAddressBalance bool + + // we may need to redo for some specific reasons, like we read the wrong state and need to panic in sequential mode in SubRefund + needsRedo bool +} + // StateDB structs within the ethereum protocol are used to store anything // within the merkle trie. StateDBs take care of caching and storing // nested states. It's the general query interface to retrieve: @@ -71,6 +182,13 @@ type StateDB struct { snaps *snapshot.Tree // Nil if snapshot is not available snap snapshot.Snapshot // Nil if snapshot is not available + storeParallelLock sync.RWMutex + snapParallelLock sync.RWMutex // for parallel mode, for main StateDB, slot will read snapshot, while processor will write. + trieParallelLock sync.Mutex // for parallel mode, for getting states/objects from trie, to handle trie tracer. + snapDestructs map[common.Address]struct{} + snapAccounts map[common.Address][]byte + snapStorage map[common.Address]map[string][]byte + // originalRoot is the pre-state root, before any changes were made. // It will be updated when the Commit is called. originalRoot common.Hash @@ -149,13 +267,20 @@ type StateDB struct { AccountDeleted int StorageDeleted int + isParallel bool + parallel ParallelState // to keep all the parallel execution elements // Testing hooks onCommit func(states *triestate.Set) // Hook invoked when commit is performed } +func (s *StateDB) GetStateObjectFromUnconfirmedDB(addr common.Address) (*stateObject, bool) { + return nil, false +} + // New creates a new state from a given trie. 
func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) { tr, err := db.OpenTrie(root) + if err != nil { return nil, err } @@ -178,6 +303,11 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) accessList: newAccessList(), transientStorage: newTransientStorage(), hasher: crypto.NewKeccakState(), + + parallel: ParallelState{ + SlotIndex: -1, + }, + txIndex: -1, } if sdb.snaps != nil { sdb.snap = sdb.snaps.Snapshot(root) @@ -215,6 +345,18 @@ func NewStateDBByTrie(tr Trie, db Database, snaps *snapshot.Tree) (*StateDB, err return sdb, nil } +func (s *StateDB) IsParallel() bool { + return s.isParallel +} + +func (s *StateDB) getBaseStateDB() *StateDB { + return s +} + +func (s *StateDB) getStateObjectFromStateObjects(addr common.Address) (*stateObject, bool) { + return s.loadStateObj(addr) +} + // StartPrefetcher initializes a new trie prefetcher to pull in nodes from the // state trie concurrently while the state is mutated so that when we reach the // commit phase, most of the needed data is already hot. @@ -264,7 +406,6 @@ func (s *StateDB) Error() error { func (s *StateDB) AddLog(log *types.Log) { s.journal.append(addLogChange{txhash: s.thash}) - log.TxHash = s.thash log.TxIndex = uint(s.txIndex) log.Index = s.logSize @@ -336,6 +477,7 @@ func (s *StateDB) Empty(addr common.Address) bool { } // GetBalance retrieves the balance from the given address or 0 if object not found + func (s *StateDB) GetBalance(addr common.Address) *uint256.Int { stateObject := s.getStateObject(addr) if stateObject != nil { @@ -346,20 +488,19 @@ func (s *StateDB) GetBalance(addr common.Address) *uint256.Int { // GetNonce retrieves the nonce from the given address or 0 if object not found func (s *StateDB) GetNonce(addr common.Address) uint64 { - stateObject := s.getStateObject(addr) - if stateObject != nil { - return stateObject.Nonce() + object := s.getStateObject(addr) + if object != nil { + return object.Nonce() } - return 0 } // GetStorageRoot retrieves the storage root from the given address or empty // if object not found. func (s *StateDB) GetStorageRoot(addr common.Address) common.Hash { - stateObject := s.getStateObject(addr) - if stateObject != nil { - return stateObject.Root() + object := s.getStateObject(addr) + if object != nil { + return object.Root() } return common.Hash{} } @@ -369,22 +510,31 @@ func (s *StateDB) TxIndex() int { return s.txIndex } +// BaseTxIndex returns the tx index that slot db based. +func (s *StateDB) BaseTxIndex() int { + return s.parallel.baseTxIndex +} + func (s *StateDB) GetCode(addr common.Address) []byte { - stateObject := s.getStateObject(addr) - if stateObject != nil { - return stateObject.Code() + object := s.getStateObject(addr) + if object != nil { + return object.Code() } return nil } func (s *StateDB) GetCodeSize(addr common.Address) int { - stateObject := s.getStateObject(addr) - if stateObject != nil { - return stateObject.CodeSize() + object := s.getStateObject(addr) + if object != nil { + return object.CodeSize() } return 0 } +// GetCodeHash return: +// - common.Hash{}: the address does not exist +// - emptyCodeHash: the address exist, but code is empty +// - others: the address exist, and code is not empty func (s *StateDB) GetCodeHash(addr common.Address) common.Hash { stateObject := s.getStateObject(addr) if stateObject != nil { @@ -395,18 +545,18 @@ func (s *StateDB) GetCodeHash(addr common.Address) common.Hash { // GetState retrieves a value from the given account's storage trie. 
func (s *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash { - stateObject := s.getStateObject(addr) - if stateObject != nil { - return stateObject.GetState(hash) + object := s.getStateObject(addr) + if object != nil { + return object.GetState(hash) } return common.Hash{} } // GetCommittedState retrieves a value from the given account's committed storage trie. func (s *StateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash { - stateObject := s.getStateObject(addr) - if stateObject != nil { - return stateObject.GetCommittedState(hash) + object := s.getStateObject(addr) + if object != nil { + return object.GetCommittedState(hash) } return common.Hash{} } @@ -417,9 +567,9 @@ func (s *StateDB) Database() Database { } func (s *StateDB) HasSelfDestructed(addr common.Address) bool { - stateObject := s.getStateObject(addr) - if stateObject != nil { - return stateObject.selfDestructed + object := s.getStateObject(addr) + if object != nil { + return object.selfDestructed } return false } @@ -466,6 +616,7 @@ func (s *StateDB) SetCode(addr common.Address, code []byte) { } func (s *StateDB) SetState(addr common.Address, key, value common.Hash) { + stateObject := s.getOrNewStateObject(addr) if stateObject != nil { stateObject.SetState(key, value) @@ -485,6 +636,7 @@ func (s *StateDB) SetStorage(addr common.Address, storage map[common.Hash]common // TODO(rjl493456442) this function should only be supported by 'unwritable' // state and all mutations made should all be discarded afterwards. if _, ok := s.stateObjectsDestruct[addr]; !ok { + fmt.Printf("Dav -- setStorage - stateObjectsDestruct[%s] = nil\n", addr) s.stateObjectsDestruct[addr] = nil } stateObject := s.getOrNewStateObject(addr) @@ -513,12 +665,11 @@ func (s *StateDB) SelfDestruct(addr common.Address) { } func (s *StateDB) Selfdestruct6780(addr common.Address) { - stateObject := s.getStateObject(addr) - if stateObject == nil { + object := s.getStateObject(addr) + if object == nil { return } - - if stateObject.created { + if object.created { s.SelfDestruct(addr) } } @@ -601,6 +752,7 @@ func (s *StateDB) deleteStateObject(obj *stateObject) { } // Delete the account from the trie addr := obj.Address() + if err := s.trie.DeleteAccount(addr); err != nil { s.setError(fmt.Errorf("deleteStateObject (%x) error: %v", addr[:], err)) } @@ -610,23 +762,50 @@ func (s *StateDB) deleteStateObject(obj *stateObject) { // the object is not found or was deleted in this execution context. If you need // to differentiate between non-existent/just-deleted, use getDeletedStateObject. func (s *StateDB) getStateObject(addr common.Address) *stateObject { - if obj := s.getDeletedStateObject(addr); obj != nil && !obj.deleted { + obj := s.getDeletedStateObject(addr) + if obj != nil && !obj.deleted { return obj } return nil } -// getDeletedStateObject is similar to getStateObject, but instead of returning -// nil for a deleted state object, it returns the actual object with the deleted -// flag set. This is needed by the state journal to revert to the correct s- -// destructed object instead of wiping all knowledge about the state object. 
-func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject { - // Prefer live objects if any is available - if obj := s.stateObjects[addr]; obj != nil { - return obj +func (s *StateDB) GetStateObjectFromSnapshotOrTrie(addr common.Address) (data *types.StateAccount, ok bool) { + return s.getStateObjectFromSnapshotOrTrie(addr) +} + +func (s *StateDB) SnapHasAccount(addr common.Address) (exist bool) { + if s.snap == nil { + fmt.Printf("Dav -- Test Snap have account snap is nil\n") + return false + } + + acc, _ := s.snap.Account(crypto.HashData(s.hasher, addr.Bytes())) + fmt.Printf("Dav -- Test Snap have account, root %s, have? %v\n", s.snap.Root(), acc != nil) + return acc != nil +} + +func (s *StateDB) TriHasAccount(addr common.Address) (exist bool) { + if s.trie == nil { + return false + } + + acc, _ := s.trie.GetAccount(addr) + return acc != nil +} + +func (s *StateDB) GetTrie() Trie { + if s.trie == nil { + return nil } + return s.trie +} + +func (s *StateDB) SetTrie(trie Trie) { + s.trie = trie +} + +func (s *StateDB) getStateObjectFromSnapshotOrTrie(addr common.Address) (data *types.StateAccount, ok bool) { // If no live objects are available, attempt to use snapshots - var data *types.StateAccount if s.snap != nil { start := time.Now() acc, err := s.snap.Account(crypto.HashData(s.hasher, addr.Bytes())) @@ -635,7 +814,7 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject { } if err == nil { if acc == nil { - return nil + return nil, false } data = &types.StateAccount{ Nonce: acc.Nonce, @@ -644,53 +823,117 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject { Root: common.BytesToHash(acc.Root), } if len(data.CodeHash) == 0 { - data.CodeHash = types.EmptyCodeHash.Bytes() + data.CodeHash = emptyCodeHash } if data.Root == (common.Hash{}) { data.Root = types.EmptyRootHash } } } + // If snapshot unavailable or reading from it failed, load from the database if data == nil { + var trie Trie + if s.isParallel { + // hold lock for parallel + s.trieParallelLock.Lock() + defer s.trieParallelLock.Unlock() + if s.parallel.isSlotDB { + if s.parallel.baseStateDB == nil { + return nil, false + } else { + tr, err := s.parallel.baseStateDB.db.OpenTrie(s.originalRoot) + if err != nil { + log.Error("Can not openTrie for parallel SlotDB\n") + return nil, false + } + trie = tr + } + } else { + trie = s.trie + } + } else { + trie = s.trie + } + start := time.Now() var err error - data, err = s.trie.GetAccount(addr) + data, err = trie.GetAccount(addr) if metrics.EnabledExpensive { s.AccountReads += time.Since(start) } if err != nil { s.setError(fmt.Errorf("getDeleteStateObject (%x) error: %w", addr.Bytes(), err)) - return nil + return nil, false } if data == nil { - return nil + return nil, false } } + + return data, true +} + +// getDeletedStateObject is similar to getStateObject, but instead of returning +// nil for a deleted state object, it returns the actual object with the deleted +// flag set. This is needed by the state journal to revert to the correct s- +// destructed object instead of wiping all knowledge about the state object. 
+func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject { + // Prefer live objects if any is available + if obj, _ := s.getStateObjectFromStateObjects(addr); obj != nil { + return obj + } + + data, ok := s.getStateObjectFromSnapshotOrTrie(addr) + if !ok { + return nil + } // Insert into the live set - obj := newObject(s, addr, data) - s.setStateObject(obj) + obj := newObject(s, s.isParallel, addr, data) + s.storeStateObj(addr, obj) return obj } func (s *StateDB) setStateObject(object *stateObject) { - s.stateObjects[object.Address()] = object + if s.isParallel { + // When a state object is stored into s.parallel.stateObjects, + // it belongs to base StateDB, it is confirmed and valid. + s.storeParallelLock.Lock() + s.parallel.stateObjects.Store(object.address, object) + s.storeParallelLock.Unlock() + } else { + s.stateObjects[object.Address()] = object + } + } // getOrNewStateObject retrieves a state object or create a new state object if nil. func (s *StateDB) getOrNewStateObject(addr common.Address) *stateObject { stateObject := s.getStateObject(addr) if stateObject == nil { - stateObject, _ = s.createObject(addr) + stateObject = s.createObject(addr) } return stateObject } // createObject creates a new state object. If there is an existing account with // the given address, it is overwritten and returned as the second return value. -func (s *StateDB) createObject(addr common.Address) (newobj, prev *stateObject) { - prev = s.getDeletedStateObject(addr) // Note, prev might have been deleted, we need that! - newobj = newObject(s, addr, nil) +// prev is used for CreateAccount to get its balance +// Parallel mode: +// if prev in dirty: revert is ok +// if prev in unconfirmed DB: addr state read record, revert should not put it back +// if prev in main DB: addr state read record, revert should not put it back +// if pre no exist: addr state read record, + +// `prev` is used to handle revert, to recover with the `prev` object +// In Parallel mode, we only need to recover to `prev` in SlotDB, +// +// a.if it is not in SlotDB, `revert` will remove it from the SlotDB +// b.if it is existed in SlotDB, `revert` will recover to the `prev` in SlotDB +// c.as `snapDestructs` it is the same +func (s *StateDB) createObject(addr common.Address) (newobj *stateObject) { + prev := s.getDeletedStateObject(addr) // Note, prev might have been deleted, we need that! + newobj = newObject(s, s.isParallel, addr, nil) if prev == nil { s.journal.append(createObjectChange{account: &addr}) } else { @@ -698,6 +941,8 @@ func (s *StateDB) createObject(addr common.Address) (newobj, prev *stateObject) // account and storage data should be cleared as well. Note, it must // be done here, otherwise the destruction event of "original account" // will be lost. + s.snapParallelLock.Lock() // fixme: with new dispatch policy, the ending Tx could running, while the block have processed. 
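The snapParallelLock acquired just above follows the locking discipline used for block-level maps such as stateObjectsDestruct: slot goroutines only read them under the read lock, while the dispatcher/processor mutates them under the write lock. A self-contained sketch of that discipline, with hypothetical names:

package main

import (
	"fmt"
	"sync"
)

type destructSet struct {
	mu  sync.RWMutex
	set map[string]struct{}
}

// destructed is what a slot would call: read-only, shared lock.
func (d *destructSet) destructed(addr string) bool {
	d.mu.RLock()
	defer d.mu.RUnlock()
	_, ok := d.set[addr]
	return ok
}

// markDestructed is what the dispatcher would call: exclusive lock.
func (d *destructSet) markDestructed(addr string) {
	d.mu.Lock()
	defer d.mu.Unlock()
	d.set[addr] = struct{}{}
}

func main() {
	d := &destructSet{set: make(map[string]struct{})}
	var wg sync.WaitGroup
	wg.Add(2)
	go func() { defer wg.Done(); d.markDestructed("addr_1") }()
	go func() { defer wg.Done(); _ = d.destructed("addr_1") }() // race-free regardless of ordering
	wg.Wait()
	fmt.Println(d.destructed("addr_1")) // true
}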
+ _, prevdestruct := s.stateObjectsDestruct[prev.address] if !prevdestruct { s.stateObjectsDestruct[prev.address] = prev.origin @@ -720,12 +965,19 @@ func (s *StateDB) createObject(addr common.Address) (newobj, prev *stateObject) delete(s.storages, prev.addrHash) delete(s.accountsOrigin, prev.address) delete(s.storagesOrigin, prev.address) + + if s.parallel.isSlotDB { + s.parallel.accountsDeletedRecord = append(s.parallel.accountsDeletedRecord, prev.addrHash) + s.parallel.storagesDeleteRecord = append(s.parallel.storagesDeleteRecord, prev.addrHash) + s.parallel.accountsOriginDeleteRecord = append(s.parallel.accountsOriginDeleteRecord, prev.address) + s.parallel.storagesOriginDeleteRecord = append(s.parallel.storagesOriginDeleteRecord, prev.address) + } + s.snapParallelLock.Unlock() } + + newobj.created = true s.setStateObject(newobj) - if prev != nil && !prev.deleted { - return newobj, prev - } - return newobj, nil + return newobj } // CreateAccount explicitly creates a state object. If a state object with the address @@ -739,15 +991,26 @@ func (s *StateDB) createObject(addr common.Address) (newobj, prev *stateObject) // // Carrying over the balance ensures that Ether doesn't disappear. func (s *StateDB) CreateAccount(addr common.Address) { - newObj, prev := s.createObject(addr) - if prev != nil { - newObj.setBalance(prev.data.Balance) - } + // no matter it is got from dirty, unconfirmed or main DB + // if addr not exist, preBalance will be common.Big0, it is same as new(big.Int) which + // is the value newObject(), + preBalance := s.GetBalance(addr) + newObj := s.createObject(addr) + newObj.setBalance(new(uint256.Int).Set(preBalance)) // new big.Int for newObj } // Copy creates a deep, independent copy of the state. // Snapshots of the copied state cannot be applied to the copy. func (s *StateDB) Copy() *StateDB { + return s.copyInternal(false) +} + +// CopyDoPrefetch It is mainly for state prefetcher to do trie prefetch right now. +func (s *StateDB) CopyDoPrefetch() *StateDB { + return s.copyInternal(true) +} + +func (s *StateDB) copyInternal(doPrefetch bool) *StateDB { // Copy all the basic fields, initialize the memory ones state := &StateDB{ db: s.db, @@ -774,6 +1037,8 @@ func (s *StateDB) Copy() *StateDB { // miner to operate trie-backed only. snaps: s.snaps, snap: s.snap, + + parallel: ParallelState{}, } // Copy the dirty states, logs, and preimages for addr := range s.journal.dirties { @@ -781,11 +1046,11 @@ func (s *StateDB) Copy() *StateDB { // and in the Finalise-method, there is a case where an object is in the journal but not // in the stateObjects: OOG after touch on ripeMD prior to Byzantium. Thus, we need to check for // nil - if object, exist := s.stateObjects[addr]; exist { + if object, exist := s.getStateObjectFromStateObjects(addr); exist { // Even though the original object is dirty, we are not copying the journal, // so we need to make sure that any side-effect the journal would have caused // during a commit (or similar op) is already applied to the copy. - state.stateObjects[addr] = object.deepCopy(state) + state.storeStateObj(addr, object.deepCopy(state)) state.stateObjectsDirty[addr] = struct{}{} // Mark the copy dirty to force internal (code/state) commits state.stateObjectsPending[addr] = struct{}{} // Mark the copy pending to force external (account) commits @@ -796,19 +1061,22 @@ func (s *StateDB) Copy() *StateDB { // is empty. Thus, here we iterate over stateObjects, to enable copies // of copies. 
for addr := range s.stateObjectsPending { - if _, exist := state.stateObjects[addr]; !exist { - state.stateObjects[addr] = s.stateObjects[addr].deepCopy(state) + if _, exist := state.getStateObjectFromStateObjects(addr); !exist { + object, _ := s.getStateObjectFromStateObjects(addr) + state.storeStateObj(addr, object.deepCopy(state)) } state.stateObjectsPending[addr] = struct{}{} } for addr := range s.stateObjectsDirty { - if _, exist := state.stateObjects[addr]; !exist { - state.stateObjects[addr] = s.stateObjects[addr].deepCopy(state) + if _, exist := state.getStateObjectFromStateObjects(addr); !exist { + object, _ := s.getStateObjectFromStateObjects(addr) + state.storeStateObj(addr, object.deepCopy(state)) } state.stateObjectsDirty[addr] = struct{}{} } // Deep copy the destruction markers. for addr, value := range s.stateObjectsDestruct { + // fmt.Printf("Dav -- copyInternal - stateObjectsDestruct[%s] = (%p) : %v \n", addr, value, value) state.stateObjectsDestruct[addr] = value } // Deep copy the state changes made in the scope of block @@ -849,6 +1117,280 @@ func (s *StateDB) Copy() *StateDB { return state } +var journalPool = sync.Pool{ + New: func() interface{} { + return &journal{ + dirties: make(map[common.Address]int, defaultNumOfSlots), + entries: make([]journalEntry, 0, defaultNumOfSlots), + } + }, +} + +var addressToStructPool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]struct{}, defaultNumOfSlots) }, +} + +var addressToStateKeysPool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]StateKeys, defaultNumOfSlots) }, +} + +var addressToStoragePool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]Storage, defaultNumOfSlots) }, +} + +var addressToStateObjectsPool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]*stateObject, defaultNumOfSlots) }, +} + +var balancePool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]*uint256.Int, defaultNumOfSlots) }, +} + +var addressToHashPool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]common.Hash, defaultNumOfSlots) }, +} + +var addressToBytesPool = sync.Pool{ + New: func() interface{} { return make(map[common.Address][]byte, defaultNumOfSlots) }, +} + +var addressToBoolPool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]bool, defaultNumOfSlots) }, +} + +var addressToUintPool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]uint64, defaultNumOfSlots) }, +} + +var snapStoragePool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]map[string][]byte, defaultNumOfSlots) }, +} + +var snapStorageValuePool = sync.Pool{ + New: func() interface{} { return make(map[string][]byte, defaultNumOfSlots) }, +} + +var logsPool = sync.Pool{ + New: func() interface{} { return make(map[common.Hash][]*types.Log, defaultNumOfSlots) }, +} + +func (s *StateDB) PutSyncPool() { + for key := range s.parallel.codeReadsInSlot { + delete(s.parallel.codeReadsInSlot, key) + } + addressToBytesPool.Put(s.parallel.codeReadsInSlot) + + for key := range s.parallel.codeHashReadsInSlot { + delete(s.parallel.codeHashReadsInSlot, key) + } + addressToHashPool.Put(s.parallel.codeHashReadsInSlot) + + for key := range s.parallel.codeChangesInSlot { + delete(s.parallel.codeChangesInSlot, key) + } + addressToStructPool.Put(s.parallel.codeChangesInSlot) + + for key := range s.parallel.kvChangesInSlot { + delete(s.parallel.kvChangesInSlot, key) + } + 
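PutSyncPool clears each per-transaction record map and returns it to its sync.Pool, so the next CopyForSlot can reuse the allocation instead of creating (and garbage-collecting) a fresh set of maps for every transaction. A condensed sketch of the same clear-then-Put / Get-and-reuse idiom, with a hypothetical pool:

package main

import (
	"fmt"
	"sync"
)

var recordPool = sync.Pool{
	New: func() interface{} { return make(map[string]struct{}, 16) },
}

func getRecord() map[string]struct{} {
	return recordPool.Get().(map[string]struct{})
}

func putRecord(m map[string]struct{}) {
	for k := range m { // clear before returning, so the next user starts empty
		delete(m, k)
	}
	recordPool.Put(m)
}

func main() {
	m := getRecord()
	m["addr_1"] = struct{}{}
	putRecord(m)

	reused := getRecord()    // possibly the same backing map, now cleared
	fmt.Println(len(reused)) // 0 either way
}

Clearing before Put matters: sync.Pool gives no guarantee about which object comes back, so every pooled map has to be returned empty.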
addressToStateKeysPool.Put(s.parallel.kvChangesInSlot) + + for key := range s.parallel.kvReadsInSlot { + delete(s.parallel.kvReadsInSlot, key) + } + addressToStoragePool.Put(s.parallel.kvReadsInSlot) + + for key := range s.parallel.balanceChangesInSlot { + delete(s.parallel.balanceChangesInSlot, key) + } + addressToStructPool.Put(s.parallel.balanceChangesInSlot) + + for key := range s.parallel.balanceReadsInSlot { + delete(s.parallel.balanceReadsInSlot, key) + } + balancePool.Put(s.parallel.balanceReadsInSlot) + + for key := range s.parallel.addrStateReadsInSlot { + delete(s.parallel.addrStateReadsInSlot, key) + } + addressToBoolPool.Put(s.parallel.addrStateReadsInSlot) + + for key := range s.parallel.addrStateChangesInSlot { + delete(s.parallel.addrStateChangesInSlot, key) + } + addressToBoolPool.Put(s.parallel.addrStateChangesInSlot) + + for key := range s.parallel.nonceChangesInSlot { + delete(s.parallel.nonceChangesInSlot, key) + } + addressToStructPool.Put(s.parallel.nonceChangesInSlot) + + for key := range s.parallel.nonceReadsInSlot { + delete(s.parallel.nonceReadsInSlot, key) + } + addressToUintPool.Put(s.parallel.nonceReadsInSlot) + + for key := range s.parallel.addrSnapDestructsReadsInSlot { + delete(s.parallel.addrSnapDestructsReadsInSlot, key) + } + addressToBoolPool.Put(s.parallel.addrSnapDestructsReadsInSlot) + + for key := range s.parallel.dirtiedStateObjectsInSlot { + delete(s.parallel.dirtiedStateObjectsInSlot, key) + } + addressToStateObjectsPool.Put(s.parallel.dirtiedStateObjectsInSlot) + + for key := range s.stateObjectsPending { + delete(s.stateObjectsPending, key) + } + addressToStructPool.Put(s.stateObjectsPending) + + for key := range s.stateObjectsDirty { + delete(s.stateObjectsDirty, key) + } + addressToStructPool.Put(s.stateObjectsDirty) + + for key := range s.logs { + delete(s.logs, key) + } + logsPool.Put(s.logs) + + for key := range s.journal.dirties { + delete(s.journal.dirties, key) + } + s.journal.entries = s.journal.entries[:0] + journalPool.Put(s.journal) + + for key := range s.snapDestructs { + delete(s.snapDestructs, key) + } + addressToStructPool.Put(s.snapDestructs) + + for key := range s.snapAccounts { + delete(s.snapAccounts, key) + } + addressToBytesPool.Put(s.snapAccounts) + + for key, storage := range s.snapStorage { + for key := range storage { + delete(storage, key) + } + snapStorageValuePool.Put(storage) + delete(s.snapStorage, key) + } + snapStoragePool.Put(s.snapStorage) +} + +// CopyForSlot copy all the basic fields, initialize the memory ones +func (s *StateDB) CopyForSlot() *ParallelStateDB { + parallel := ParallelState{ + // The stateObjects in Parallel is thread-local. + // The base stateDB's stateObjects is thread-unsafe as it is not guarded by lock. + // The base stateDB's parallel.stateObjects is SyncMap and thread-safe. and no extra lock needed (TODO-dav). + // The base stateDB's parallel.stateObjects are updated by mergeSlotDB with Lock. + // The base stateDB's stateObject is read-only and never be updated once parallel execution happens. + // AND, presumably, the stateDB's stateObject is usually empty for real on-chain cases. + // Before execution, the slotDB should copy objects from base stateDB's parallel.stateObjects and stateObjects + // NOTICE: + // We are not reusing the base slot db's stateObjects although copy can be avoid. 
Because multiple thread + // access has lock check and there might be tricky bug such as thread1 handle tx0 at the same time with thread2 + // handle tx1, so what thread1's slotDB see in the s.parallel.stateObjects might be the middle result of Thread2. + // + // We are not do simple copy (lightweight pointer copy) as the stateObject can be accessed by different thread. + // Todo-dav: remove lock guard of parallel.stateObject access. + + stateObjects: &StateObjectSyncMap{}, // s.parallel.stateObjects, + codeReadsInSlot: addressToBytesPool.Get().(map[common.Address][]byte), + codeHashReadsInSlot: addressToHashPool.Get().(map[common.Address]common.Hash), + codeChangesInSlot: addressToStructPool.Get().(map[common.Address]struct{}), + kvChangesInSlot: addressToStateKeysPool.Get().(map[common.Address]StateKeys), + kvReadsInSlot: addressToStoragePool.Get().(map[common.Address]Storage), + balanceChangesInSlot: addressToStructPool.Get().(map[common.Address]struct{}), + balanceReadsInSlot: balancePool.Get().(map[common.Address]*uint256.Int), + addrStateReadsInSlot: addressToBoolPool.Get().(map[common.Address]bool), + addrStateChangesInSlot: addressToBoolPool.Get().(map[common.Address]bool), + nonceChangesInSlot: addressToStructPool.Get().(map[common.Address]struct{}), + nonceReadsInSlot: addressToUintPool.Get().(map[common.Address]uint64), + addrSnapDestructsReadsInSlot: addressToBoolPool.Get().(map[common.Address]bool), + isSlotDB: true, + dirtiedStateObjectsInSlot: addressToStateObjectsPool.Get().(map[common.Address]*stateObject), + accountsDeletedRecord: make([]common.Hash, 10), + storagesDeleteRecord: make([]common.Hash, 10), + accountsOriginDeleteRecord: make([]common.Address, 10), + storagesOriginDeleteRecord: make([]common.Address, 10), + } + state := &ParallelStateDB{ + StateDB: StateDB{ + db: s.db, + trie: nil, // Parallel StateDB may access the trie, but it takes no effect to the baseDB. + accounts: make(map[common.Hash][]byte), + storages: make(map[common.Hash]map[common.Hash][]byte), + accountsOrigin: make(map[common.Address][]byte), + storagesOrigin: make(map[common.Address]map[common.Hash][]byte), + stateObjects: make(map[common.Address]*stateObject), // replaced by parallel.stateObjects in parallel mode + stateObjectsPending: addressToStructPool.Get().(map[common.Address]struct{}), + stateObjectsDirty: addressToStructPool.Get().(map[common.Address]struct{}), + stateObjectsDestruct: make(map[common.Address]*types.StateAccount), + refund: 0, // should be 0 + logs: logsPool.Get().(map[common.Hash][]*types.Log), + logSize: 0, + preimages: make(map[common.Hash][]byte, len(s.preimages)), + journal: journalPool.Get().(*journal), + hasher: crypto.NewKeccakState(), + isParallel: true, + parallel: parallel, + }, + } + // no need to copy preimages, comment out and remove later + // for hash, preimage := range s.preimages { + // state.preimages[hash] = preimage + // } + + // copy parallel stateObjects + s.storeParallelLock.Lock() + s.parallel.stateObjects.Range(func(addr any, stateObj any) bool { + state.parallel.stateObjects.StoreStateObject(addr.(common.Address), stateObj.(*stateObject).lightCopy(state)) + return true + }) + s.storeParallelLock.Unlock() + if s.snaps != nil { + // In order for the miner to be able to use and make additions + // to the snapshot tree, we need to copy that as well. 
+ // Otherwise, any block mined by ourselves will cause gaps in the tree, + // and force the miner to operate trie-backed only + state.snaps = s.snaps + state.snap = s.snap + // deep copy needed + state.snapDestructs = addressToStructPool.Get().(map[common.Address]struct{}) + s.snapParallelLock.RLock() + for k, v := range s.snapDestructs { + state.snapDestructs[k] = v + } + s.snapParallelLock.RUnlock() + // snapAccounts is useless in SlotDB, comment out and remove later + // state.snapAccounts = make(map[common.Address][]byte) // snapAccountPool.Get().(map[common.Address][]byte) + // for k, v := range s.snapAccounts { + // state.snapAccounts[k] = v + // } + + // snapStorage is useless in SlotDB either, it is updated on updateTrie, which is validation phase to update the snapshot of a finalized block. + // state.snapStorage = snapStoragePool.Get().(map[common.Address]map[string][]byte) + // for k, v := range s.snapStorage { + // temp := snapStorageValuePool.Get().(map[string][]byte) + // for kk, vv := range v { + // temp[kk] = vv + // } + // state.snapStorage[k] = temp + // } + + // trie prefetch should be done by dispatcher on StateObject Merge, + // disable it in parallel slot + // state.prefetcher = s.prefetcher + } + + return state +} + // Snapshot returns an identifier for the current revision of the state. func (s *StateDB) Snapshot() int { id := s.nextRevisionId @@ -883,8 +1425,21 @@ func (s *StateDB) GetRefund() uint64 { // into the tries just yet. Only IntermediateRoot or Commit will do that. func (s *StateDB) Finalise(deleteEmptyObjects bool) { addressesToPrefetch := make([][]byte, 0, len(s.journal.dirties)) + for addr := range s.journal.dirties { - obj, exist := s.stateObjects[addr] + var obj *stateObject + var exist bool + if s.parallel.isSlotDB { + obj = s.parallel.dirtiedStateObjectsInSlot[addr] + if obj != nil { + exist = true + } else { + log.Error("StateDB Finalise dirty addr not in dirtiedStateObjectsInSlot", + "addr", addr) + } + } else { + obj, exist = s.getStateObjectFromStateObjects(addr) + } if !exist { // ripeMD is 'touched' at block 1714175, in tx 0x1237f737031e40bcde4a8b7e717b2d15e3ecadfe49bb1bbc71ee9deb09c6fcf2 // That tx goes out of gas, and although the notion of 'touched' does not exist there, the @@ -894,6 +1449,7 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { // Thus, we can safely ignore it here continue } + if obj.selfDestructed || (deleteEmptyObjects && obj.empty()) { obj.deleted = true @@ -910,9 +1466,23 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { delete(s.storages, obj.addrHash) // Clear out any previously updated storage data (may be recreated via a resurrect) delete(s.accountsOrigin, obj.address) // Clear out any previously updated account data (may be recreated via a resurrect) delete(s.storagesOrigin, obj.address) // Clear out any previously updated storage data (may be recreated via a resurrect) + + if s.parallel.isSlotDB { + s.parallel.accountsDeletedRecord = append(s.parallel.accountsDeletedRecord, obj.addrHash) + s.parallel.storagesDeleteRecord = append(s.parallel.storagesDeleteRecord, obj.addrHash) + s.parallel.accountsOriginDeleteRecord = append(s.parallel.accountsOriginDeleteRecord, obj.address) + s.parallel.storagesOriginDeleteRecord = append(s.parallel.storagesOriginDeleteRecord, obj.address) + } + } else { - obj.finalise(true) // Prefetch slots in the background + // 1.none parallel mode, we do obj.finalise(true) as normal + // 2.with parallel mode, we do obj.finalise(true) on dispatcher, not on slot routine + // 
obj.finalise(true) will clear its dirtyStorage, will make prefetch broken. + if !s.isParallel || !s.parallel.isSlotDB { + obj.finalise(true) // Prefetch slots in the background + } } + obj.created = false s.stateObjectsPending[addr] = struct{}{} s.stateObjectsDirty[addr] = struct{}{} @@ -932,6 +1502,7 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { // IntermediateRoot computes the current root hash of the state trie. // It is called in between transactions to get the root hash that // goes into transaction receipts. +// TODO: For parallel SlotDB, IntermediateRootForSlot is used, need to clean up this method. func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { // Finalise all the dirty storage states and write them into the tries s.Finalise(deleteEmptyObjects) @@ -991,18 +1562,39 @@ func (s *StateDB) StateIntermediateRoot() common.Hash { // the remainder without, but pre-byzantium even the initial prefetcher is // useless, so no sleep lost. prefetcher := s.prefetcher + r := s.originalRoot if s.prefetcher != nil { defer func() { s.prefetcher.close() s.prefetcher = nil }() + if s.isParallel { + r = s.trie.Hash() + } + } + // Although naively it makes sense to retrieve the account trie and then do + // the contract storage and account updates sequentially, that short circuits + // the account prefetcher. Instead, let's process all the storage updates + // first, giving the account prefetches just a few more milliseconds of time + // to pull useful data from disk. + for addr := range s.stateObjectsPending { + var obj *stateObject + if s.parallel.isSlotDB { + if obj = s.parallel.dirtiedStateObjectsInSlot[addr]; !obj.deleted { + obj.updateRoot() + } + } else { + if obj, _ = s.getStateObjectFromStateObjects(addr); !obj.deleted { + obj.updateRoot() + } + } } - // Now we're about to start to write changes to the trie. The trie is so far // _untouched_. We can check with the prefetcher, if it can give us a trie // which has the same root, but also has some content loaded into it. + // The parallel execution do the change incrementally, so can not check the prefetcher here if prefetcher != nil { - if trie := prefetcher.trie(common.Hash{}, s.originalRoot); trie != nil { + if trie := prefetcher.trie(common.Hash{}, r); trie != nil { s.trie = trie } } @@ -1015,8 +1607,17 @@ func (s *StateDB) StateIntermediateRoot() common.Hash { } usedAddrs := make([][]byte, 0, len(s.stateObjectsPending)) + for addr := range s.stateObjectsPending { - if obj := s.stateObjects[addr]; obj.deleted { + if s.parallel.isSlotDB { + if obj := s.parallel.dirtiedStateObjectsInSlot[addr]; obj.deleted { + s.deleteStateObject(obj) + s.AccountDeleted += 1 + } else { + s.updateStateObject(obj) + s.AccountUpdated += 1 + } + } else if obj, _ := s.getStateObjectFromStateObjects(addr); obj.deleted { s.deleteStateObject(obj) s.AccountDeleted += 1 } else { @@ -1028,8 +1629,11 @@ func (s *StateDB) StateIntermediateRoot() common.Hash { if prefetcher != nil { prefetcher.used(common.Hash{}, s.originalRoot, usedAddrs) } + // parallel slotDB trie will be updated to mainDB since intermediateRoot happens after conflict check. + // so it should be save to clear pending here. + // otherwise there can be a case that the deleted object get ignored and processes as live object in verify phase. 
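The constraint spelled out above (obj.finalise(true) runs on the dispatcher, not on the slot) is an ordering issue: finalise drains dirtyStorage into pendingStorage and resets the dirty set, and AddrPrefetch later walks dirtyStorage to drive trie prefetch, so a slot that finalises early leaves nothing to prefetch. A toy model of that ordering, not the PR's code:

package main

import "fmt"

type obj struct {
	dirty, pending map[string]string
}

// finalise drains dirty -> pending and resets dirty, like stateObject.finalise.
func (o *obj) finalise() {
	for k, v := range o.dirty {
		o.pending[k] = v
	}
	o.dirty = make(map[string]string)
}

func main() {
	o := &obj{dirty: map[string]string{"key": "val"}, pending: map[string]string{}}

	o.finalise() // slot finalises too early ...

	// ... the dispatcher later wants the dirty slots to drive prefetch (see AddrPrefetch below):
	fmt.Println(len(o.dirty))   // 0 -- nothing left to prefetch
	fmt.Println(len(o.pending)) // 1 -- the data moved on without being prefetched
}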
- if len(s.stateObjectsPending) > 0 { + if /*s.isParallel == false &&*/ len(s.stateObjectsPending) > 0 { s.stateObjectsPending = make(map[common.Address]struct{}) } // Track the amount of time wasted on hashing the account trie @@ -1221,6 +1825,7 @@ func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) (map[common.A if s.db.TrieDB().Scheme() == rawdb.HashScheme { return incomplete, nil } + for addr, prev := range s.stateObjectsDestruct { // The original account was non-existing, and it's marked as destructed // in the scope of block. It can be case (a) or (b). @@ -1241,6 +1846,7 @@ func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) (map[common.A if prev.Root == types.EmptyRootHash { continue } + // Remove storage slots belong to the account. aborted, slots, set, err := s.deleteStorage(addr, addrHash, prev.Root) if err != nil { @@ -1253,6 +1859,9 @@ func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) (map[common.A if aborted { incomplete[addr] = struct{}{} delete(s.storagesOrigin, addr) + if s.parallel.isSlotDB { + s.parallel.storagesOriginDeleteRecord = append(s.parallel.storagesOriginDeleteRecord, addr) + } continue } if s.storagesOrigin[addr] == nil { @@ -1652,3 +2261,327 @@ func copy2DSet[k comparable](set map[k]map[common.Hash][]byte) map[k]map[common. } return copied } + +// PrepareForParallel prepares for state db to be used in parallel execution mode. +func (s *StateDB) PrepareForParallel() { + s.isParallel = true + s.parallel.stateObjects = &StateObjectSyncMap{} + // copy objects in stateObjects into parallel if not exist. + // This is lock free as the PrepareForParallel() is invoked at serial phase. + for addr, objPtr := range s.stateObjects { + if _, exist := s.parallel.stateObjects.LoadStateObject(addr); !exist { + newObj := objPtr.deepCopy(s) + s.parallel.stateObjects.StoreStateObject(addr, newObj) + } + } +} + +func (s *StateDB) AddrPrefetch(slotDb *ParallelStateDB) { + addressesToPrefetch := make([][]byte, 0, len(slotDb.parallel.dirtiedStateObjectsInSlot)) + for addr, obj := range slotDb.parallel.dirtiedStateObjectsInSlot { + addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure + if obj.deleted { + continue + } + // copied from obj.finalise(true) + slotsToPrefetch := make([][]byte, 0, obj.dirtyStorage.Length()) + obj.dirtyStorage.Range(func(key, value interface{}) bool { + originalValue, _ := obj.originStorage.GetValue(key.(common.Hash)) + if value.(common.Hash) != originalValue { + originalKey := key.(common.Hash) + slotsToPrefetch = append(slotsToPrefetch, common.CopyBytes(originalKey[:])) // Copy needed for closure + } + return true + }) + if s.prefetcher != nil && len(slotsToPrefetch) > 0 { + s.prefetcher.prefetch(obj.addrHash, obj.data.Root, obj.address, slotsToPrefetch) + } + } + + if s.prefetcher != nil && len(addressesToPrefetch) > 0 { + // log.Info("AddrPrefetch", "slotDb.TxIndex", slotDb.TxIndex(), + // "len(addressesToPrefetch)", len(slotDb.parallel.addressesToPrefetch)) + s.prefetcher.prefetch(common.Hash{}, s.originalRoot, emptyAddr, addressesToPrefetch) + } +} + +// MergeSlotDB is for Parallel execution mode, when the transaction has been +// finalized(dirty -> pending) on execution slot, the execution results should be +// merged back to the main StateDB. 
+func (s *StateDB) MergeSlotDB(slotDb *ParallelStateDB, slotReceipt *types.Receipt, txIndex int) *StateDB { + s.SetTxContext(slotDb.thash, slotDb.txIndex) + + for s.nextRevisionId < slotDb.nextRevisionId { + if len(slotDb.validRevisions) > 0 { + r := slotDb.validRevisions[s.nextRevisionId] + s.validRevisions = append(s.validRevisions, r) + } + s.nextRevisionId++ + if len(slotDb.validRevisions) < s.nextRevisionId { + continue + } + } + + // receipt.Logs use unified log index within a block + // align slotDB's log index to the block stateDB's logSize + for _, l := range slotReceipt.Logs { + l.Index += s.logSize + s.logs[s.thash] = append(s.logs[s.thash], l) + } + + s.logSize += slotDb.logSize + + // only merge dirty objects + addressesToPrefetch := make([][]byte, 0, len(slotDb.stateObjectsDirty)) + + for addr := range slotDb.stateObjectsDirty { + if _, exist := s.stateObjectsDirty[addr]; !exist { + s.stateObjectsDirty[addr] = struct{}{} + } + + // stateObjects: KV, balance, nonce... + dirtyObj, ok := slotDb.parallel.dirtiedStateObjectsInSlot[addr] + if !ok { + log.Error("parallel merge, but dirty object not exist!", "SlotIndex", slotDb.parallel.SlotIndex, "txIndex:", slotDb.txIndex, "addr", addr) + continue + } + mainObj, exist := s.loadStateObj(addr) + + if !exist || mainObj.deleted { + + // fixme: it is also state change + // addr not exist on main DB, do ownership transfer + // dirtyObj.db = s + // dirtyObj.finalise(true) // true: prefetch on dispatcher + mainObj = dirtyObj.deepCopy(s) + /* if addr == WBNBAddress && slotDb.wbnbMakeUpBalance != nil { + mainObj.setBalance(slotDb.wbnbMakeUpBalance) + }*/ + if !dirtyObj.deleted { + mainObj.finalise(true) + } + s.storeStateObj(addr, mainObj) + + // fixme: should not delete, would cause unconfirmed DB incorrect? + // delete(slotDb.parallel.dirtiedStateObjectsInSlot, addr) // transfer ownership, fixme: shared read? + if dirtyObj.deleted { + // remove the addr from snapAccounts&snapStorage only when object is deleted. + // "deleted" is not equal to "snapDestructs", since createObject() will add an addr for + // snapDestructs to destroy previous object, while it will keep the addr in snapAccounts & snapAccounts + delete(s.snapAccounts, addr) + delete(s.snapStorage, addr) + delete(s.accounts, dirtyObj.addrHash) // Clear out any previously updated account data (may be recreated via a resurrect) + delete(s.storages, dirtyObj.addrHash) // Clear out any previously updated storage data (may be recreated via a resurrect) + delete(s.accountsOrigin, dirtyObj.address) // Clear out any previously updated account data (may be recreated via a resurrect) + delete(s.storagesOrigin, dirtyObj.address) // Clear out any previously updated storage data (may be recreated via a resurrect) + } + } else { + // addr already in main DB, do merge: balance, KV, code, State(create, suicide) + // can not do copy or ownership transfer directly, since dirtyObj could have outdated + // data(maybe updated within the conflict window) + var newMainObj = mainObj // we don't need to copy the object since the storages are thread safe + if _, ok := slotDb.parallel.addrStateChangesInSlot[addr]; ok { + // there are 3 kinds of state change: + // 1.Suicide + // 2.Empty Delete + // 3.createObject + // a: AddBalance,SetState to a non-exist or deleted(suicide, empty delete) address. 
+ // b: CreateAccount: like the DAO fork, regenerate an account carrying its balance without KV + // For these state changes, do ownership transfer for efficiency: + // dirtyObj.db = s + // newMainObj = dirtyObj + + // The deepCopy() here introduces an issue: the pendingStorage may not be empty until block validation, + // so the pendingStorage filled by the execution of previous txs in the same block may get overwritten by + // the deepCopy here, which causes issues in root calculation. + newMainObj = dirtyObj.deepCopy(s) + + // Merge storages. Only merge entries that don't exist yet, since dirtyObj is newer than mainObj + mainObj.originStorage.Range(func(key, value interface{}) bool { + if _, found := newMainObj.originStorage.GetValue(key.(common.Hash)); !found { + newMainObj.originStorage.StoreValue(key.(common.Hash), value.(common.Hash)) + } + return true + }) + + mainObj.pendingStorage.Range(func(key, value interface{}) bool { + if _, found := newMainObj.pendingStorage.GetValue(key.(common.Hash)); !found { + newMainObj.pendingStorage.StoreValue(key.(common.Hash), value.(common.Hash)) + } + return true + }) + + // TODO - dav: check - the dirtyStorage should always be empty for mainObj, as it should have been moved to + // pendingStorage by Finalise in the execution phase. + mainObj.dirtyStorage.Range(func(key, value interface{}) bool { + if _, found := newMainObj.dirtyStorage.GetValue(key.(common.Hash)); !found { + newMainObj.dirtyStorage.StoreValue(key.(common.Hash), value.(common.Hash)) + } + return true + }) + + // should not delete, it would make the unconfirmed DB incorrect. + // delete(slotDb.parallel.dirtiedStateObjectsInSlot, addr) // transfer ownership, fixme: shared read? + if dirtyObj.deleted { + // remove the addr from snapAccounts&snapStorage only when the object is deleted. + // "deleted" is not equal to "snapDestructs", since createObject() will add an addr for + // snapDestructs to destroy the previous object, while it will keep the addr in snapAccounts & snapStorage + delete(s.snapAccounts, addr) + delete(s.snapStorage, addr) + delete(s.accounts, dirtyObj.addrHash) // Clear out any previously updated account data (may be recreated via a resurrect) + delete(s.storages, dirtyObj.addrHash) // Clear out any previously updated storage data (may be recreated via a resurrect) + delete(s.accountsOrigin, dirtyObj.address) // Clear out any previously updated account data (may be recreated via a resurrect) + delete(s.storagesOrigin, dirtyObj.address) // Clear out any previously updated storage data (may be recreated via a resurrect) + } + } else { + // deepCopy a temporary *stateObject for safety, since the slot could read the address; + // dispatch should avoid overwriting the StateObject directly, otherwise it could + // crash with: concurrent map iteration and map write + + if _, balanced := slotDb.parallel.balanceChangesInSlot[addr]; balanced { + newMainObj.setBalance(dirtyObj.Balance()) + } + if _, coded := slotDb.parallel.codeChangesInSlot[addr]; coded { + if bytes.Equal(dirtyObj.data.CodeHash, types.EmptyCodeHash.Bytes()) { // addr.Hex() == "0x0000000000000000000000000000000000000100" { + fmt.Printf("Dav -- MergeSlotDB - codeChangeInSlot - setObjectCodeHash to Empty, addr: %s\n", addr) + } + newMainObj.code = dirtyObj.code + newMainObj.data.CodeHash = dirtyObj.data.CodeHash + newMainObj.dirtyCode = true + } + if keys, stated := slotDb.parallel.kvChangesInSlot[addr]; stated { + newMainObj.MergeSlotObject(s.db, dirtyObj, keys) + } + if _, nonced := slotDb.parallel.nonceChangesInSlot[addr]; nonced { + // dirtyObj.Nonce() should not
be less than newMainObj's nonce + newMainObj.setNonce(dirtyObj.Nonce()) + } + newMainObj.deleted = dirtyObj.deleted + } + if !newMainObj.deleted { + newMainObj.finalise(true) // true: prefetch on dispatcher + } + // update the object + s.storeStateObj(addr, newMainObj) + } + addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure + } + + if s.prefetcher != nil && len(addressesToPrefetch) > 0 { + s.prefetcher.prefetch(common.Hash{}, s.originalRoot, emptyAddr, addressesToPrefetch) // prefetch for trie node of account + } + + for addr := range slotDb.stateObjectsPending { + if _, exist := s.stateObjectsPending[addr]; !exist { + s.stateObjectsPending[addr] = struct{}{} + } + } + + for addr := range slotDb.stateObjectsDestruct { + if acc, exist := s.stateObjectsDestruct[addr]; !exist { + s.stateObjectsDestruct[addr] = acc + } + } + // slotDb.logs: logs are kept in the receipts, no need to merge them + + for hash, preimage := range slotDb.preimages { + s.preimages[hash] = preimage + } + if s.accessList != nil && slotDb.accessList != nil { + s.accessList = slotDb.accessList.Copy() + } + + // handle accounts, storages and origins + for _, addr := range slotDb.parallel.accountsDeletedRecord { + if _, ok := s.accounts[addr]; ok { + delete(s.accounts, addr) + } + } + for addr, val := range slotDb.accounts { + s.accounts[addr] = val + } + + // storages + for _, addr := range slotDb.parallel.storagesDeleteRecord { + if _, ok := s.storages[addr]; ok { + delete(s.storages, addr) + } + } + + for addr, slotStMap := range slotDb.storages { + mainStMap := s.storages[addr] + if mainStMap == nil { + mainStMap = make(map[common.Hash][]byte) + } + for k, v := range slotStMap { + mainStMap[k] = v + } + s.storages[addr] = mainStMap + } + + // accountsOrigin + for _, addr := range slotDb.parallel.accountsOriginDeleteRecord { + if _, ok := s.accountsOrigin[addr]; ok { + delete(s.accountsOrigin, addr) + } + } + + for addr, val := range slotDb.accountsOrigin { + s.accountsOrigin[addr] = val + } + + // storagesOrigin + for _, addr := range slotDb.parallel.storagesOriginDeleteRecord { + if _, ok := s.storagesOrigin[addr]; ok { + delete(s.storagesOrigin, addr) + } + } + + for addr, slotStOrgMap := range slotDb.storagesOrigin { + mainStOrgMap := s.storagesOrigin[addr] + if mainStOrgMap == nil { + mainStOrgMap = make(map[common.Hash][]byte) + } + for k, v := range slotStOrgMap { + mainStOrgMap[k] = v + } + s.storagesOrigin[addr] = mainStOrgMap + } + + if slotDb.snaps != nil { + for k := range slotDb.snapDestructs { + // There could be a race condition in parallel transaction execution: + // one transaction adds balance 0 to an empty address and will delete it (delete-empty is enabled), + // while another concurrent transaction could add a non-zero balance to it, making it non-empty. + // We fixed this by adding an addr state read record for the add-balance-0 case. + s.snapParallelLock.Lock() + s.snapDestructs[k] = struct{}{} + s.snapParallelLock.Unlock() + } + } + + return s +} + +func (s *StateDB) ParallelMakeUp(common.Address, []byte) { + // do nothing, this API is for parallel mode +} + +func (s *StateDB) PrintParallelStateObjects() { + if s.parallel.stateObjects == nil { + return + } + s.parallel.stateObjects.Range(func(a any, v any) bool { + fmt.Printf("Dav - .parallel.stateObjects addr %v, val: %v\n", a, v) + return true + }) +} + +func (s *StateDB) GetNonceFromBaseDB(addr common.Address) uint64 { + return s.getBaseStateDB().GetNonce(addr) +} + +// delete me!
+func (s *StateDB) GetDB() Database { + return s.db +} diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go index e71c984f12..0ffeca0e22 100644 --- a/core/state/statedb_test.go +++ b/core/state/statedb_test.go @@ -19,6 +19,7 @@ package state import ( "bytes" "encoding/binary" + "encoding/hex" "errors" "fmt" "math" @@ -43,6 +44,10 @@ import ( "github.com/holiman/uint256" ) +var ( + systemAddress = common.HexToAddress("0xffffFFFfFFffffffffffffffFfFFFfffFFFfFFfE") +) + // Tests that updating a state trie does not leak any database writes prior to // actually committing the state. func TestUpdateLeaks(t *testing.T) { @@ -180,6 +185,7 @@ func TestCopy(t *testing.T) { // modify all in memory for i := byte(0); i < 255; i++ { + origObj := orig.getOrNewStateObject(common.BytesToAddress([]byte{i})) copyObj := copy.getOrNewStateObject(common.BytesToAddress([]byte{i})) ccopyObj := ccopy.getOrNewStateObject(common.BytesToAddress([]byte{i})) @@ -465,7 +471,7 @@ func forEachStorage(s *StateDB, addr common.Address, cb func(key, value common.H for it.Next() { key := common.BytesToHash(s.trie.GetKey(it.Key)) - if value, dirty := so.dirtyStorage[key]; dirty { + if value, dirty := so.dirtyStorage.GetValue(key); dirty { if !cb(key, value) { return nil } @@ -1191,3 +1197,367 @@ func TestDeleteStorage(t *testing.T) { t.Fatalf("difference found:\nfast: %v\nslow: %v\n", fastRes, slowRes) } } + +func TestSuicide(t *testing.T) { + // Create an initial state with a few accounts + db := rawdb.NewMemoryDatabase() + state, _ := New(types.EmptyRootHash, NewDatabase(db), nil) + unconfirmedDBs := new(sync.Map) + + state.PrepareForParallel() + slotDb := NewSlotDB(state, 0, 0, unconfirmedDBs) + + addr := common.BytesToAddress([]byte("so")) + slotDb.SetBalance(addr, big.NewInt(1)) + + slotDb.SelfDestruct(addr) + + if _, ok := slotDb.parallel.addrStateChangesInSlot[addr]; !ok { + t.Fatalf("address should exist in addrStateChangesInSlot") + } + + if _, ok := slotDb.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + t.Fatalf("address should exist in dirtiedStateObjectsInSlot") + } + + hasSuicide := slotDb.HasSelfDestructed(addr) + if !hasSuicide { + t.Fatalf("address should be suicided") + } + + if _, ok := slotDb.parallel.addrStateReadsInSlot[addr]; !ok { + t.Fatalf("address should exist in addrStateReadsInSlot") + } +} + +func TestSetAndGetState(t *testing.T) { + memDb := rawdb.NewMemoryDatabase() + db := NewDatabase(memDb) + state, _ := New(types.EmptyRootHash, db, nil) + + addr := common.BytesToAddress([]byte("so")) + state.SetBalance(addr, big.NewInt(1)) + unconfirmedDBs := new(sync.Map) + state.PrepareForParallel() + slotDb := NewSlotDB(state, 0, 0, unconfirmedDBs) + slotDb.SetState(addr, common.BytesToHash([]byte("test key")), common.BytesToHash([]byte("test store"))) + + if _, ok := slotDb.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + t.Fatalf("address should exist in dirtiedStateObjectsInSlot") + } + + if _, ok := slotDb.parallel.addrStateChangesInSlot[addr]; !ok { + t.Fatalf("address should exist in stateChangesInSlot") + } + + oldValueRead := state.GetState(addr, common.BytesToHash([]byte("test key"))) + emptyHash := common.Hash{} + if oldValueRead != emptyHash { + t.Fatalf("value read in old state should be empty") + } + + valueRead := slotDb.GetState(addr, common.BytesToHash([]byte("test key"))) + if valueRead != common.BytesToHash([]byte("test store")) { + t.Fatalf("value read should be equal to the stored value") + } + + if _, ok := slotDb.parallel.addrStateReadsInSlot[addr]; !ok { + 
t.Fatalf("address should exist in stateReadsInSlot") + } +} + +func TestSetAndGetCode(t *testing.T) { + memDb := rawdb.NewMemoryDatabase() + db := NewDatabase(memDb) + state, _ := New(common.Hash{}, db, nil) + + addr := common.BytesToAddress([]byte("so")) + state.SetBalance(addr, big.NewInt(1)) + state.PrepareForParallel() + + unconfirmedDBs := new(sync.Map) + slotDb := NewSlotDB(state, 0, 0, unconfirmedDBs) + if _, ok := slotDb.parallel.dirtiedStateObjectsInSlot[addr]; ok { + t.Fatalf("address should not exist in dirtiedStateObjectsInSlot") + } + + slotDb.SetCode(addr, []byte("test code")) + + if _, ok := slotDb.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + t.Fatalf("address should exist in dirtiedStateObjectsInSlot") + } + + if _, ok := slotDb.parallel.codeChangesInSlot[addr]; !ok { + t.Fatalf("address should exist in codeChangesInSlot") + } + + codeRead := slotDb.GetCode(addr) + if string(codeRead) != "test code" { + t.Fatalf("code read should be equal to the code stored") + } + + if _, ok := slotDb.parallel.codeReadsInSlot[addr]; !ok { + t.Fatalf("address should exist in codeReadsInSlot") + } +} + +func TestGetCodeSize(t *testing.T) { + memDb := rawdb.NewMemoryDatabase() + db := NewDatabase(memDb) + state, _ := New(common.Hash{}, db, nil) + + addr := common.BytesToAddress([]byte("so")) + state.SetBalance(addr, big.NewInt(1)) + state.PrepareForParallel() + + unconfirmedDBs := new(sync.Map) + slotDb := NewSlotDB(state, 0, 0, unconfirmedDBs) + slotDb.SetCode(addr, []byte("test code")) + + codeSize := slotDb.GetCodeSize(addr) + if codeSize != 9 { + t.Fatalf("code size should be 9") + } + + if _, ok := slotDb.parallel.codeReadsInSlot[addr]; !ok { + t.Fatalf("address should exist in codeReadsInSlot") + } +} + +func TestGetCodeHash(t *testing.T) { + memDb := rawdb.NewMemoryDatabase() + db := NewDatabase(memDb) + state, _ := New(common.Hash{}, db, nil) + + addr := common.BytesToAddress([]byte("so")) + state.SetBalance(addr, big.NewInt(1)) + state.PrepareForParallel() + unconfirmedDBs := new(sync.Map) + slotDb := NewSlotDB(state, 0, 0, unconfirmedDBs) + + slotDb.SetCode(addr, []byte("test code")) + + codeSize := slotDb.GetCodeHash(addr) + + if hex.EncodeToString(codeSize[:]) != "6e73fa02f7828b28608b078b007a4023fb40453c3e102b83828a3609a94d8cbb" { + t.Fatalf("code hash should be 6e73fa02f7828b28608b078b007a4023fb40453c3e102b83828a3609a94d8cbb") + } + if _, ok := slotDb.parallel.codeReadsInSlot[addr]; !ok { + t.Fatalf("address should exist in codeReadsInSlot") + } +} + +func TestSetNonce(t *testing.T) { + memDb := rawdb.NewMemoryDatabase() + db := NewDatabase(memDb) + state, _ := New(common.Hash{}, db, nil) + + addr := common.BytesToAddress([]byte("so")) + state.SetBalance(addr, big.NewInt(1)) + state.SetNonce(addr, 1) + state.PrepareForParallel() + + unconfirmedDBs := new(sync.Map) + slotDb := NewSlotDB(state, 0, 0, unconfirmedDBs) + slotDb.SetNonce(addr, 2) + + oldNonce := state.GetNonce(addr) + if oldNonce != 1 { + t.Fatalf("old nonce should be 1") + } + + newNonce := slotDb.GetNonce(addr) + if newNonce != 2 { + t.Fatalf("new nonce should be 2") + } + if _, ok := slotDb.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + t.Fatalf("address should exist in dirtiedStateObjectsInSlot") + } +} + +func TestSetAndGetBalance(t *testing.T) { + memDb := rawdb.NewMemoryDatabase() + db := NewDatabase(memDb) + state, _ := New(common.Hash{}, db, nil) + + addr := systemAddress + state.SetBalance(addr, big.NewInt(1)) + state.PrepareForParallel() + unconfirmedDBs := new(sync.Map) + slotDb := NewSlotDB(state, 
0, 0, unconfirmedDBs) + + slotDb.SetBalance(addr, big.NewInt(2)) + + oldBalance := state.GetBalance(addr) + if oldBalance.Int64() != 1 { + t.Fatalf("old balance should be 1") + } + + if _, ok := slotDb.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + t.Fatalf("address should exist in dirtiedStateObjectsInSlot") + } + + if _, ok := slotDb.parallel.balanceChangesInSlot[addr]; !ok { + t.Fatalf("address should exist in balanceChangesInSlot") + } + + newBalance := slotDb.GetBalance(addr) + if newBalance.Int64() != 2 { + t.Fatalf("new balance should be 2") + } + + if _, ok := slotDb.parallel.balanceReadsInSlot[addr]; !ok { + t.Fatalf("address should exist in balanceReadsInSlot") + } +} + +func TestSubBalance(t *testing.T) { + memDb := rawdb.NewMemoryDatabase() + db := NewDatabase(memDb) + state, _ := New(common.Hash{}, db, nil) + addr := systemAddress + state.SetBalance(addr, big.NewInt(2)) + + state.PrepareForParallel() + unconfirmedDBs := new(sync.Map) + slotDb := NewSlotDB(state, 0, 0, unconfirmedDBs) + slotDb.SubBalance(addr, big.NewInt(1)) + + oldBalance := state.GetBalance(addr) + if oldBalance.Int64() != 2 { + t.Fatalf("old balance should be 2") + } + + if _, ok := slotDb.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + t.Fatalf("address should exist in dirtiedStateObjectsInSlot") + } + + if _, ok := slotDb.parallel.balanceChangesInSlot[addr]; !ok { + t.Fatalf("address should exist in balanceChangesInSlot") + } + + if _, ok := slotDb.parallel.balanceReadsInSlot[addr]; !ok { + t.Fatalf("address should exist in balanceReadsInSlot") + } + + newBalance := slotDb.GetBalance(addr) + if newBalance.Int64() != 1 { + t.Fatalf("new balance should be 1") + } +} + +func TestAddBalance(t *testing.T) { + memDb := rawdb.NewMemoryDatabase() + db := NewDatabase(memDb) + state, _ := New(common.Hash{}, db, nil) + addr := systemAddress + state.SetBalance(addr, big.NewInt(2)) + state.PrepareForParallel() + unconfirmedDBs := new(sync.Map) + slotDb := NewSlotDB(state, 0, 0, unconfirmedDBs) + slotDb.AddBalance(addr, big.NewInt(1)) + + oldBalance := state.GetBalance(addr) + if oldBalance.Int64() != 2 { + t.Fatalf("old balance should be 2") + } + + if _, ok := slotDb.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + t.Fatalf("address should exist in dirtiedStateObjectsInSlot") + } + + if _, ok := slotDb.parallel.balanceChangesInSlot[addr]; !ok { + t.Fatalf("address should exist in balanceChangesInSlot") + } + + if _, ok := slotDb.parallel.balanceReadsInSlot[addr]; !ok { + t.Fatalf("address should exist in balanceReadsInSlot") + } + + newBalance := slotDb.GetBalance(addr) + if newBalance.Int64() != 3 { + t.Fatalf("new balance should be 3") + } +} + +func TestEmpty(t *testing.T) { + memDb := rawdb.NewMemoryDatabase() + db := NewDatabase(memDb) + state, _ := New(common.Hash{}, db, nil) + addr := systemAddress + state.SetBalance(addr, big.NewInt(2)) + state.PrepareForParallel() + + unconfirmedDBs := new(sync.Map) + slotDb := NewSlotDB(state, 0, 0, unconfirmedDBs) + + empty := slotDb.Empty(addr) + if empty { + t.Fatalf("address should not be empty") + } + + if _, ok := slotDb.parallel.addrStateReadsInSlot[addr]; !ok { + t.Fatalf("address should exist in addrStateReadsInSlot") + } +} + +func TestExist(t *testing.T) { + memDb := rawdb.NewMemoryDatabase() + db := NewDatabase(memDb) + state, _ := New(common.Hash{}, db, nil) + addr := systemAddress + state.SetBalance(addr, big.NewInt(2)) + state.PrepareForParallel() + unconfirmedDBs := new(sync.Map) + slotDb := NewSlotDB(state, 0, 0, unconfirmedDBs) + + exist := slotDb.Exist(addr) + if
!exist { + t.Fatalf("address should exist") + } + + if _, ok := slotDb.parallel.addrStateReadsInSlot[addr]; !ok { + t.Fatalf("address should exist in addrStateReadsInSlot") + } +} + +func TestMergeSlotDB(t *testing.T) { + memDb := rawdb.NewMemoryDatabase() + db := NewDatabase(memDb) + state, _ := New(common.Hash{}, db, nil) + state.PrepareForParallel() + unconfirmedDBs := new(sync.Map) + + oldSlotDb := NewSlotDB(state, 0, 0, unconfirmedDBs) + + newSlotDb := NewSlotDB(state, 0, 0, unconfirmedDBs) + + addr := systemAddress + newSlotDb.SetBalance(addr, big.NewInt(2)) + newSlotDb.SetState(addr, common.BytesToHash([]byte("test key")), common.BytesToHash([]byte("test store"))) + newSlotDb.SetCode(addr, []byte("test code")) + newSlotDb.SelfDestruct(addr) + newSlotDb.Finalise(true) + + changeList := oldSlotDb.MergeSlotDB(newSlotDb, &types.Receipt{}, 0) + + if ok := changeList.getDeletedStateObject(addr); ok == nil || !ok.selfDestructed { + t.Fatalf("address should exist in StateObjectSuicided") + } + + if ok := changeList.getStateObject(addr); ok != nil { + t.Fatalf("address should exist in StateChangeSet") + } + + if ok := changeList.GetBalance(addr); ok != common.Big0 { + t.Fatalf("address should exist in StateChangeSet") + } + + if ok := changeList.GetCode(addr); ok != nil { + t.Fatalf("address should exist in CodeChangeSet") + } + + if ok := changeList.getStateObject(addr); ok != nil { + t.Fatalf("address should exist in AddrStateChangeSet") + } +} diff --git a/core/state/transient_storage.go b/core/state/transient_storage.go index 66e563efa7..ea2b5bfefe 100644 --- a/core/state/transient_storage.go +++ b/core/state/transient_storage.go @@ -21,7 +21,7 @@ import ( ) // transientStorage is a representation of EIP-1153 "Transient Storage". -type transientStorage map[common.Address]Storage +type transientStorage map[common.Address]StorageMap // newTransientStorage creates a new instance of a transientStorage. func newTransientStorage() transientStorage { @@ -31,7 +31,7 @@ func newTransientStorage() transientStorage { // Set sets the transient-storage `value` for `key` at the given `addr`. func (t transientStorage) Set(addr common.Address, key, value common.Hash) { if _, ok := t[addr]; !ok { - t[addr] = make(Storage) + t[addr] = make(StorageMap) } t[addr][key] = value } @@ -49,7 +49,8 @@ func (t transientStorage) Get(addr common.Address, key common.Hash) common.Hash func (t transientStorage) Copy() transientStorage { storage := make(transientStorage) for key, value := range t { - storage[key] = value.Copy() + m := value.Copy() + storage[key] = m.(StorageMap) } return storage } diff --git a/core/state_processor.go b/core/state_processor.go index c9df98536c..541d303ceb 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -129,7 +129,6 @@ func applyTransaction(msg *Message, config *params.ChainConfig, gp *GasPool, sta if msg.IsDepositTx && config.IsOptimismRegolith(evm.Context.Time) { nonce = statedb.GetNonce(msg.From) } - // Apply the transaction to the current state (included in the env). 
result, err := ApplyMessage(evm, msg, gp) if err != nil { diff --git a/core/state_processor_test.go b/core/state_processor_test.go index 77efaede58..fbc6632a75 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -18,6 +18,7 @@ package core import ( "crypto/ecdsa" + "github.com/holiman/uint256" "math/big" "testing" @@ -34,7 +35,6 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" - "github.com/holiman/uint256" "golang.org/x/crypto/sha3" ) @@ -73,6 +73,7 @@ func TestStateProcessorErrors(t *testing.T) { tx, _ := types.SignTx(types.NewTransaction(nonce, to, amount, gasLimit, gasPrice, data), signer, key) return tx } + var mkDynamicTx = func(nonce uint64, to common.Address, gasLimit uint64, gasTipCap, gasFeeCap *big.Int) *types.Transaction { tx, _ := types.SignTx(types.NewTx(&types.DynamicFeeTx{ Nonce: nonce, @@ -111,7 +112,6 @@ func TestStateProcessorErrors(t *testing.T) { } return tx } - { // Tests against a 'recent' chain definition var ( db = rawdb.NewMemoryDatabase() @@ -128,7 +128,7 @@ func TestStateProcessorErrors(t *testing.T) { }, }, } - blockchain, _ = NewBlockChain(db, nil, gspec, nil, beacon.New(ethash.NewFaker()), vm.Config{}, nil, nil) + blockchain, _ = NewBlockChain(db, nil, gspec, nil, beacon.New(ethash.NewFaker()), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) tooBigInitCode = [params.MaxInitCodeSize + 1]byte{} ) @@ -147,6 +147,7 @@ func TestStateProcessorErrors(t *testing.T) { }, want: "could not apply tx 1 [0x0026256b3939ed97e2c4a6f3fce8ecf83bdcfa6d507c47838c308a1fb0436f62]: nonce too low: address 0x71562b71999873DB5b286dF957af199Ec94617F7, tx: 0 state: 1", }, + { // ErrNonceTooHigh txs: []*types.Transaction{ makeTx(key1, 100, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(875000000), nil), @@ -288,7 +289,7 @@ func TestStateProcessorErrors(t *testing.T) { }, }, } - blockchain, _ = NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + blockchain, _ = NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) ) defer blockchain.Stop() for i, tt := range []struct { @@ -312,7 +313,6 @@ func TestStateProcessorErrors(t *testing.T) { } } } - // ErrSenderNoEOA, for this we need the sender to have contract code { var ( @@ -327,7 +327,7 @@ func TestStateProcessorErrors(t *testing.T) { }, }, } - blockchain, _ = NewBlockChain(db, nil, gspec, nil, beacon.New(ethash.NewFaker()), vm.Config{}, nil, nil) + blockchain, _ = NewBlockChain(db, nil, gspec, nil, beacon.New(ethash.NewFaker()), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) ) defer blockchain.Stop() for i, tt := range []struct { diff --git a/core/state_transition.go b/core/state_transition.go index a23a26468e..0e174a0a7c 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -554,6 +554,7 @@ func (st *StateTransition) innerTransitionDb() (*ExecutionResult, error) { ReturnData: ret, }, nil } + effectiveTip := msg.GasPrice if rules.IsLondon { effectiveTip = cmath.BigMin(msg.GasTipCap, new(big.Int).Sub(msg.GasFeeCap, st.evm.Context.BaseFee)) @@ -589,7 +590,6 @@ func (st *StateTransition) innerTransitionDb() (*ExecutionResult, error) { st.state.AddBalance(params.OptimismL1FeeRecipient, amtU256) } } - return &ExecutionResult{ UsedGas: st.gasUsed(), RefundedGas: gasRefund, diff --git a/core/types/block.go b/core/types/block.go index 
1a357baa3a..0e0b621974 100644 --- a/core/types/block.go +++ b/core/types/block.go @@ -237,6 +237,8 @@ func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []* } else { b.header.ReceiptHash = DeriveSha(Receipts(receipts), hasher) b.header.Bloom = CreateBloom(receipts) + //fmt.Printf("Dav -- NewBlock -- ReceptHash: %s\nRecepts: %v\nBloom: %s\n", b.header.ReceiptHash, receipts, hexutils.BytesToHex(b.header.Bloom.Bytes())) + //debug.PrintStack() } if len(uncles) == 0 { diff --git a/core/types/receipt.go b/core/types/receipt.go index 67c1addb3d..8cb2bbdad8 100644 --- a/core/types/receipt.go +++ b/core/types/receipt.go @@ -18,6 +18,7 @@ package types import ( "bytes" + "encoding/json" "errors" "fmt" "io" @@ -605,3 +606,11 @@ func u32ptrTou64ptr(a *uint32) *uint64 { b := uint64(*a) return &b } + +// Debug PrettyPrint +func (r Receipt) PrettyPrint() (string, error) { + b, err := r.MarshalJSON() + var prettyJSON bytes.Buffer + json.Indent(&prettyJSON, b, "", "\t") + return prettyJSON.String(), err +} diff --git a/core/vm/evm.go b/core/vm/evm.go index 43ab27308b..0b2dc98db0 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -17,10 +17,11 @@ package vm import ( - "github.com/ethereum/go-ethereum/core/opcodeCompiler/compiler" "math/big" "sync/atomic" + "github.com/ethereum/go-ethereum/core/opcodeCompiler/compiler" + "github.com/holiman/uint256" "github.com/ethereum/go-ethereum/common" @@ -261,6 +262,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas contract.optimized, code = tryGetOptimizedCode(evm, codeHash, code) contract.SetCallCode(&addrCopy, codeHash, code) ret, err = evm.interpreter.Run(contract, input, false) + evm.StateDB.ParallelMakeUp(addr, input) gas = contract.Gas } else { addrCopy := addr @@ -269,6 +271,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas contract := NewContract(caller, AccountRef(addrCopy), value, gas) contract.SetCallCode(&addrCopy, evm.StateDB.GetCodeHash(addrCopy), code) ret, err = evm.interpreter.Run(contract, input, false) + evm.StateDB.ParallelMakeUp(addr, input) gas = contract.Gas } } @@ -523,14 +526,18 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, return nil, common.Address{}, gas, ErrNonceUintOverflow } evm.StateDB.SetNonce(caller.Address(), nonce+1) + // We add this to the access list _before_ taking a snapshot. 
Even if the creation fails, // the access-list change should not be rolled back if evm.chainRules.IsBerlin { evm.StateDB.AddAddressToAccessList(address) } + // Ensure there's no existing contract already at the designated address contractHash := evm.StateDB.GetCodeHash(address) - if evm.StateDB.GetNonce(address) != 0 || (contractHash != (common.Hash{}) && contractHash != types.EmptyCodeHash) { + // debug + no := evm.StateDB.GetNonce(address) + if no != 0 || (contractHash != (common.Hash{}) && contractHash != types.EmptyCodeHash) { return nil, common.Address{}, 0, ErrContractAddressCollision } // Create a new account on the state diff --git a/core/vm/gas_table.go b/core/vm/gas_table.go index 4b141d8f9a..f6dd7c2377 100644 --- a/core/vm/gas_table.go +++ b/core/vm/gas_table.go @@ -18,7 +18,6 @@ package vm import ( "errors" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/params" diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 431b287415..822a118b4c 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -798,6 +798,7 @@ func opSelfdestruct(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext if interpreter.readOnly { return nil, ErrWriteProtection } + beneficiary := scope.Stack.pop() balance := interpreter.evm.StateDB.GetBalance(scope.Contract.Address()) interpreter.evm.StateDB.AddBalance(beneficiary.Bytes20(), balance) diff --git a/core/vm/interface.go b/core/vm/interface.go index 25bfa06720..bf2f42e994 100644 --- a/core/vm/interface.go +++ b/core/vm/interface.go @@ -79,6 +79,13 @@ type StateDB interface { AddLog(*types.Log) AddPreimage(common.Hash, []byte) + + ParallelMakeUp(addr common.Address, input []byte) + + // todo -dav : delete following + PrintParallelStateObjects() + GetNonceFromBaseDB(addr common.Address) uint64 + TxIndex() int } // CallContext provides a basic interface for the EVM calling conventions. The EVM diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go index 80acdcc013..67978d877f 100644 --- a/core/vm/interpreter.go +++ b/core/vm/interpreter.go @@ -33,6 +33,8 @@ type Config struct { NoBaseFee bool // Forces the EIP-1559 baseFee to 0 (needed for 0 price calls) EnablePreimageRecording bool // Enables recording of SHA3/keccak preimages ExtraEips []int // Additional EIPS that are to be enabled + EnableParallelExec bool // Whether to execute transaction in parallel mode when do full sync + ParallelTxNum int // Number of slot for transaction execution OptimismPrecompileOverrides PrecompileOverrides // Precompile overrides for Optimism EnableOpcodeOptimizations bool // Enable opcode optimization } @@ -174,6 +176,7 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( } }() } + // The Interpreter main run loop (contextual). This loop runs until either an // explicit STOP, RETURN or SELFDESTRUCT is executed, an error occurred during // the execution of one of the operations or until the done flag is set by the @@ -197,6 +200,7 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( if !contract.UseGas(cost) { return nil, ErrOutOfGas } + if operation.dynamicGas != nil { // All ops with a dynamic memory usage also has a dynamic gas cost. 
var memorySize uint64 @@ -242,10 +246,8 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( } pc++ } - if err == errStopToken { err = nil // clear stop token error } - return res, err } diff --git a/core/vm/operations_acl.go b/core/vm/operations_acl.go index f420a24105..28ad9c2824 100644 --- a/core/vm/operations_acl.go +++ b/core/vm/operations_acl.go @@ -18,7 +18,6 @@ package vm import ( "errors" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/params" @@ -37,6 +36,7 @@ func makeGasSStoreFunc(clearingRefund uint64) gasFunc { current = evm.StateDB.GetState(contract.Address(), slot) cost = uint64(0) ) + // Check slot presence in the access list if addrPresent, slotPresent := evm.StateDB.SlotInAccessList(contract.Address(), slot); !slotPresent { cost = params.ColdSloadCostEIP2929 @@ -50,7 +50,6 @@ func makeGasSStoreFunc(clearingRefund uint64) gasFunc { } } value := common.Hash(y.Bytes32()) - if current == value { // noop (1) // EIP 2200 original clause: // return params.SloadGasEIP2200, nil diff --git a/core/vm/runtime/runtime_test.go b/core/vm/runtime/runtime_test.go index 52756b4093..362ecf73e4 100644 --- a/core/vm/runtime/runtime_test.go +++ b/core/vm/runtime/runtime_test.go @@ -188,7 +188,7 @@ func benchmarkEVM_Create(bench *testing.B, code string) { EIP155Block: new(big.Int), EIP158Block: new(big.Int), }, - EVMConfig: vm.Config{}, + EVMConfig: vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, } // Warm up the intpools and stuff bench.ResetTimer() diff --git a/eth/backend.go b/eth/backend.go index b690938e87..a8ed4fe836 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -223,6 +223,8 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { var ( vmConfig = vm.Config{ EnablePreimageRecording: config.EnablePreimageRecording, + EnableParallelExec: config.ParallelTxMode, + ParallelTxNum: config.ParallelTxNum, EnableOpcodeOptimizations: config.EnableOpcodeOptimizing, } cacheConfig = &core.CacheConfig{ diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index 097888f024..68858972d8 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -72,7 +72,7 @@ func newTesterWithNotification(t *testing.T, success func()) *downloadTester { Alloc: types.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}}, BaseFee: big.NewInt(params.InitialBaseFee), } - chain, err := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + chain, err := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { panic(err) } diff --git a/eth/downloader/testchain_test.go b/eth/downloader/testchain_test.go index 46f3febd8b..e4e10849fe 100644 --- a/eth/downloader/testchain_test.go +++ b/eth/downloader/testchain_test.go @@ -64,7 +64,6 @@ func init() { fsHeaderContCheck = 500 * time.Millisecond testChainBase = newTestChain(blockCacheMaxItems+200, testGenesis) - var forkLen = int(fullMaxForkAncestry + 50) var wg sync.WaitGroup @@ -218,7 +217,7 @@ func newTestBlockchain(blocks []*types.Block) *core.BlockChain { if pregenerated { panic("Requested chain generation outside of init") } - chain, err := core.NewBlockChain(rawdb.NewMemoryDatabase(), nil, testGspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + chain, err := core.NewBlockChain(rawdb.NewMemoryDatabase(), nil, testGspec, nil, 
ethash.NewFaker(), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { panic(err) } diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 383641ffc3..77080f5870 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -218,6 +218,8 @@ type Config struct { RollupDisableTxPoolAdmission bool RollupHaltOnIncompatibleProtocolVersion string + ParallelTxMode bool // Whether to execute transaction in parallel mode when do full sync + ParallelTxNum int // Number of slot for transaction execution EnableOpcodeOptimizing bool } diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go index 659ca5ce19..1c8a1fcb38 100644 --- a/eth/filters/filter_test.go +++ b/eth/filters/filter_test.go @@ -250,7 +250,7 @@ func TestFilters(t *testing.T) { } }) var l uint64 - bc, err := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, &l) + bc, err := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, &l) if err != nil { t.Fatal(err) } diff --git a/eth/gasprice/gasprice_test.go b/eth/gasprice/gasprice_test.go index 79217502f7..f860735fed 100644 --- a/eth/gasprice/gasprice_test.go +++ b/eth/gasprice/gasprice_test.go @@ -164,7 +164,7 @@ func newTestBackend(t *testing.T, londonBlock *big.Int, pending bool) *testBacke b.AddTx(types.MustSignNewTx(key, signer, txdata)) }) // Construct testing chain - chain, err := core.NewBlockChain(rawdb.NewMemoryDatabase(), &core.CacheConfig{TrieCleanNoPrefetch: true}, gspec, nil, engine, vm.Config{}, nil, nil) + chain, err := core.NewBlockChain(rawdb.NewMemoryDatabase(), &core.CacheConfig{TrieCleanNoPrefetch: true}, gspec, nil, engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { t.Fatalf("Failed to create local chain, %v", err) } diff --git a/eth/handler_eth_test.go b/eth/handler_eth_test.go index 1eb9a9ea49..c6532b54ce 100644 --- a/eth/handler_eth_test.go +++ b/eth/handler_eth_test.go @@ -99,8 +99,8 @@ func testForkIDSplit(t *testing.T, protocol uint) { gspecNoFork = &core.Genesis{Config: configNoFork} gspecProFork = &core.Genesis{Config: configProFork} - chainNoFork, _ = core.NewBlockChain(dbNoFork, nil, gspecNoFork, nil, engine, vm.Config{}, nil, nil) - chainProFork, _ = core.NewBlockChain(dbProFork, nil, gspecProFork, nil, engine, vm.Config{}, nil, nil) + chainNoFork, _ = core.NewBlockChain(dbNoFork, nil, gspecNoFork, nil, engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) + chainProFork, _ = core.NewBlockChain(dbProFork, nil, gspecProFork, nil, engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) _, blocksNoFork, _ = core.GenerateChainWithGenesis(gspecNoFork, engine, 2, nil) _, blocksProFork, _ = core.GenerateChainWithGenesis(gspecProFork, engine, 2, nil) diff --git a/eth/handler_test.go b/eth/handler_test.go index eacdc52aa6..8b7b86b9de 100644 --- a/eth/handler_test.go +++ b/eth/handler_test.go @@ -171,7 +171,7 @@ func newTestHandlerWithBlocks(blocks int) *testHandler { Config: params.TestChainConfig, Alloc: types.GenesisAlloc{testAddr: {Balance: big.NewInt(1000000)}}, } - chain, _ := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + chain, _ := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) _, bs, _ := core.GenerateChainWithGenesis(gspec, ethash.NewFaker(), blocks, nil) if _, err := chain.InsertChain(bs); err != nil { diff --git 
a/eth/protocols/eth/handler_test.go b/eth/protocols/eth/handler_test.go index fdf551ef21..47a21b0ac6 100644 --- a/eth/protocols/eth/handler_test.go +++ b/eth/protocols/eth/handler_test.go @@ -104,7 +104,7 @@ func newTestBackendWithGenerator(blocks int, shanghai bool, generator func(int, Config: config, Alloc: types.GenesisAlloc{testAddr: {Balance: big.NewInt(100_000_000_000_000_000)}}, } - chain, _ := core.NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil) + chain, _ := core.NewBlockChain(db, nil, gspec, nil, engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) _, bs, _ := core.GenerateChainWithGenesis(gspec, engine, blocks, generator) if _, err := chain.InsertChain(bs); err != nil { diff --git a/eth/tracers/api_test.go b/eth/tracers/api_test.go index 9c3a423f6f..8f3fdd50a5 100644 --- a/eth/tracers/api_test.go +++ b/eth/tracers/api_test.go @@ -158,7 +158,7 @@ func newTestBackend(t *testing.T, n int, gspec *core.Genesis, generator func(i i SnapshotLimit: 0, TrieDirtyDisabled: true, // Archive mode } - chain, err := core.NewBlockChain(backend.chaindb, cacheConfig, gspec, nil, backend.engine, vm.Config{}, nil, nil) + chain, err := core.NewBlockChain(backend.chaindb, cacheConfig, gspec, nil, backend.engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -254,7 +254,7 @@ func (b *testBackend) StateAtTransaction(ctx context.Context, block *types.Block if idx == txIndex { return msg, context, statedb, release, nil } - vmenv := vm.NewEVM(context, txContext, statedb, b.chainConfig, vm.Config{}) + vmenv := vm.NewEVM(context, txContext, statedb, b.chainConfig, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}) if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil { return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) } diff --git a/go.mod b/go.mod index 73768e2685..2cc116668f 100644 --- a/go.mod +++ b/go.mod @@ -58,6 +58,7 @@ require ( github.com/olekukonko/tablewriter v0.0.5 github.com/panjf2000/ants/v2 v2.4.5 github.com/peterh/liner v1.2.0 + github.com/prometheus/client_golang v1.14.0 github.com/protolambda/bls12-381-util v0.0.0-20220416220906-d8552aa452c7 github.com/prysmaticlabs/prysm/v4 v4.2.0 github.com/rs/cors v1.8.3 @@ -148,7 +149,6 @@ require ( github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v1.14.0 // indirect github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/common v0.42.0 // indirect github.com/prometheus/procfs v0.9.0 // indirect diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go index f65c98a50a..49de12d5e7 100644 --- a/internal/ethapi/api_test.go +++ b/internal/ethapi/api_test.go @@ -607,7 +607,7 @@ func newTestBackend(t *testing.T, n int, gspec *core.Genesis, engine consensus.E // Generate blocks for testing db, blocks, _ := core.GenerateChainWithGenesis(gspec, engine, n, generator) txlookupLimit := uint64(0) - chain, err := core.NewBlockChain(db, cacheConfig, gspec, nil, engine, vm.Config{}, nil, &txlookupLimit) + chain, err := core.NewBlockChain(db, cacheConfig, gspec, nil, engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, &txlookupLimit) if err != 
nil { t.Fatalf("failed to create tester chain: %v", err) } diff --git a/metrics/exp/exp.go b/metrics/exp/exp.go index 7e3f82a075..4530097a2c 100644 --- a/metrics/exp/exp.go +++ b/metrics/exp/exp.go @@ -5,6 +5,7 @@ package exp import ( "expvar" "fmt" + "github.com/prometheus/client_golang/prometheus/promhttp" "net/http" "sync" @@ -44,6 +45,7 @@ func Exp(r metrics.Registry) { // http.HandleFunc("/debug/vars", e.expHandler) // haven't found an elegant way, so just use a different endpoint http.Handle("/debug/metrics", h) + http.Handle("/debug/metrics/go_prometheus", promhttp.Handler()) http.Handle("/debug/metrics/prometheus", prometheus.Handler(r)) } @@ -58,6 +60,7 @@ func ExpHandler(r metrics.Registry) http.Handler { func Setup(address string) { m := http.NewServeMux() m.Handle("/debug/metrics", ExpHandler(metrics.DefaultRegistry)) + m.Handle("/debug/metrics/go_prometheus", promhttp.Handler()) m.Handle("/debug/metrics/prometheus", prometheus.Handler(metrics.DefaultRegistry)) log.Info("Starting metrics server", "addr", fmt.Sprintf("http://%s/debug/metrics", address)) go func() { diff --git a/miner/miner_test.go b/miner/miner_test.go index 5907fb4464..4629dca13b 100644 --- a/miner/miner_test.go +++ b/miner/miner_test.go @@ -310,7 +310,7 @@ func createMiner(t *testing.T) (*Miner, *event.TypeMux, func(skipMiner bool)) { // Create consensus engine engine := clique.New(chainConfig.Clique, chainDB) // Create Ethereum backend - bc, err := core.NewBlockChain(chainDB, nil, genesis, nil, engine, vm.Config{}, nil, nil) + bc, err := core.NewBlockChain(chainDB, nil, genesis, nil, engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { t.Fatalf("can't create new chain %v", err) } diff --git a/miner/worker_test.go b/miner/worker_test.go index 7a78b6898f..f82fad5dbf 100644 --- a/miner/worker_test.go +++ b/miner/worker_test.go @@ -130,7 +130,7 @@ func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine default: t.Fatalf("unexpected consensus engine type: %T", engine) } - chain, err := core.NewBlockChain(db, &core.CacheConfig{TrieDirtyDisabled: true}, gspec, nil, engine, vm.Config{}, nil, nil) + chain, err := core.NewBlockChain(db, &core.CacheConfig{TrieDirtyDisabled: true}, gspec, nil, engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) if err != nil { t.Fatalf("core.NewBlockChain failed: %v", err) } @@ -181,7 +181,7 @@ func TestGenerateAndImportBlock(t *testing.T) { defer w.close() // This test chain imports the mined blocks. - chain, _ := core.NewBlockChain(rawdb.NewMemoryDatabase(), nil, b.genesis, nil, engine, vm.Config{}, nil, nil) + chain, _ := core.NewBlockChain(rawdb.NewMemoryDatabase(), nil, b.genesis, nil, engine, vm.Config{EnableParallelExec: true, ParallelTxNum: 1}, nil, nil) defer chain.Stop() // Ignore empty commit here for less noise. 
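Taken together, the statedb changes above define a dispatcher/slot workflow: the dispatcher switches the main StateDB into parallel mode, gives each transaction its own slot DB, and merges the finalized slot results back. The sketch below is illustrative only and not part of this patch; it assumes the in-package helpers introduced above (PrepareForParallel, NewSlotDB, Finalise, MergeSlotDB) and mirrors the flow exercised by the new core/state tests.

// parallel_flow_sketch_test.go (hypothetical file name; illustrative sketch in package state)
package state

import (
	"math/big"
	"sync"
	"testing"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
)

func TestParallelSlotDBFlowSketch(t *testing.T) {
	// Main StateDB owned by the dispatcher; switch it into parallel mode first.
	mainDB, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil)
	mainDB.PrepareForParallel()

	// One ParallelStateDB per execution slot; unconfirmed slot results are shared via a sync.Map.
	unconfirmedDBs := new(sync.Map)
	slotDB := NewSlotDB(mainDB, 0, 0, unconfirmedDBs)

	// A balance write stands in for executing a transaction against the slot DB.
	addr := common.BytesToAddress([]byte("example"))
	slotDB.AddBalance(addr, big.NewInt(1))
	slotDB.Finalise(true) // dirty -> pending inside the slot

	// Merge the finalized slot results (and its receipt logs) back into the main StateDB.
	mainDB.MergeSlotDB(slotDB, &types.Receipt{}, 0)

	t.Logf("balance after merge: %v", mainDB.GetBalance(addr))
}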
diff --git a/tests/block_test.go b/tests/block_test.go index fb355085fd..ac11974e66 100644 --- a/tests/block_test.go +++ b/tests/block_test.go @@ -21,8 +21,9 @@ import ( "runtime" "testing" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" + + "github.com/ethereum/go-ethereum/common" ) func TestBlockchain(t *testing.T) { @@ -90,4 +91,5 @@ func execBlockTest(t *testing.T, bt *testMatcher, test *BlockTest) { t.Errorf("test in path mode with snapshotter failed: %v", err) return } + } diff --git a/tests/block_test_util.go b/tests/block_test_util.go index 5f77a1c326..bc90d524a9 100644 --- a/tests/block_test_util.go +++ b/tests/block_test_util.go @@ -151,15 +151,19 @@ func (t *BlockTest) Run(snapshotter bool, scheme string, tracer vm.EVMLogger, po cache.SnapshotWait = true } chain, err := core.NewBlockChain(db, cache, gspec, nil, engine, vm.Config{ - Tracer: tracer, + EnableParallelExec: true, + ParallelTxNum: 4, + Tracer: tracer, }, nil, nil) if err != nil { + fmt.Printf("Dav -- Test - NewBlockChain fail, err: %s\n", err) return err } defer chain.Stop() validBlocks, err := t.insertBlocks(chain) if err != nil { + fmt.Printf("Dav -- Test - t.insertBlocks fail, err: %s\n", err) return err } // Import succeeded: regardless of whether the _test_ succeeds or not, schedule diff --git a/tests/state_test.go b/tests/state_test.go index fc1c351f07..6e7e672f24 100644 --- a/tests/state_test.go +++ b/tests/state_test.go @@ -163,7 +163,7 @@ const traceErrorLimit = 400000 func withTrace(t *testing.T, gasLimit uint64, test func(vm.Config) error) { // Use config from command line arguments. - config := vm.Config{} + config := vm.Config{EnableParallelExec: true, ParallelTxNum: 1} err := test(config) if err == nil { return @@ -237,7 +237,7 @@ func runBenchmark(b *testing.B, t *StateTest) { key := fmt.Sprintf("%s/%d", subtest.Fork, subtest.Index) b.Run(key, func(b *testing.B) { - vmconfig := vm.Config{} + vmconfig := vm.Config{EnableParallelExec: true, ParallelTxNum: 1} config, eips, err := GetChainConfig(subtest.Fork) if err != nil { diff --git a/triedb/pathdb/database.go b/triedb/pathdb/database.go index 4139dfc8b3..a352020359 100644 --- a/triedb/pathdb/database.go +++ b/triedb/pathdb/database.go @@ -395,11 +395,14 @@ func (db *Database) Recover(root common.Hash, loader triestate.TrieLoader) error start = time.Now() dl = db.tree.bottom() ) + // fmt.Printf("Dav -- pathdb Recover, dl, root: %s\n", dl.rootHash()) for dl.rootHash() != root { + // fmt.Printf("Dav -- pathdb Recover, not equal, dl.root %s, root: %s\n", dl.rootHash(), root) h, err := readHistory(db.freezer, dl.stateID()) if err != nil { return err } + dl, err = dl.revert(h, loader) if err != nil { return err diff --git a/triedb/pathdb/disklayer.go b/triedb/pathdb/disklayer.go index a0cb6f25a9..325afba7ee 100644 --- a/triedb/pathdb/disklayer.go +++ b/triedb/pathdb/disklayer.go @@ -380,6 +380,7 @@ func (dl *diskLayer) revert(h *history, loader triestate.TrieLoader) (*diskLayer // Apply the reverse state changes upon the current state. This must // be done before holding the lock in order to access state in "this" // layer. + nodes, err := triestate.Apply(h.meta.parent, h.meta.root, h.accounts, h.storages, loader) if err != nil { return nil, err