From 0d9dc40a39242130e71621e081640935afc23a0a Mon Sep 17 00:00:00 2001
From: realowen
Date: Wed, 31 May 2023 15:33:25 +0800
Subject: [PATCH] perf: op-node related api improvement

This PR optimizes the API interaction between op-node and op-geth during the block issuance process. It is divided into two parts:

1. prepareWork improvements:
   - Cache blocks and receipts after block import to avoid repeated queries
   - Improve the getBlockByHash API to remove a repetitive lookup loop
2. newPayload improvements:
   - Save the state in advance right after the block is sealed, so that writing the block is faster once op-node confirms it

Illustrative Go sketches of the receipt-caching pattern and of the consumer-side effect are included below.

---------

Co-authored-by: j75689
---
 core/blockchain.go | 166 ++++++++++++++++++++++++++------------
 ethclient/ethclient.go | 1 +
 internal/ethapi/api.go | 38 +++++----
 miner/payload_building.go | 61 ++++++++++++--
 miner/worker.go | 53 +++++++-----
 miner/worker_test.go | 4 +-
 6 files changed, 226 insertions(+), 97 deletions(-)

diff --git a/core/blockchain.go b/core/blockchain.go index 9915d0c768..2b51883f81 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -90,13 +90,16 @@ var ( ) const ( - bodyCacheLimit = 256 - blockCacheLimit = 256 - receiptsCacheLimit = 32 + bodyCacheLimit = 512 + blockCacheLimit = 512 + receiptsCacheLimit = 512 + txLogsCacheLimit = 512 txLookupCacheLimit = 1024 maxFutureBlocks = 256 maxTimeFutureBlocks = 30 - TriesInMemory = 128 // default number of recent trie roots to keep in memory + + miningStateCacheLimit = 128 + TriesInMemory = 128 // default number of recent trie roots to keep in memory // BlockChainVersion ensures that an incompatible database forces a resync from scratch.
// @@ -210,6 +213,11 @@ type BlockChain struct { bodyRLPCache *lru.Cache[common.Hash, rlp.RawValue] receiptsCache *lru.Cache[common.Hash, []*types.Receipt] blockCache *lru.Cache[common.Hash, *types.Block] + + miningReceiptsCache *lru.Cache[common.Hash, []*types.Receipt] + miningTxLogsCache *lru.Cache[common.Hash, []*types.Log] + miningStateCache *lru.Cache[common.Hash, *state.StateDB] + txLookupCache *lru.Cache[common.Hash, *rawdb.LegacyTxLookupEntry] // future blocks are blocks added for later processing @@ -266,25 +274,29 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis } bc := &BlockChain{ - chainConfig: chainConfig, - cacheConfig: cacheConfig, - db: db, - triedb: triedb, - flushInterval: int64(cacheConfig.TrieTimeLimit), - triegc: prque.New[int64, common.Hash](nil), - quit: make(chan struct{}), - chainmu: syncx.NewClosableMutex(), - bodyCache: lru.NewCache[common.Hash, *types.Body](bodyCacheLimit), - bodyRLPCache: lru.NewCache[common.Hash, rlp.RawValue](bodyCacheLimit), - receiptsCache: lru.NewCache[common.Hash, []*types.Receipt](receiptsCacheLimit), - blockCache: lru.NewCache[common.Hash, *types.Block](blockCacheLimit), - txLookupCache: lru.NewCache[common.Hash, *rawdb.LegacyTxLookupEntry](txLookupCacheLimit), - futureBlocks: lru.NewCache[common.Hash, *types.Block](maxFutureBlocks), - engine: engine, - vmConfig: vmConfig, + chainConfig: chainConfig, + cacheConfig: cacheConfig, + db: db, + triedb: triedb, + flushInterval: int64(cacheConfig.TrieTimeLimit), + triegc: prque.New[int64, common.Hash](nil), + quit: make(chan struct{}), + chainmu: syncx.NewClosableMutex(), + bodyCache: lru.NewCache[common.Hash, *types.Body](bodyCacheLimit), + bodyRLPCache: lru.NewCache[common.Hash, rlp.RawValue](bodyCacheLimit), + receiptsCache: lru.NewCache[common.Hash, []*types.Receipt](receiptsCacheLimit), + blockCache: lru.NewCache[common.Hash, *types.Block](blockCacheLimit), + miningReceiptsCache: lru.NewCache[common.Hash, []*types.Receipt](receiptsCacheLimit), + miningTxLogsCache: lru.NewCache[common.Hash, []*types.Log](txLogsCacheLimit), + miningStateCache: lru.NewCache[common.Hash, *state.StateDB](miningStateCacheLimit), + txLookupCache: lru.NewCache[common.Hash, *rawdb.LegacyTxLookupEntry](txLookupCacheLimit), + futureBlocks: lru.NewCache[common.Hash, *types.Block](maxFutureBlocks), + engine: engine, + vmConfig: vmConfig, } bc.forker = NewForkChoice(bc, shouldPreserve) bc.stateCache = state.NewDatabaseWithNodeDBAndCache(bc.db, bc.triedb) + bc.validator = NewBlockValidator(chainConfig, bc, engine) bc.prefetcher = NewStatePrefetcher(chainConfig, bc, engine) bc.processor = NewStateProcessor(chainConfig, bc, engine) @@ -772,6 +784,9 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha bc.bodyRLPCache.Purge() bc.receiptsCache.Purge() bc.blockCache.Purge() + bc.miningReceiptsCache.Purge() + bc.miningTxLogsCache.Purge() + bc.miningStateCache.Purge() bc.txLookupCache.Purge() bc.futureBlocks.Purge() @@ -1034,6 +1049,33 @@ func (bc *BlockChain) procFutureBlocks() { } } +// CacheBlock cache block in memory +func (bc *BlockChain) CacheBlock(hash common.Hash, block *types.Block) { + bc.hc.numberCache.Add(hash, block.NumberU64()) + bc.hc.headerCache.Add(hash, block.Header()) + bc.blockCache.Add(hash, block) +} + +// CacheReceipts cache receipts in memory +func (bc *BlockChain) CacheReceipts(hash common.Hash, receipts types.Receipts) { + bc.receiptsCache.Add(hash, receipts) +} + +// CacheMiningReceipts cache receipts in memory +func (bc *BlockChain) 
CacheMiningReceipts(hash common.Hash, receipts types.Receipts) { + bc.miningReceiptsCache.Add(hash, receipts) +} + +// CacheMiningTxLogs cache tx logs in memory +func (bc *BlockChain) CacheMiningTxLogs(hash common.Hash, logs []*types.Log) { + bc.miningTxLogsCache.Add(hash, logs) +} + +// CacheMiningState cache mining state in memory +func (bc *BlockChain) CacheMiningState(hash common.Hash, state *state.StateDB) { + bc.miningStateCache.Add(hash, state) +} + // WriteStatus status of write type WriteStatus byte @@ -1765,46 +1807,58 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool) continue } - // Retrieve the parent block and it's state to execute on top - start := time.Now() - parent := it.previous() - if parent == nil { - parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1) - } - statedb, err := state.NewWithSharedPool(parent.Root, bc.stateCache, bc.snaps) - if err != nil { - return it.index, err - } - - // Enable prefetching to pull in trie node paths while processing transactions - statedb.StartPrefetcher("chain") - activeState = statedb + var ( + err error + receipts, receiptExist = bc.miningReceiptsCache.Get(block.Hash()) + logs, logExist = bc.miningTxLogsCache.Get(block.Hash()) + statedb, stateExist = bc.miningStateCache.Get(block.Hash()) + usedGas = block.GasUsed() + start = time.Now() + ) - // If we have a followup block, run that against the current state to pre-cache - // transactions and probabilistically some of the account/storage trie nodes. interruptCh := make(chan struct{}) - if !bc.cacheConfig.TrieCleanNoPrefetch { - if followup, err := it.peek(); followup != nil && err == nil { - throwaway, _ := state.New(parent.Root, bc.stateCache, bc.snaps) + pstart := time.Now() - go func(start time.Time, followup *types.Block, throwaway *state.StateDB) { - bc.prefetcher.Prefetch(followup, throwaway, bc.vmConfig, interruptCh) + // skip block process if we already have the state, receipts and logs from mining work + if !(receiptExist && logExist && stateExist) { + // Retrieve the parent block and it's state to execute on top + parent := it.previous() + if parent == nil { + parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1) + } + statedb, err = state.NewWithSharedPool(parent.Root, bc.stateCache, bc.snaps) + if err != nil { + return it.index, err + } + + // Enable prefetching to pull in trie node paths while processing transactions + statedb.StartPrefetcher("chain") + activeState = statedb + + // If we have a followup block, run that against the current state to pre-cache + // transactions and probabilistically some of the account/storage trie nodes. 
- blockPrefetchExecuteTimer.Update(time.Since(start)) - }(time.Now(), followup, throwaway) + if !bc.cacheConfig.TrieCleanNoPrefetch { + if followup, err := it.peek(); followup != nil && err == nil { + throwaway, _ := state.New(parent.Root, bc.stateCache, bc.snaps) + + go func(start time.Time, followup *types.Block, throwaway *state.StateDB) { + bc.prefetcher.Prefetch(followup, throwaway, bc.vmConfig, interruptCh) + + blockPrefetchExecuteTimer.Update(time.Since(start)) + }(time.Now(), followup, throwaway) + } + } + // Process block using the parent state as reference point + receipts, logs, usedGas, err = bc.processor.Process(block, statedb, bc.vmConfig) + if err != nil { + bc.reportBlock(block, receipts, err) + close(interruptCh) + return it.index, err } } - // Process block using the parent state as reference point - pstart := time.Now() - receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig) - if err != nil { - bc.reportBlock(block, receipts, err) - close(interruptCh) - return it.index, err - } ptime := time.Since(pstart) - vstart := time.Now() if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil { bc.reportBlock(block, receipts, err) @@ -1814,6 +1868,14 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool) vtime := time.Since(vstart) proctime := time.Since(start) // processing + validation + // pre-cache the block and receipts, so that they can be retrieved quickly by rpc + bc.CacheBlock(block.Hash(), block) + err = types.Receipts(receipts).DeriveFields(bc.chainConfig, block.Hash(), block.NumberU64(), block.Time(), block.BaseFee(), block.Transactions()) + if err != nil { + log.Warn("Failed to derive receipt fields", "block", block.Hash(), "err", err) + } + bc.CacheReceipts(block.Hash(), receipts) + // Update the metrics touched during block processing and validation accountReadTimer.Update(statedb.AccountReads) // Account reads are complete(in processing) storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete(in processing) diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go index c8353b25ae..6114b5eeb8 100644 --- a/ethclient/ethclient.go +++ b/ethclient/ethclient.go @@ -129,6 +129,7 @@ func (ec *Client) getBlock(ctx context.Context, method string, args ...interface if err := json.Unmarshal(raw, &body); err != nil { return nil, err } + // Quick-verify transaction and uncle lists. This mostly helps with debugging the server.
if head.UncleHash == types.EmptyUncleHash && len(body.UncleHashes) > 0 { return nil, fmt.Errorf("server returned non-empty uncle list but block header indicates no uncles") diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index b33090c261..1c391f4dd2 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -1355,19 +1355,19 @@ func RPCMarshalBlock(ctx context.Context, block *types.Block, inclTx bool, fullT fields["size"] = hexutil.Uint64(block.Size()) if inclTx { - formatTx := func(tx *types.Transaction) (interface{}, error) { + formatTx := func(tx *types.Transaction, index int) (interface{}, error) { return tx.Hash(), nil } if fullTx { - formatTx = func(tx *types.Transaction) (interface{}, error) { - return newRPCTransactionFromBlockHash(ctx, block, tx.Hash(), backend), nil + formatTx = func(tx *types.Transaction, index int) (interface{}, error) { + return newRPCTransactionFromBlockHash(ctx, block, tx.Hash(), index, tx, backend), nil } } txs := block.Transactions() transactions := make([]interface{}, len(txs)) var err error for i, tx := range txs { - if transactions[i], err = formatTx(tx); err != nil { + if transactions[i], err = formatTx(tx, i); err != nil { return nil, err } } @@ -1512,12 +1512,15 @@ func NewRPCPendingTransaction(tx *types.Transaction, current *types.Header, conf } // newRPCTransactionFromBlockIndex returns a transaction that will serialize to the RPC representation. -func newRPCTransactionFromBlockIndex(ctx context.Context, b *types.Block, index uint64, backend Backend) *RPCTransaction { - txs := b.Transactions() - if index >= uint64(len(txs)) { - return nil +func newRPCTransactionFromBlockIndex(ctx context.Context, b *types.Block, index uint64, tx *types.Transaction, backend Backend) *RPCTransaction { + if tx == nil { + txs := b.Transactions() + if index >= uint64(len(txs)) { + return nil + } + tx = txs[index] } - tx := txs[index] + rcpt := depositTxReceipt(ctx, b.Hash(), index, backend, tx) return newRPCTransaction(tx, b.Hash(), b.NumberU64(), index, b.BaseFee(), backend.ChainConfig(), rcpt) } @@ -1547,13 +1550,8 @@ func newRPCRawTransactionFromBlockIndex(b *types.Block, index uint64) hexutil.By } // newRPCTransactionFromBlockHash returns a transaction that will serialize to the RPC representation. -func newRPCTransactionFromBlockHash(ctx context.Context, b *types.Block, hash common.Hash, backend Backend) *RPCTransaction { - for idx, tx := range b.Transactions() { - if tx.Hash() == hash { - return newRPCTransactionFromBlockIndex(ctx, b, uint64(idx), backend) - } - } - return nil +func newRPCTransactionFromBlockHash(ctx context.Context, b *types.Block, hash common.Hash, idx int, tx *types.Transaction, backend Backend) *RPCTransaction { + return newRPCTransactionFromBlockIndex(ctx, b, uint64(idx), tx, backend) } // accessListResult returns an optional accesslist @@ -1700,7 +1698,7 @@ func (s *TransactionAPI) GetBlockTransactionCountByHash(ctx context.Context, blo // GetTransactionByBlockNumberAndIndex returns the transaction for the given block number and index. 
func (s *TransactionAPI) GetTransactionByBlockNumberAndIndex(ctx context.Context, blockNr rpc.BlockNumber, index hexutil.Uint) *RPCTransaction { if block, _ := s.b.BlockByNumber(ctx, blockNr); block != nil { - return newRPCTransactionFromBlockIndex(ctx, block, uint64(index), s.b) + return newRPCTransactionFromBlockIndex(ctx, block, uint64(index), nil, s.b) } return nil } @@ -1708,7 +1706,7 @@ func (s *TransactionAPI) GetTransactionByBlockNumberAndIndex(ctx context.Context // GetTransactionByBlockHashAndIndex returns the transaction for the given block hash and index. func (s *TransactionAPI) GetTransactionByBlockHashAndIndex(ctx context.Context, blockHash common.Hash, index hexutil.Uint) *RPCTransaction { if block, _ := s.b.BlockByHash(ctx, blockHash); block != nil { - return newRPCTransactionFromBlockIndex(ctx, block, uint64(index), s.b) + return newRPCTransactionFromBlockIndex(ctx, block, uint64(index), nil, s.b) } return nil } @@ -1910,9 +1908,9 @@ func SubmitTransaction(ctx context.Context, b Backend, tx *types.Transaction) (c if tx.To() == nil { addr := crypto.CreateAddress(from, tx.Nonce()) - log.Info("Submitted contract creation", "hash", tx.Hash().Hex(), "from", from, "nonce", tx.Nonce(), "contract", addr.Hex(), "value", tx.Value()) + log.Debug("Submitted contract creation", "hash", tx.Hash().Hex(), "from", from, "nonce", tx.Nonce(), "contract", addr.Hex(), "value", tx.Value()) } else { - log.Info("Submitted transaction", "hash", tx.Hash().Hex(), "from", from, "nonce", tx.Nonce(), "recipient", tx.To(), "value", tx.Value()) + log.Debug("Submitted transaction", "hash", tx.Hash().Hex(), "from", from, "nonce", tx.Nonce(), "recipient", tx.To(), "value", tx.Value()) } return tx.Hash(), nil } diff --git a/miner/payload_building.go b/miner/payload_building.go index 26e89eef31..50d93faa96 100644 --- a/miner/payload_building.go +++ b/miner/payload_building.go @@ -101,7 +101,7 @@ func newPayload(empty *types.Block, id engine.PayloadID) *Payload { } // update updates the full-block with latest built version. -func (payload *Payload) update(block *types.Block, fees *big.Int, elapsed time.Duration) { +func (payload *Payload) update(block *types.Block, fees *big.Int, elapsed time.Duration, postFuncs ...func()) { payload.lock.Lock() defer payload.lock.Unlock() @@ -121,6 +121,12 @@ func (payload *Payload) update(block *types.Block, fees *big.Int, elapsed time.D log.Info("Updated payload", "id", payload.id, "number", block.NumberU64(), "hash", block.Hash(), "txs", len(block.Transactions()), "gas", block.GasUsed(), "fees", feesInEther, "root", block.Root(), "elapsed", common.PrettyDuration(elapsed)) + + for _, postFunc := range postFuncs { + if postFunc != nil { + postFunc() + } + } } payload.cond.Broadcast() // fire signal for notifying full block } @@ -173,10 +179,12 @@ func (w *worker) buildPayload(args *BuildPayloadArgs) (*Payload, error) { // Build the initial version with no transaction included. It should be fast // enough to run. The empty payload can at least make sure there is something // to deliver for not missing slot. 
- empty, _, err := w.getSealingBlock(args.Parent, args.Timestamp, args.FeeRecipient, args.Random, args.Withdrawals, true, args.Transactions, args.GasLimit) + start := time.Now() + empty, _, _, err := w.getSealingBlock(args.Parent, args.Timestamp, args.FeeRecipient, args.Random, args.Withdrawals, true, args.Transactions, args.GasLimit) if err != nil { return nil, err } + log.Debug("Built initial payload", "id", args.Id(), "number", empty.NumberU64(), "hash", empty.Hash(), "elapsed", common.PrettyDuration(time.Since(start))) // Construct a payload object for return. payload := newPayload(empty, args.Id()) if args.NoTxPool { // don't start the background payload updating job if there is no tx pool to pull from @@ -200,10 +208,16 @@ func (w *worker) buildPayload(args *BuildPayloadArgs) (*Payload, error) { select { case <-timer.C: start := time.Now() - block, fees, err := w.getSealingBlock(args.Parent, args.Timestamp, args.FeeRecipient, args.Random, args.Withdrawals, false, args.Transactions, args.GasLimit) - if err == nil { - payload.update(block, fees, time.Since(start)) + block, fees, env, err := w.getSealingBlock(args.Parent, args.Timestamp, args.FeeRecipient, args.Random, args.Withdrawals, false, args.Transactions, args.GasLimit) + if err != nil { + log.Error("Failed to build updated payload", "id", payload.id, "err", err) + return } + + log.Debug("Built updated payload", "id", payload.id, "number", block.NumberU64(), "hash", block.Hash(), "elapsed", common.PrettyDuration(time.Since(start))) + payload.update(block, fees, time.Since(start), func() { + w.cacheMiningBlock(block, env) + }) timer.Reset(w.recommit) case <-payload.stop: log.Info("Stopping work on payload", "id", payload.id, "reason", "delivery") @@ -216,3 +230,40 @@ func (w *worker) buildPayload(args *BuildPayloadArgs) (*Payload, error) { }() return payload, nil } + +func (w *worker) cacheMiningBlock(block *types.Block, env *environment) { + var ( + start = time.Now() + receipts = make([]*types.Receipt, len(env.receipts)) + logs []*types.Log + hash = block.Hash() + ) + for i, taskReceipt := range env.receipts { + receipt := new(types.Receipt) + receipts[i] = receipt + *receipt = *taskReceipt + + // add block location fields + receipt.BlockHash = hash + receipt.BlockNumber = block.Number() + receipt.TransactionIndex = uint(i) + + // Update the block hash in all logs since it is now available and not when the + // receipt/log of individual transactions were created. + receipt.Logs = make([]*types.Log, len(taskReceipt.Logs)) + for i, taskLog := range taskReceipt.Logs { + log := new(types.Log) + receipt.Logs[i] = log + *log = *taskLog + log.BlockHash = hash + } + logs = append(logs, receipt.Logs...) + } + + w.chain.CacheMiningReceipts(hash, receipts) + w.chain.CacheMiningTxLogs(hash, logs) + w.chain.CacheMiningState(hash, env.state) + + log.Info("Successfully cached sealed new block", "number", block.Number(), "root", block.Root(), "hash", hash, + "elapsed", common.PrettyDuration(time.Since(start))) +} diff --git a/miner/worker.go b/miner/worker.go index 37c5787aa4..181c2d16ab 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -176,6 +176,7 @@ type newPayloadResult struct { err error block *types.Block fees *big.Int + env *environment } // getWorkReq represents a request for getting a new sealing work with provided parameters. 
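// --- Illustrative sketch (not part of the patch) ----------------------------
// The cacheMiningBlock helper added above deep-copies the receipts produced
// while sealing and stamps them with their block location fields before they
// are cached, so a later newPayload/RPC read sees fully populated receipts
// without re-executing the block. The standalone program below reproduces that
// copy-and-annotate pattern; the annotateReceipts helper name is made up for
// this example and does not exist in the codebase.
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// annotateReceipts deep-copies receipts (and their logs) and fills in the
// block location fields, leaving the originals untouched.
func annotateReceipts(blockHash common.Hash, blockNumber *big.Int, src []*types.Receipt) []*types.Receipt {
	out := make([]*types.Receipt, len(src))
	for i, r := range src {
		cpy := *r // shallow copy of the receipt struct itself
		cpy.BlockHash = blockHash
		cpy.BlockNumber = new(big.Int).Set(blockNumber)
		cpy.TransactionIndex = uint(i)
		// Logs must be copied too: their block hash is only known now.
		cpy.Logs = make([]*types.Log, len(r.Logs))
		for j, l := range r.Logs {
			lc := *l
			lc.BlockHash = blockHash
			cpy.Logs[j] = &lc
		}
		out[i] = &cpy
	}
	return out
}

func main() {
	sealed := []*types.Receipt{
		{Status: types.ReceiptStatusSuccessful, Logs: []*types.Log{{}}},
	}
	cached := annotateReceipts(common.HexToHash("0x01"), big.NewInt(100), sealed)
	fmt.Println(cached[0].BlockHash, cached[0].TransactionIndex, len(cached[0].Logs))
}
// -----------------------------------------------------------------------------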
@@ -326,8 +327,9 @@ func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus } worker.newpayloadTimeout = newpayloadTimeout - worker.wg.Add(4) + worker.wg.Add(5) go worker.mainLoop() + go worker.opLoop() go worker.newWorkLoop(recommit) go worker.resultLoop() go worker.taskLoop() @@ -555,6 +557,28 @@ func (w *worker) newWorkLoop(recommit time.Duration) { } } +// opLoop is responsible for generating and submitting sealing work based on +// the received event(building_payload). +func (w *worker) opLoop() { + defer w.wg.Done() + + for { + select { + case req := <-w.getWorkCh: + block, fees, env, err := w.generateWork(req.params) + req.result <- &newPayloadResult{ + err: err, + block: block, + fees: fees, + env: env, + } + // System stopped + case <-w.exitCh: + return + } + } +} + // mainLoop is responsible for generating and submitting sealing work based on // the received event. It can support two modes: automatically generate task and // submit it or return task according to given parameters for various proposes. @@ -577,13 +601,6 @@ func (w *worker) mainLoop() { case req := <-w.newWorkCh: w.commitWork(req.interrupt, req.noempty, req.timestamp) - case req := <-w.getWorkCh: - block, fees, err := w.generateWork(req.params) - req.result <- &newPayloadResult{ - err: err, - block: block, - fees: fees, - } case ev := <-w.chainSideCh: // Short circuit for duplicate side blocks if _, exist := w.localUncles[ev.Block.Hash()]; exist { @@ -804,13 +821,13 @@ func (w *worker) makeEnv(parent *types.Header, header *types.Header, coinbase co parentBlock := w.eth.BlockChain().GetBlockByHash(parent.Hash()) state, release, err = historicalBackend.StateAtBlock(context.Background(), parentBlock, ^uint64(0), nil, false, false) state = state.Copy() - state.EnableWriteOnSharedStorage() release() } } if err != nil { return nil, err } + state.EnableWriteOnSharedStorage() state.StartPrefetcher("miner") // Note the passed coinbase may be different with header.Coinbase. @@ -1125,10 +1142,10 @@ func (w *worker) fillTransactions(interrupt *int32, env *environment) error { } // generateWork generates a sealing block based on the given parameters. 
-func (w *worker) generateWork(genParams *generateParams) (*types.Block, *big.Int, error) { +func (w *worker) generateWork(genParams *generateParams) (*types.Block, *big.Int, *environment, error) { work, err := w.prepareWork(genParams) if err != nil { - return nil, nil, err + return nil, nil, nil, err } defer work.discard() if work.gasPool == nil { @@ -1140,7 +1157,7 @@ func (w *worker) generateWork(genParams *generateParams) (*types.Block, *big.Int work.state.SetTxContext(tx.Hash(), work.tcount) _, err := w.commitTransaction(work, tx) if err != nil { - return nil, nil, fmt.Errorf("failed to force-include tx: %s type: %d sender: %s nonce: %d, err: %w", tx.Hash(), tx.Type(), from, tx.Nonce(), err) + return nil, nil, nil, fmt.Errorf("failed to force-include tx: %s type: %d sender: %s nonce: %d, err: %w", tx.Hash(), tx.Type(), from, tx.Nonce(), err) } work.tcount++ } @@ -1160,9 +1177,9 @@ func (w *worker) generateWork(genParams *generateParams) (*types.Block, *big.Int } block, err := w.engine.FinalizeAndAssemble(w.chain, work.header, work.state, work.txs, work.unclelist(), work.receipts, genParams.withdrawals) if err != nil { - return nil, nil, err + return nil, nil, nil, err } - return block, totalFees(block, work.receipts), nil + return block, totalFees(block, work.receipts), work, nil } // commitWork generates several new sealing tasks based on the parent block @@ -1275,7 +1292,7 @@ func (w *worker) commit(env *environment, interval func(), update bool, start ti // getSealingBlock generates the sealing block based on the given parameters. // The generation result will be passed back via the given channel no matter // the generation itself succeeds or not. -func (w *worker) getSealingBlock(parent common.Hash, timestamp uint64, coinbase common.Address, random common.Hash, withdrawals types.Withdrawals, noTxs bool, transactions types.Transactions, gasLimit *uint64) (*types.Block, *big.Int, error) { +func (w *worker) getSealingBlock(parent common.Hash, timestamp uint64, coinbase common.Address, random common.Hash, withdrawals types.Withdrawals, noTxs bool, transactions types.Transactions, gasLimit *uint64) (*types.Block, *big.Int, *environment, error) { req := &getWorkReq{ params: &generateParams{ timestamp: timestamp, @@ -1295,11 +1312,11 @@ func (w *worker) getSealingBlock(parent common.Hash, timestamp uint64, coinbase case w.getWorkCh <- req: result := <-req.result if result.err != nil { - return nil, nil, result.err + return nil, nil, nil, result.err } - return result.block, result.fees, nil + return result.block, result.fees, result.env, nil case <-w.exitCh: - return nil, nil, errors.New("miner closed") + return nil, nil, nil, errors.New("miner closed") } } diff --git a/miner/worker_test.go b/miner/worker_test.go index 46f0e0a8f1..e26bb1ba2d 100644 --- a/miner/worker_test.go +++ b/miner/worker_test.go @@ -635,7 +635,7 @@ func testGetSealingWork(t *testing.T, chainConfig *params.ChainConfig, engine co // This API should work even when the automatic sealing is not enabled for _, c := range cases { - block, _, err := w.getSealingBlock(c.parent, timestamp, c.coinbase, c.random, nil, false, nil, nil) + block, _, _, err := w.getSealingBlock(c.parent, timestamp, c.coinbase, c.random, nil, false, nil, nil) if c.expectErr { if err == nil { t.Error("Expect error but get nil") @@ -651,7 +651,7 @@ func testGetSealingWork(t *testing.T, chainConfig *params.ChainConfig, engine co // This API should work even when the automatic sealing is enabled w.start() for _, c := range cases { - block, _, err := 
w.getSealingBlock(c.parent, timestamp, c.coinbase, c.random, nil, false, nil, nil) + block, _, _, err := w.getSealingBlock(c.parent, timestamp, c.coinbase, c.random, nil, false, nil, nil) if c.expectErr { if err == nil { t.Error("Expect error but get nil")
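
Consumer-side sketch (illustrative, not part of the patch): after op-node confirms a payload, the rollup driver typically re-reads the block and its receipts over RPC. With the caches introduced above, those reads are served from memory instead of being re-derived from the database. The endpoint URL and block hash below are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	// Placeholder endpoint; point this at an op-geth node built with the patch.
	client, err := ethclient.Dial("http://127.0.0.1:8545")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx := context.Background()
	// Placeholder hash; in practice this is the hash of the payload op-node just confirmed.
	blockHash := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001")

	// Block reads right after import are served from the enlarged blockCache.
	block, err := client.BlockByHash(ctx, blockHash)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("block", block.NumberU64(), "txs", len(block.Transactions()))

	// Receipt lookups benefit from the receipts cached (with derived fields) in insertChain.
	for _, tx := range block.Transactions() {
		receipt, err := client.TransactionReceipt(ctx, tx.Hash())
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("tx", tx.Hash(), "status", receipt.Status)
	}
}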