From 0b1d1afdbfd655cc70f8e18f4e1337813aa4437c Mon Sep 17 00:00:00 2001 From: Darioush Jalali Date: Tue, 26 Nov 2024 09:21:23 -0800 Subject: [PATCH] use upstream statedb (embedded) (#672) --- core/blockchain.go | 14 +- core/state/access_list.go | 146 --- core/state/database.go | 72 +- core/state/dump.go | 225 +---- core/state/iterator.go | 181 ---- core/state/iterator_test.go | 118 --- core/state/journal.go | 323 ------ core/state/metrics.go | 47 - core/state/snapshot/iterator.go | 9 +- core/state/snapshot/snapshot.go | 63 +- core/state/snapshot/snapshot_ext.go | 12 +- core/state/state_object.go | 621 ------------ core/state/state_test.go | 206 ---- core/state/statedb.go | 1432 ++------------------------- core/state/statedb_fuzz_test.go | 405 -------- core/state/statedb_test.go | 458 +-------- core/state/sync_test.go | 83 -- core/state/transient_storage.go | 65 -- core/state/trie_prefetcher.go | 640 ------------ core/state/trie_prefetcher_test.go | 124 --- core/test_blockchain.go | 4 +- core/types/state_account.go | 13 +- nativeasset/contract_test.go | 16 +- scripts/eth-allowed-packages.txt | 1 + sync/statesync/state_syncer.go | 12 +- 25 files changed, 150 insertions(+), 5140 deletions(-) delete mode 100644 core/state/access_list.go delete mode 100644 core/state/iterator.go delete mode 100644 core/state/iterator_test.go delete mode 100644 core/state/journal.go delete mode 100644 core/state/metrics.go delete mode 100644 core/state/state_object.go delete mode 100644 core/state/statedb_fuzz_test.go delete mode 100644 core/state/sync_test.go delete mode 100644 core/state/transient_storage.go delete mode 100644 core/state/trie_prefetcher.go delete mode 100644 core/state/trie_prefetcher_test.go diff --git a/core/blockchain.go b/core/blockchain.go index 2463300ec0..1f94fe845d 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1693,20 +1693,18 @@ func (bc *BlockChain) reprocessBlock(parent *types.Block, current *types.Block) func (bc *BlockChain) commitWithSnap( current *types.Block, parentRoot common.Hash, statedb *state.StateDB, ) (common.Hash, error) { - // If snapshots are enabled, WithBlockHashes must be called as snapshot layers - // are stored by block hash. - if bc.snaps != nil { - bc.snaps.WithBlockHashes(current.Hash(), current.ParentHash()) - } - root, err := statedb.Commit(current.NumberU64(), bc.chainConfig.IsEIP158(current.Number())) + // blockHashes must be passed through Commit since snapshots are based on the + // block hash. + blockHashes := snapshot.WithBlockHashes(current.Hash(), current.ParentHash()) + root, err := statedb.Commit(current.NumberU64(), bc.chainConfig.IsEIP158(current.Number()), blockHashes) if err != nil { return common.Hash{}, err } // Upstream does not perform a snapshot update if the root is the same as the // parent root, however here the snapshots are based on the block hash, so - // this update is necessary. + // this update is necessary. Note blockHashes are passed here as well. if bc.snaps != nil && root == parentRoot { - if err := bc.snaps.Update(root, parentRoot, nil, nil, nil); err != nil { + if err := bc.snaps.Update(root, parentRoot, nil, nil, nil, blockHashes); err != nil { return common.Hash{}, err } } diff --git a/core/state/access_list.go b/core/state/access_list.go deleted file mode 100644 index 88bddc1ff3..0000000000 --- a/core/state/access_list.go +++ /dev/null @@ -1,146 +0,0 @@ -// (c) 2019-2021, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. 
-// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2020 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package state - -import ( - "github.com/ava-labs/libevm/common" -) - -type accessList struct { - addresses map[common.Address]int - slots []map[common.Hash]struct{} -} - -// ContainsAddress returns true if the address is in the access list. -func (al *accessList) ContainsAddress(address common.Address) bool { - _, ok := al.addresses[address] - return ok -} - -// Contains checks if a slot within an account is present in the access list, returning -// separate flags for the presence of the account and the slot respectively. -func (al *accessList) Contains(address common.Address, slot common.Hash) (addressPresent bool, slotPresent bool) { - idx, ok := al.addresses[address] - if !ok { - // no such address (and hence zero slots) - return false, false - } - if idx == -1 { - // address yes, but no slots - return true, false - } - _, slotPresent = al.slots[idx][slot] - return true, slotPresent -} - -// newAccessList creates a new accessList. -func newAccessList() *accessList { - return &accessList{ - addresses: make(map[common.Address]int), - } -} - -// Copy creates an independent copy of an accessList. -func (a *accessList) Copy() *accessList { - cp := newAccessList() - for k, v := range a.addresses { - cp.addresses[k] = v - } - cp.slots = make([]map[common.Hash]struct{}, len(a.slots)) - for i, slotMap := range a.slots { - newSlotmap := make(map[common.Hash]struct{}, len(slotMap)) - for k := range slotMap { - newSlotmap[k] = struct{}{} - } - cp.slots[i] = newSlotmap - } - return cp -} - -// AddAddress adds an address to the access list, and returns 'true' if the operation -// caused a change (addr was not previously in the list). -func (al *accessList) AddAddress(address common.Address) bool { - if _, present := al.addresses[address]; present { - return false - } - al.addresses[address] = -1 - return true -} - -// AddSlot adds the specified (addr, slot) combo to the access list. -// Return values are: -// - address added -// - slot added -// For any 'true' value returned, a corresponding journal entry must be made. 
-func (al *accessList) AddSlot(address common.Address, slot common.Hash) (addrChange bool, slotChange bool) { - idx, addrPresent := al.addresses[address] - if !addrPresent || idx == -1 { - // Address not present, or addr present but no slots there - al.addresses[address] = len(al.slots) - slotmap := map[common.Hash]struct{}{slot: {}} - al.slots = append(al.slots, slotmap) - return !addrPresent, true - } - // There is already an (address,slot) mapping - slotmap := al.slots[idx] - if _, ok := slotmap[slot]; !ok { - slotmap[slot] = struct{}{} - // Journal add slot change - return false, true - } - // No changes required - return false, false -} - -// DeleteSlot removes an (address, slot)-tuple from the access list. -// This operation needs to be performed in the same order as the addition happened. -// This method is meant to be used by the journal, which maintains ordering of -// operations. -func (al *accessList) DeleteSlot(address common.Address, slot common.Hash) { - idx, addrOk := al.addresses[address] - // There are two ways this can fail - if !addrOk { - panic("reverting slot change, address not present in list") - } - slotmap := al.slots[idx] - delete(slotmap, slot) - // If that was the last (first) slot, remove it - // Since additions and rollbacks are always performed in order, - // we can delete the item without worrying about screwing up later indices - if len(slotmap) == 0 { - al.slots = al.slots[:idx] - al.addresses[address] = -1 - } -} - -// DeleteAddress removes an address from the access list. This operation -// needs to be performed in the same order as the addition happened. -// This method is meant to be used by the journal, which maintains ordering of -// operations. -func (al *accessList) DeleteAddress(address common.Address) { - delete(al.addresses, address) -} diff --git a/core/state/database.go b/core/state/database.go index e72770bb08..b810bf2c3d 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ -31,13 +31,12 @@ import ( "fmt" "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/libevm/common" "github.com/ava-labs/libevm/common/lru" + ethstate "github.com/ava-labs/libevm/core/state" "github.com/ava-labs/libevm/crypto" "github.com/ava-labs/libevm/ethdb" "github.com/ava-labs/libevm/trie" - "github.com/ava-labs/libevm/trie/trienode" "github.com/ava-labs/libevm/trie/utils" "github.com/ava-labs/libevm/triedb" "github.com/crate-crypto/go-ipa/banderwagon" @@ -82,74 +81,7 @@ type Database interface { } // Trie is a Ethereum Merkle Patricia trie. -type Trie interface { - // GetKey returns the sha3 preimage of a hashed key that was previously used - // to store a value. - // - // TODO(fjl): remove this when StateTrie is removed - GetKey([]byte) []byte - - // GetAccount abstracts an account read from the trie. It retrieves the - // account blob from the trie with provided account address and decodes it - // with associated decoding algorithm. If the specified account is not in - // the trie, nil will be returned. If the trie is corrupted(e.g. some nodes - // are missing or the account blob is incorrect for decoding), an error will - // be returned. - GetAccount(address common.Address) (*types.StateAccount, error) - - // GetStorage returns the value for key stored in the trie. The value bytes - // must not be modified by the caller. If a node was not found in the database, - // a trie.MissingNodeError is returned. 
- GetStorage(addr common.Address, key []byte) ([]byte, error) - - // UpdateAccount abstracts an account write to the trie. It encodes the - // provided account object with associated algorithm and then updates it - // in the trie with provided address. - UpdateAccount(address common.Address, account *types.StateAccount) error - - // UpdateStorage associates key with value in the trie. If value has length zero, - // any existing value is deleted from the trie. The value bytes must not be modified - // by the caller while they are stored in the trie. If a node was not found in the - // database, a trie.MissingNodeError is returned. - UpdateStorage(addr common.Address, key, value []byte) error - - // DeleteAccount abstracts an account deletion from the trie. - DeleteAccount(address common.Address) error - - // DeleteStorage removes any existing value for key from the trie. If a node - // was not found in the database, a trie.MissingNodeError is returned. - DeleteStorage(addr common.Address, key []byte) error - - // UpdateContractCode abstracts code write to the trie. It is expected - // to be moved to the stateWriter interface when the latter is ready. - UpdateContractCode(address common.Address, codeHash common.Hash, code []byte) error - - // Hash returns the root hash of the trie. It does not write to the database and - // can be used even if the trie doesn't have one. - Hash() common.Hash - - // Commit collects all dirty nodes in the trie and replace them with the - // corresponding node hash. All collected nodes(including dirty leaves if - // collectLeaf is true) will be encapsulated into a nodeset for return. - // The returned nodeset can be nil if the trie is clean(nothing to commit). - // Once the trie is committed, it's not usable anymore. A new trie must - // be created with new root and updated trie database for following usage - Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) - - // NodeIterator returns an iterator that returns nodes of the trie. Iteration - // starts at the key after the given start key. And error will be returned - // if fails to create node iterator. - NodeIterator(startKey []byte) (trie.NodeIterator, error) - - // Prove constructs a Merkle proof for key. The result contains all encoded nodes - // on the path to the value at key. The value itself is also included in the last - // node and can be retrieved by verifying the proof. - // - // If the trie does not contain a value for key, the returned proof contains all - // nodes of the longest existing prefix of the key (at least the root), ending - // with the node that proves the absence of the key. - Prove(key []byte, proofDb ethdb.KeyValueWriter) error -} +type Trie = ethstate.Trie // NewDatabase creates a backing store for state. The returned database is safe for // concurrent use, but does not retain any recent trie nodes in memory. 
To keep some diff --git a/core/state/dump.go b/core/state/dump.go index 3d0c81494e..47ef241510 100644 --- a/core/state/dump.go +++ b/core/state/dump.go @@ -27,222 +27,13 @@ package state import ( - "encoding/json" - "fmt" - "time" - - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/libevm/common" - "github.com/ava-labs/libevm/common/hexutil" - "github.com/ava-labs/libevm/log" - "github.com/ava-labs/libevm/rlp" - "github.com/ava-labs/libevm/trie" + ethstate "github.com/ava-labs/libevm/core/state" ) -// DumpConfig is a set of options to control what portions of the state will be -// iterated and collected. -type DumpConfig struct { - SkipCode bool - SkipStorage bool - OnlyWithAddresses bool - Start []byte - Max uint64 -} - -// DumpCollector interface which the state trie calls during iteration -type DumpCollector interface { - // OnRoot is called with the state root - OnRoot(common.Hash) - // OnAccount is called once for each account in the trie - OnAccount(*common.Address, DumpAccount) -} - -// DumpAccount represents an account in the state. -type DumpAccount struct { - Balance string `json:"balance"` - Nonce uint64 `json:"nonce"` - Root hexutil.Bytes `json:"root"` - CodeHash hexutil.Bytes `json:"codeHash"` - Code hexutil.Bytes `json:"code,omitempty"` - IsMultiCoin bool `json:"isMultiCoin"` - Storage map[common.Hash]string `json:"storage,omitempty"` - Address *common.Address `json:"address,omitempty"` // Address only present in iterative (line-by-line) mode - AddressHash hexutil.Bytes `json:"key,omitempty"` // If we don't have address, we can output the key - -} - -// Dump represents the full dump in a collected format, as one large map. -type Dump struct { - Root string `json:"root"` - Accounts map[string]DumpAccount `json:"accounts"` - // Next can be set to represent that this dump is only partial, and Next - // is where an iterator should be positioned in order to continue the dump. - Next []byte `json:"next,omitempty"` // nil if no more accounts -} - -// OnRoot implements DumpCollector interface -func (d *Dump) OnRoot(root common.Hash) { - d.Root = fmt.Sprintf("%x", root) -} - -// OnAccount implements DumpCollector interface -func (d *Dump) OnAccount(addr *common.Address, account DumpAccount) { - if addr == nil { - d.Accounts[fmt.Sprintf("pre(%s)", account.AddressHash)] = account - } - if addr != nil { - d.Accounts[(*addr).String()] = account - } -} - -// iterativeDump is a DumpCollector-implementation which dumps output line-by-line iteratively. -type iterativeDump struct { - *json.Encoder -} - -// OnAccount implements DumpCollector interface -func (d iterativeDump) OnAccount(addr *common.Address, account DumpAccount) { - dumpAccount := &DumpAccount{ - Balance: account.Balance, - Nonce: account.Nonce, - Root: account.Root, - CodeHash: account.CodeHash, - IsMultiCoin: account.IsMultiCoin, - Code: account.Code, - Storage: account.Storage, - AddressHash: account.AddressHash, - Address: addr, - } - d.Encode(dumpAccount) -} - -// OnRoot implements DumpCollector interface -func (d iterativeDump) OnRoot(root common.Hash) { - d.Encode(struct { - Root common.Hash `json:"root"` - }{root}) -} - -// DumpToCollector iterates the state according to the given options and inserts -// the items into a collector for aggregation or serialization. 
-func (s *StateDB) DumpToCollector(c DumpCollector, conf *DumpConfig) (nextKey []byte) { - // Sanitize the input to allow nil configs - if conf == nil { - conf = new(DumpConfig) - } - var ( - missingPreimages int - accounts uint64 - start = time.Now() - logged = time.Now() - ) - log.Info("Trie dumping started", "root", s.trie.Hash()) - c.OnRoot(s.trie.Hash()) - - trieIt, err := s.trie.NodeIterator(conf.Start) - if err != nil { - log.Error("Trie dumping error", "err", err) - return nil - } - it := trie.NewIterator(trieIt) - for it.Next() { - var data types.StateAccount - if err := rlp.DecodeBytes(it.Value, &data); err != nil { - panic(err) - } - var ( - account = DumpAccount{ - Balance: data.Balance.String(), - Nonce: data.Nonce, - Root: data.Root[:], - CodeHash: data.CodeHash, - IsMultiCoin: types.IsMultiCoin(&data), - AddressHash: it.Key, - } - address *common.Address - addr common.Address - addrBytes = s.trie.GetKey(it.Key) - ) - if addrBytes == nil { - missingPreimages++ - if conf.OnlyWithAddresses { - continue - } - } else { - addr = common.BytesToAddress(addrBytes) - address = &addr - account.Address = address - } - obj := newObject(s, addr, &data) - if !conf.SkipCode { - account.Code = obj.Code() - } - if !conf.SkipStorage { - account.Storage = make(map[common.Hash]string) - tr, err := obj.getTrie() - if err != nil { - log.Error("Failed to load storage trie", "err", err) - continue - } - trieIt, err := tr.NodeIterator(nil) - if err != nil { - log.Error("Failed to create trie iterator", "err", err) - continue - } - storageIt := trie.NewIterator(trieIt) - for storageIt.Next() { - _, content, _, err := rlp.Split(storageIt.Value) - if err != nil { - log.Error("Failed to decode the value returned by iterator", "error", err) - continue - } - account.Storage[common.BytesToHash(s.trie.GetKey(storageIt.Key))] = common.Bytes2Hex(content) - } - } - c.OnAccount(address, account) - accounts++ - if time.Since(logged) > 8*time.Second { - log.Info("Trie dumping in progress", "at", it.Key, "accounts", accounts, - "elapsed", common.PrettyDuration(time.Since(start))) - logged = time.Now() - } - if conf.Max > 0 && accounts >= conf.Max { - if it.Next() { - nextKey = it.Key - } - break - } - } - if missingPreimages > 0 { - log.Warn("Dump incomplete due to missing preimages", "missing", missingPreimages) - } - log.Info("Trie dumping complete", "accounts", accounts, - "elapsed", common.PrettyDuration(time.Since(start))) - - return nextKey -} - -// RawDump returns the state. If the processing is aborted e.g. due to options -// reaching Max, the `Next` key is set on the returned Dump. 
-func (s *StateDB) RawDump(opts *DumpConfig) Dump { - dump := &Dump{ - Accounts: make(map[string]DumpAccount), - } - dump.Next = s.DumpToCollector(dump, opts) - return *dump -} - -// Dump returns a JSON string representing the entire state as a single json-object -func (s *StateDB) Dump(opts *DumpConfig) []byte { - dump := s.RawDump(opts) - json, err := json.MarshalIndent(dump, "", " ") - if err != nil { - log.Error("Error dumping state", "err", err) - } - return json -} - -// IterativeDump dumps out accounts as json-objects, delimited by linebreaks on stdout -func (s *StateDB) IterativeDump(opts *DumpConfig, output *json.Encoder) { - s.DumpToCollector(iterativeDump{output}, opts) -} +type ( + // XXX: Handling IsMultiCoin / extras in dump + DumpConfig = ethstate.DumpConfig + DumpCollector = ethstate.DumpCollector + DumpAccount = ethstate.DumpAccount + Dump = ethstate.Dump +) diff --git a/core/state/iterator.go b/core/state/iterator.go deleted file mode 100644 index f615f396b3..0000000000 --- a/core/state/iterator.go +++ /dev/null @@ -1,181 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package state - -import ( - "bytes" - "errors" - "fmt" - - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/libevm/common" - "github.com/ava-labs/libevm/rlp" - "github.com/ava-labs/libevm/trie" -) - -// nodeIterator is an iterator to traverse the entire state trie post-order, -// including all of the contract code and contract state tries. Preimage is -// required in order to resolve the contract address. -type nodeIterator struct { - state *StateDB // State being iterated - - stateIt trie.NodeIterator // Primary iterator for the global state trie - dataIt trie.NodeIterator // Secondary iterator for the data trie of a contract - - accountHash common.Hash // Hash of the node containing the account - codeHash common.Hash // Hash of the contract source code - code []byte // Source code associated with a contract - - Hash common.Hash // Hash of the current entry being iterated (nil if not standalone) - Parent common.Hash // Hash of the first full ancestor node (nil if current is the root) - - Error error // Failure set in case of an internal error in the iterator -} - -// newNodeIterator creates an post-order state node iterator. 
-func newNodeIterator(state *StateDB) *nodeIterator { - return &nodeIterator{ - state: state, - } -} - -// Next moves the iterator to the next node, returning whether there are any -// further nodes. In case of an internal error this method returns false and -// sets the Error field to the encountered failure. -func (it *nodeIterator) Next() bool { - // If the iterator failed previously, don't do anything - if it.Error != nil { - return false - } - // Otherwise step forward with the iterator and report any errors - if err := it.step(); err != nil { - it.Error = err - return false - } - return it.retrieve() -} - -// step moves the iterator to the next entry of the state trie. -func (it *nodeIterator) step() error { - // Abort if we reached the end of the iteration - if it.state == nil { - return nil - } - // Initialize the iterator if we've just started - var err error - if it.stateIt == nil { - it.stateIt, err = it.state.trie.NodeIterator(nil) - if err != nil { - return err - } - } - // If we had data nodes previously, we surely have at least state nodes - if it.dataIt != nil { - if cont := it.dataIt.Next(true); !cont { - if it.dataIt.Error() != nil { - return it.dataIt.Error() - } - it.dataIt = nil - } - return nil - } - // If we had source code previously, discard that - if it.code != nil { - it.code = nil - return nil - } - // Step to the next state trie node, terminating if we're out of nodes - if cont := it.stateIt.Next(true); !cont { - if it.stateIt.Error() != nil { - return it.stateIt.Error() - } - it.state, it.stateIt = nil, nil - return nil - } - // If the state trie node is an internal entry, leave as is - if !it.stateIt.Leaf() { - return nil - } - // Otherwise we've reached an account node, initiate data iteration - var account types.StateAccount - if err := rlp.DecodeBytes(it.stateIt.LeafBlob(), &account); err != nil { - return err - } - // Lookup the preimage of account hash - preimage := it.state.trie.GetKey(it.stateIt.LeafKey()) - if preimage == nil { - return errors.New("account address is not available") - } - address := common.BytesToAddress(preimage) - - // Traverse the storage slots belong to the account - dataTrie, err := it.state.db.OpenStorageTrie(it.state.originalRoot, address, account.Root, it.state.trie) - if err != nil { - return err - } - it.dataIt, err = dataTrie.NodeIterator(nil) - if err != nil { - return err - } - if !it.dataIt.Next(true) { - it.dataIt = nil - } - if !bytes.Equal(account.CodeHash, types.EmptyCodeHash.Bytes()) { - it.codeHash = common.BytesToHash(account.CodeHash) - it.code, err = it.state.db.ContractCode(address, common.BytesToHash(account.CodeHash)) - if err != nil { - return fmt.Errorf("code %x: %v", account.CodeHash, err) - } - } - it.accountHash = it.stateIt.Parent() - return nil -} - -// retrieve pulls and caches the current state entry the iterator is traversing. -// The method returns whether there are any more data left for inspection. 
-func (it *nodeIterator) retrieve() bool { - // Clear out any previously set values - it.Hash = common.Hash{} - - // If the iteration's done, return no available data - if it.state == nil { - return false - } - // Otherwise retrieve the current entry - switch { - case it.dataIt != nil: - it.Hash, it.Parent = it.dataIt.Hash(), it.dataIt.Parent() - if it.Parent == (common.Hash{}) { - it.Parent = it.accountHash - } - case it.code != nil: - it.Hash, it.Parent = it.codeHash, it.accountHash - case it.stateIt != nil: - it.Hash, it.Parent = it.stateIt.Hash(), it.stateIt.Parent() - } - return true -} diff --git a/core/state/iterator_test.go b/core/state/iterator_test.go deleted file mode 100644 index 1f79618f9f..0000000000 --- a/core/state/iterator_test.go +++ /dev/null @@ -1,118 +0,0 @@ -// (c) 2023, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package state - -import ( - "testing" - - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/libevm/common" - "github.com/ava-labs/libevm/crypto" -) - -// Tests that the node iterator indeed walks over the entire database contents. 
-func TestNodeIteratorCoverage(t *testing.T) { - testNodeIteratorCoverage(t, rawdb.HashScheme) - testNodeIteratorCoverage(t, rawdb.PathScheme) -} - -func testNodeIteratorCoverage(t *testing.T, scheme string) { - // Create some arbitrary test state to iterate - db, sdb, ndb, root, _ := makeTestState(scheme) - ndb.Commit(root, false) - - state, err := New(root, sdb, nil) - if err != nil { - t.Fatalf("failed to create state trie at %x: %v", root, err) - } - // Gather all the node hashes found by the iterator - hashes := make(map[common.Hash]struct{}) - for it := newNodeIterator(state); it.Next(); { - if it.Hash != (common.Hash{}) { - hashes[it.Hash] = struct{}{} - } - } - // Check in-disk nodes - var ( - seenNodes = make(map[common.Hash]struct{}) - seenCodes = make(map[common.Hash]struct{}) - ) - it := db.NewIterator(nil, nil) - for it.Next() { - ok, hash := isTrieNode(scheme, it.Key(), it.Value()) - if !ok { - continue - } - seenNodes[hash] = struct{}{} - } - it.Release() - - // Check in-disk codes - it = db.NewIterator(nil, nil) - for it.Next() { - ok, hash := rawdb.IsCodeKey(it.Key()) - if !ok { - continue - } - if _, ok := hashes[common.BytesToHash(hash)]; !ok { - t.Errorf("state entry not reported %x", it.Key()) - } - seenCodes[common.BytesToHash(hash)] = struct{}{} - } - it.Release() - - // Cross check the iterated hashes and the database/nodepool content - for hash := range hashes { - _, ok := seenNodes[hash] - if !ok { - _, ok = seenCodes[hash] - } - if !ok { - t.Errorf("failed to retrieve reported node %x", hash) - } - } -} - -// isTrieNode is a helper function which reports if the provided -// database entry belongs to a trie node or not. -func isTrieNode(scheme string, key, val []byte) (bool, common.Hash) { - if scheme == rawdb.HashScheme { - if rawdb.IsLegacyTrieNode(key, val) { - return true, common.BytesToHash(key) - } - } else { - ok := rawdb.IsAccountTrieNode(key) - if ok { - return true, crypto.Keccak256Hash(val) - } - ok = rawdb.IsStorageTrieNode(key) - if ok { - return true, crypto.Keccak256Hash(val) - } - } - return false, common.Hash{} -} diff --git a/core/state/journal.go b/core/state/journal.go deleted file mode 100644 index 9029cfacfc..0000000000 --- a/core/state/journal.go +++ /dev/null @@ -1,323 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package state - -import ( - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/libevm/common" - "github.com/holiman/uint256" -) - -// journalEntry is a modification entry in the state change journal that can be -// reverted on demand. -type journalEntry interface { - // revert undoes the changes introduced by this journal entry. - revert(*StateDB) - - // dirtied returns the Ethereum address modified by this journal entry. - dirtied() *common.Address -} - -// journal contains the list of state modifications applied since the last state -// commit. These are tracked to be able to be reverted in the case of an execution -// exception or request for reversal. -type journal struct { - entries []journalEntry // Current changes tracked by the journal - dirties map[common.Address]int // Dirty accounts and the number of changes -} - -// newJournal creates a new initialized journal. -func newJournal() *journal { - return &journal{ - dirties: make(map[common.Address]int), - } -} - -// append inserts a new modification entry to the end of the change journal. -func (j *journal) append(entry journalEntry) { - j.entries = append(j.entries, entry) - if addr := entry.dirtied(); addr != nil { - j.dirties[*addr]++ - } -} - -// revert undoes a batch of journalled modifications along with any reverted -// dirty handling too. -func (j *journal) revert(statedb *StateDB, snapshot int) { - for i := len(j.entries) - 1; i >= snapshot; i-- { - // Undo the changes made by the operation - j.entries[i].revert(statedb) - - // Drop any dirty tracking induced by the change - if addr := j.entries[i].dirtied(); addr != nil { - if j.dirties[*addr]--; j.dirties[*addr] == 0 { - delete(j.dirties, *addr) - } - } - } - j.entries = j.entries[:snapshot] -} - -// dirty explicitly sets an address to dirty, even if the change entries would -// otherwise suggest it as clean. This method is an ugly hack to handle the RIPEMD -// precompile consensus exception. -func (j *journal) dirty(addr common.Address) { - j.dirties[addr]++ -} - -// length returns the current number of entries in the journal. -func (j *journal) length() int { - return len(j.entries) -} - -type ( - // Changes to the account trie. - createObjectChange struct { - account *common.Address - } - resetObjectChange struct { - account *common.Address - prev *stateObject - prevdestruct bool - prevAccount []byte - prevStorage map[common.Hash][]byte - - prevAccountOriginExist bool - prevAccountOrigin []byte - prevStorageOrigin map[common.Hash][]byte - } - selfDestructChange struct { - account *common.Address - prev bool // whether account had already self-destructed - prevbalance *uint256.Int - } - - // Changes to individual accounts. - balanceChange struct { - account *common.Address - prev *uint256.Int - } - multiCoinEnable struct { - account *common.Address - } - nonceChange struct { - account *common.Address - prev uint64 - } - storageChange struct { - account *common.Address - key, prevalue common.Hash - } - codeChange struct { - account *common.Address - prevcode, prevhash []byte - } - - // Changes to other state values. 
- refundChange struct { - prev uint64 - } - addLogChange struct { - txhash common.Hash - } - addPreimageChange struct { - hash common.Hash - } - touchChange struct { - account *common.Address - } - // Changes to the access list - accessListAddAccountChange struct { - address *common.Address - } - accessListAddSlotChange struct { - address *common.Address - slot *common.Hash - } - - transientStorageChange struct { - account *common.Address - key, prevalue common.Hash - } -) - -func (ch createObjectChange) revert(s *StateDB) { - delete(s.stateObjects, *ch.account) - delete(s.stateObjectsDirty, *ch.account) -} - -func (ch createObjectChange) dirtied() *common.Address { - return ch.account -} - -func (ch resetObjectChange) revert(s *StateDB) { - s.setStateObject(ch.prev) - if !ch.prevdestruct { - delete(s.stateObjectsDestruct, ch.prev.address) - } - if ch.prevAccount != nil { - s.accounts[ch.prev.addrHash] = ch.prevAccount - } - if ch.prevStorage != nil { - s.storages[ch.prev.addrHash] = ch.prevStorage - } - if ch.prevAccountOriginExist { - s.accountsOrigin[ch.prev.address] = ch.prevAccountOrigin - } - if ch.prevStorageOrigin != nil { - s.storagesOrigin[ch.prev.address] = ch.prevStorageOrigin - } -} - -func (ch resetObjectChange) dirtied() *common.Address { - return ch.account -} - -func (ch selfDestructChange) revert(s *StateDB) { - obj := s.getStateObject(*ch.account) - if obj != nil { - obj.selfDestructed = ch.prev - obj.setBalance(ch.prevbalance) - } -} - -func (ch selfDestructChange) dirtied() *common.Address { - return ch.account -} - -var ripemd = common.HexToAddress("0000000000000000000000000000000000000003") - -func (ch touchChange) revert(s *StateDB) { -} - -func (ch touchChange) dirtied() *common.Address { - return ch.account -} - -func (ch balanceChange) revert(s *StateDB) { - s.getStateObject(*ch.account).setBalance(ch.prev) -} - -func (ch balanceChange) dirtied() *common.Address { - return ch.account -} - -func (ch multiCoinEnable) revert(s *StateDB) { - // XXX: This should be removed once the libevm staedb is in use - types.DisableMultiCoin(&s.getStateObject(*ch.account).data) -} - -func (ch multiCoinEnable) dirtied() *common.Address { - return ch.account -} - -func (ch nonceChange) revert(s *StateDB) { - s.getStateObject(*ch.account).setNonce(ch.prev) -} - -func (ch nonceChange) dirtied() *common.Address { - return ch.account -} - -func (ch codeChange) revert(s *StateDB) { - s.getStateObject(*ch.account).setCode(common.BytesToHash(ch.prevhash), ch.prevcode) -} - -func (ch codeChange) dirtied() *common.Address { - return ch.account -} - -func (ch storageChange) revert(s *StateDB) { - s.getStateObject(*ch.account).setState(ch.key, ch.prevalue) -} - -func (ch storageChange) dirtied() *common.Address { - return ch.account -} - -func (ch transientStorageChange) revert(s *StateDB) { - s.setTransientState(*ch.account, ch.key, ch.prevalue) -} - -func (ch transientStorageChange) dirtied() *common.Address { - return nil -} - -func (ch refundChange) revert(s *StateDB) { - s.refund = ch.prev -} - -func (ch refundChange) dirtied() *common.Address { - return nil -} - -func (ch addLogChange) revert(s *StateDB) { - logs := s.logs[ch.txhash] - if len(logs) == 1 { - delete(s.logs, ch.txhash) - } else { - s.logs[ch.txhash] = logs[:len(logs)-1] - } - s.logSize-- -} - -func (ch addLogChange) dirtied() *common.Address { - return nil -} - -func (ch addPreimageChange) revert(s *StateDB) { - delete(s.preimages, ch.hash) -} - -func (ch addPreimageChange) dirtied() *common.Address { - return nil -} - 
-func (ch accessListAddAccountChange) revert(s *StateDB) { - /* - One important invariant here, is that whenever a (addr, slot) is added, if the - addr is not already present, the add causes two journal entries: - - one for the address, - - one for the (address,slot) - Therefore, when unrolling the change, we can always blindly delete the - (addr) at this point, since no storage adds can remain when come upon - a single (addr) change. - */ - s.accessList.DeleteAddress(*ch.address) -} - -func (ch accessListAddAccountChange) dirtied() *common.Address { - return nil -} - -func (ch accessListAddSlotChange) revert(s *StateDB) { - s.accessList.DeleteSlot(*ch.address, *ch.slot) -} - -func (ch accessListAddSlotChange) dirtied() *common.Address { - return nil -} diff --git a/core/state/metrics.go b/core/state/metrics.go deleted file mode 100644 index 5e2f060c3a..0000000000 --- a/core/state/metrics.go +++ /dev/null @@ -1,47 +0,0 @@ -// (c) 2019-2021, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2021 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package state - -import "github.com/ava-labs/coreth/metrics" - -var ( - accountUpdatedMeter = metrics.NewRegisteredMeter("state/update/account", nil) - storageUpdatedMeter = metrics.NewRegisteredMeter("state/update/storage", nil) - accountDeletedMeter = metrics.NewRegisteredMeter("state/delete/account", nil) - storageDeletedMeter = metrics.NewRegisteredMeter("state/delete/storage", nil) - accountTrieUpdatedMeter = metrics.NewRegisteredMeter("state/update/accountnodes", nil) - storageTriesUpdatedMeter = metrics.NewRegisteredMeter("state/update/storagenodes", nil) - accountTrieDeletedMeter = metrics.NewRegisteredMeter("state/delete/accountnodes", nil) - storageTriesDeletedMeter = metrics.NewRegisteredMeter("state/delete/storagenodes", nil) - - slotDeletionMaxCount = metrics.NewRegisteredGauge("state/delete/storage/max/slot", nil) - slotDeletionMaxSize = metrics.NewRegisteredGauge("state/delete/storage/max/size", nil) - slotDeletionTimer = metrics.NewRegisteredResettingTimer("state/delete/storage/timer", nil) - slotDeletionCount = metrics.NewRegisteredMeter("state/delete/storage/slot", nil) - slotDeletionSize = metrics.NewRegisteredMeter("state/delete/storage/size", nil) - slotDeletionSkip = metrics.NewRegisteredGauge("state/delete/storage/skip", nil) -) diff --git a/core/state/snapshot/iterator.go b/core/state/snapshot/iterator.go index 7cc5dee33f..addae92deb 100644 --- a/core/state/snapshot/iterator.go +++ b/core/state/snapshot/iterator.go @@ -33,6 +33,7 @@ import ( "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/libevm/common" + ethsnapshot "github.com/ava-labs/libevm/core/state/snapshot" "github.com/ava-labs/libevm/ethdb" ) @@ -69,13 +70,7 @@ type AccountIterator interface { // StorageIterator is an iterator to step over the specific storage in a snapshot, // which may or may not be composed of multiple layers. -type StorageIterator interface { - Iterator - - // Slot returns the storage slot the iterator is currently at. An error will - // be returned if the iterator becomes invalid - Slot() []byte -} +type StorageIterator = ethsnapshot.StorageIterator // diffAccountIterator is an account iterator that steps over the accounts (both // live and deleted) contained within a single diff layer. Higher order iterators diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go index 7df8185d81..c101c16bde 100644 --- a/core/state/snapshot/snapshot.go +++ b/core/state/snapshot/snapshot.go @@ -35,10 +35,11 @@ import ( "time" "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/metrics" "github.com/ava-labs/libevm/common" + ethsnapshot "github.com/ava-labs/libevm/core/state/snapshot" "github.com/ava-labs/libevm/ethdb" + "github.com/ava-labs/libevm/libevm/stateconf" "github.com/ava-labs/libevm/log" "github.com/ava-labs/libevm/triedb" ) @@ -118,28 +119,7 @@ var ( ) // Snapshot represents the functionality supported by a snapshot storage layer. -type Snapshot interface { - // Root returns the root hash for which this snapshot was made. - Root() common.Hash - - // Account directly retrieves the account associated with a particular hash in - // the snapshot slim data format. - Account(hash common.Hash) (*types.SlimAccount, error) - - // AccountRLP directly retrieves the account RLP associated with a particular - // hash in the snapshot slim data format. 
- AccountRLP(hash common.Hash) ([]byte, error) - - // Storage directly retrieves the storage data associated with a particular hash, - // within a particular account. - Storage(accountHash, storageHash common.Hash) ([]byte, error) - - // AccountIterator creates an account iterator over the account trie given by the provided root hash. - AccountIterator(seek common.Hash) AccountIterator - - // StorageIterator creates a storage iterator over the storage trie given by the provided root hash. - StorageIterator(account common.Hash, seek common.Hash) (StorageIterator, bool) -} +type Snapshot = ethsnapshot.Snapshot // snapshot is the internal version of the snapshot data layer that supports some // additional methods compared to the public API. @@ -164,6 +144,12 @@ type snapshot interface { // Stale return whether this layer has become stale (was flattened across) or // if it's still live. Stale() bool + + // AccountIterator creates an account iterator over an arbitrary layer. + AccountIterator(seek common.Hash) AccountIterator + + // StorageIterator creates a storage iterator over an arbitrary layer. + StorageIterator(account common.Hash, seek common.Hash) (StorageIterator, bool) } // Config includes the configurations for snapshots. @@ -199,11 +185,6 @@ type Tree struct { // Test hooks onFlatten func() // Hook invoked when the bottom most diff layers are flattened - - // XXX: The following fields are to help with integrating the modified snapshot - // with the upstream statedb. - parentBlockHash *common.Hash - blockHash *common.Hash } // New attempts to load an already existing snapshot from a persistent key-value @@ -326,9 +307,13 @@ func (t *Tree) Snapshots(blockHash common.Hash, limits int, nodisk bool) []Snaps return ret } -func (t *Tree) WithBlockHashes(blockHash, parentBlockHash common.Hash) { - t.blockHash = &blockHash - t.parentBlockHash = &parentBlockHash +type blockHashes struct { + blockHash common.Hash + parentBlockHash common.Hash +} + +func WithBlockHashes(blockHash, parentBlockHash common.Hash) stateconf.SnapshotUpdateOption { + return stateconf.WithUpdatePayload(blockHashes{blockHash, parentBlockHash}) } // Update adds a new snapshot into the tree, if that can be linked to an existing @@ -339,13 +324,19 @@ func (t *Tree) Update( destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte, + opts ...stateconf.SnapshotUpdateOption, ) error { - blockHash := *t.blockHash - parentBlockHash := *t.parentBlockHash + if len(opts) == 0 { + return fmt.Errorf("missing block hashes") + } + + payload := stateconf.ExtractUpdatePayload(opts[0]) + p, ok := payload.(blockHashes) + if !ok { + return fmt.Errorf("invalid block hashes payload: %T", payload) + } - // Clear the block hashes, they must be set each time - t.blockHash, t.parentBlockHash = nil, nil - return t.UpdateWithBlockHashes(blockHash, blockRoot, parentBlockHash, destructs, accounts, storage) + return t.UpdateWithBlockHashes(p.blockHash, blockRoot, p.parentBlockHash, destructs, accounts, storage) } func (t *Tree) UpdateWithBlockHashes( diff --git a/core/state/snapshot/snapshot_ext.go b/core/state/snapshot/snapshot_ext.go index 829e242d39..8b73f83fcc 100644 --- a/core/state/snapshot/snapshot_ext.go +++ b/core/state/snapshot/snapshot_ext.go @@ -23,9 +23,19 @@ func (t *Tree) DiskStorageIterator(account common.Hash, seek common.Hash) Storag return it } +type SnapshotIterable interface { + Snapshot + + // AccountIterator creates an account iterator over an arbitrary layer. 
+ AccountIterator(seek common.Hash) AccountIterator + + // StorageIterator creates a storage iterator over an arbitrary layer. + StorageIterator(account common.Hash, seek common.Hash) (StorageIterator, bool) +} + // NewDiskLayer creates a diskLayer for direct access to the contents of the on-disk // snapshot. Does not perform any validation. -func NewDiskLayer(diskdb ethdb.KeyValueStore) Snapshot { +func NewDiskLayer(diskdb ethdb.KeyValueStore) SnapshotIterable { return &diskLayer{ diskdb: diskdb, created: time.Now(), diff --git a/core/state/state_object.go b/core/state/state_object.go deleted file mode 100644 index 44345a9b0c..0000000000 --- a/core/state/state_object.go +++ /dev/null @@ -1,621 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2014 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package state - -import ( - "bytes" - "fmt" - "io" - "math/big" - "time" - - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/metrics" - "github.com/ava-labs/libevm/common" - "github.com/ava-labs/libevm/crypto" - "github.com/ava-labs/libevm/rlp" - "github.com/ava-labs/libevm/trie/trienode" - "github.com/holiman/uint256" -) - -type Code []byte - -func (c Code) String() string { - return string(c) //strings.Join(Disassemble(c), " ") -} - -type Storage map[common.Hash]common.Hash - -func (s Storage) String() (str string) { - for key, value := range s { - str += fmt.Sprintf("%X : %X\n", key, value) - } - return -} - -func (s Storage) Copy() Storage { - cpy := make(Storage, len(s)) - for key, value := range s { - cpy[key] = value - } - return cpy -} - -// stateObject represents an Ethereum account which is being modified. -// -// The usage pattern is as follows: -// - First you need to obtain a state object. -// - Account values as well as storages can be accessed and modified through the object. -// - Finally, call commit to return the changes of storage trie and update account data. -type stateObject struct { - db *StateDB - address common.Address // address of ethereum account - addrHash common.Hash // hash of ethereum address of the account - origin *types.StateAccount // Account original data without any change applied, nil means it was not existent - data types.StateAccount // Account data with all mutations applied in the scope of block - - // Write caches. 
- trie Trie // storage trie, which becomes non-nil on first access - code Code // contract bytecode, which gets set when code is loaded - - originStorage Storage // Storage cache of original entries to dedup rewrites - pendingStorage Storage // Storage entries that need to be flushed to disk, at the end of an entire block - dirtyStorage Storage // Storage entries that have been modified in the current transaction execution, reset for every transaction - - // Cache flags. - dirtyCode bool // true if the code was updated - - // Flag whether the account was marked as self-destructed. The self-destructed account - // is still accessible in the scope of same transaction. - selfDestructed bool - - // Flag whether the account was marked as deleted. A self-destructed account - // or an account that is considered as empty will be marked as deleted at - // the end of transaction and no longer accessible anymore. - deleted bool - - // Flag whether the object was created in the current transaction - created bool -} - -// empty returns whether the account is considered empty. -func (s *stateObject) empty() bool { - return s.data.Nonce == 0 && s.data.Balance.IsZero() && bytes.Equal(s.data.CodeHash, types.EmptyCodeHash.Bytes()) && !types.IsMultiCoin(&s.data) -} - -// newObject creates a state object. -func newObject(db *StateDB, address common.Address, acct *types.StateAccount) *stateObject { - var ( - origin = acct - created = acct == nil // true if the account was not existent - ) - if acct == nil { - acct = types.NewEmptyStateAccount() - } - return &stateObject{ - db: db, - address: address, - addrHash: crypto.Keccak256Hash(address[:]), - origin: origin, - data: *acct, - originStorage: make(Storage), - pendingStorage: make(Storage), - dirtyStorage: make(Storage), - created: created, - } -} - -// EncodeRLP implements rlp.Encoder. -func (s *stateObject) EncodeRLP(w io.Writer) error { - return rlp.Encode(w, &s.data) -} - -func (s *stateObject) markSelfdestructed() { - s.selfDestructed = true -} - -func (s *stateObject) touch() { - s.db.journal.append(touchChange{ - account: &s.address, - }) - if s.address == ripemd { - // Explicitly put it in the dirty-cache, which is otherwise generated from - // flattened journals. - s.db.journal.dirty(s.address) - } -} - -// getTrie returns the associated storage trie. The trie will be opened -// if it's not loaded previously. An error will be returned if trie can't -// be loaded. -func (s *stateObject) getTrie() (Trie, error) { - if s.trie == nil { - // Try fetching from prefetcher first - if s.data.Root != types.EmptyRootHash && s.db.prefetcher != nil { - // When the miner is creating the pending state, there is no prefetcher - s.trie = s.db.prefetcher.trie(s.addrHash, s.data.Root) - } - if s.trie == nil { - tr, err := s.db.db.OpenStorageTrie(s.db.originalRoot, s.address, s.data.Root, s.db.trie) - if err != nil { - return nil, err - } - s.trie = tr - } - } - return s.trie, nil -} - -// GetState retrieves a value from the account storage trie. -func (s *stateObject) GetState(key common.Hash) common.Hash { - // If we have a dirty value for this state entry, return it - value, dirty := s.dirtyStorage[key] - if dirty { - return value - } - // Otherwise return the entry's original value - return s.GetCommittedState(key) -} - -// GetCommittedState retrieves a value from the committed account storage trie. 
-func (s *stateObject) GetCommittedState(key common.Hash) common.Hash { - // If we have a pending write or clean cached, return that - if value, pending := s.pendingStorage[key]; pending { - return value - } - if value, cached := s.originStorage[key]; cached { - return value - } - // If the object was destructed in *this* block (and potentially resurrected), - // the storage has been cleared out, and we should *not* consult the previous - // database about any storage values. The only possible alternatives are: - // 1) resurrect happened, and new slot values were set -- those should - // have been handles via pendingStorage above. - // 2) we don't have new values, and can deliver empty response back - if _, destructed := s.db.stateObjectsDestruct[s.address]; destructed { - return common.Hash{} - } - // If no live objects are available, attempt to use snapshots - var ( - enc []byte - err error - value common.Hash - ) - if s.db.snap != nil { - start := time.Now() - enc, err = s.db.snap.Storage(s.addrHash, crypto.Keccak256Hash(key.Bytes())) - if metrics.EnabledExpensive { - s.db.SnapshotStorageReads += time.Since(start) - } - if len(enc) > 0 { - _, content, _, err := rlp.Split(enc) - if err != nil { - s.db.setError(err) - } - value.SetBytes(content) - } - } - // If the snapshot is unavailable or reading from it fails, load from the database. - if s.db.snap == nil || err != nil { - start := time.Now() - tr, err := s.getTrie() - if err != nil { - s.db.setError(err) - return common.Hash{} - } - val, err := tr.GetStorage(s.address, key.Bytes()) - if metrics.EnabledExpensive { - s.db.StorageReads += time.Since(start) - } - if err != nil { - s.db.setError(err) - return common.Hash{} - } - value.SetBytes(val) - } - s.originStorage[key] = value - return value -} - -// SetState updates a value in account storage. -func (s *stateObject) SetState(key, value common.Hash) { - // If the new value is the same as old, don't set - prev := s.GetState(key) - if prev == value { - return - } - // New value is different, update and journal the change - s.db.journal.append(storageChange{ - account: &s.address, - key: key, - prevalue: prev, - }) - s.setState(key, value) -} - -func (s *stateObject) setState(key, value common.Hash) { - s.dirtyStorage[key] = value -} - -// finalise moves all dirty storage slots into the pending area to be hashed or -// committed later. It is invoked at the end of every transaction. -func (s *stateObject) finalise(prefetch bool) { - slotsToPrefetch := make([][]byte, 0, len(s.dirtyStorage)) - for key, value := range s.dirtyStorage { - s.pendingStorage[key] = value - if value != s.originStorage[key] { - slotsToPrefetch = append(slotsToPrefetch, common.CopyBytes(key[:])) // Copy needed for closure - } - } - if s.db.prefetcher != nil && prefetch && len(slotsToPrefetch) > 0 && s.data.Root != types.EmptyRootHash { - s.db.prefetcher.prefetch(s.addrHash, s.data.Root, s.address, slotsToPrefetch) - } - if len(s.dirtyStorage) > 0 { - s.dirtyStorage = make(Storage) - } -} - -// updateTrie is responsible for persisting cached storage changes into the -// object's storage trie. In case the storage trie is not yet loaded, this -// function will load the trie automatically. If any issues arise during the -// loading or updating of the trie, an error will be returned. Furthermore, -// this function will return the mutated storage trie, or nil if there is no -// storage change at all. 
-func (s *stateObject) updateTrie() (Trie, error) { - // Make sure all dirty slots are finalized into the pending storage area - s.finalise(false) - - // Short circuit if nothing changed, don't bother with hashing anything - if len(s.pendingStorage) == 0 { - return s.trie, nil - } - // Track the amount of time wasted on updating the storage trie - if metrics.EnabledExpensive { - defer func(start time.Time) { s.db.StorageUpdates += time.Since(start) }(time.Now()) - } - // The snapshot storage map for the object - var ( - storage map[common.Hash][]byte - origin map[common.Hash][]byte - ) - tr, err := s.getTrie() - if err != nil { - s.db.setError(err) - return nil, err - } - // Insert all the pending storage updates into the trie - usedStorage := make([][]byte, 0, len(s.pendingStorage)) - for key, value := range s.pendingStorage { - // Skip noop changes, persist actual changes - if value == s.originStorage[key] { - continue - } - prev := s.originStorage[key] - s.originStorage[key] = value - - var encoded []byte // rlp-encoded value to be used by the snapshot - if (value == common.Hash{}) { - if err := tr.DeleteStorage(s.address, key[:]); err != nil { - s.db.setError(err) - return nil, err - } - s.db.StorageDeleted += 1 - } else { - // Encoding []byte cannot fail, ok to ignore the error. - trimmed := common.TrimLeftZeroes(value[:]) - encoded, _ = rlp.EncodeToBytes(trimmed) - if err := tr.UpdateStorage(s.address, key[:], trimmed); err != nil { - s.db.setError(err) - return nil, err - } - s.db.StorageUpdated += 1 - } - // Cache the mutated storage slots until commit - if storage == nil { - if storage = s.db.storages[s.addrHash]; storage == nil { - storage = make(map[common.Hash][]byte) - s.db.storages[s.addrHash] = storage - } - } - khash := crypto.HashData(s.db.hasher, key[:]) - storage[khash] = encoded // encoded will be nil if it's deleted - - // Cache the original value of mutated storage slots - if origin == nil { - if origin = s.db.storagesOrigin[s.address]; origin == nil { - origin = make(map[common.Hash][]byte) - s.db.storagesOrigin[s.address] = origin - } - } - // Track the original value of slot only if it's mutated first time - if _, ok := origin[khash]; !ok { - if prev == (common.Hash{}) { - origin[khash] = nil // nil if it was not present previously - } else { - // Encoding []byte cannot fail, ok to ignore the error. - b, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(prev[:])) - origin[khash] = b - } - } - // Cache the items for preloading - usedStorage = append(usedStorage, common.CopyBytes(key[:])) // Copy needed for closure - } - if s.db.prefetcher != nil { - s.db.prefetcher.used(s.addrHash, s.data.Root, usedStorage) - } - s.pendingStorage = make(Storage) // reset pending map - return tr, nil -} - -// updateRoot flushes all cached storage mutations to trie, recalculating the -// new storage trie root. -func (s *stateObject) updateRoot() { - // Flush cached storage mutations into trie, short circuit if any error - // is occurred or there is not change in the trie. - tr, err := s.updateTrie() - if err != nil || tr == nil { - return - } - // Track the amount of time wasted on hashing the storage trie - if metrics.EnabledExpensive { - defer func(start time.Time) { s.db.StorageHashes += time.Since(start) }(time.Now()) - } - s.data.Root = tr.Hash() -} - -// commit obtains a set of dirty storage trie nodes and updates the account data. -// The returned set can be nil if nothing to commit. This function assumes all -// storage mutations have already been flushed into trie by updateRoot. 
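As shown above, updateTrie stores every mutated slot for the snapshot as a left-trimmed, RLP-encoded byte string, and GetCommittedState reverses that with rlp.Split before padding the bytes back into a hash. A small round trip of just that encoding (not part of the patch; import paths spelled as elsewhere in this repository):

package main

import (
	"fmt"
	"math/big"

	"github.com/ava-labs/libevm/common"
	"github.com/ava-labs/libevm/rlp"
)

func main() {
	value := common.BigToHash(big.NewInt(42)) // a storage slot value, left-padded to 32 bytes

	// Write side (updateTrie): strip leading zeroes, then RLP-encode for the snapshot.
	trimmed := common.TrimLeftZeroes(value[:])
	encoded, _ := rlp.EncodeToBytes(trimmed) // encoding a byte slice cannot fail

	// Read side (GetCommittedState): split the RLP string and left-pad it back into a hash.
	_, content, _, err := rlp.Split(encoded)
	if err != nil {
		panic(err)
	}
	var decoded common.Hash
	decoded.SetBytes(content)

	fmt.Println(decoded == value) // true
}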
-func (s *stateObject) commit() (*trienode.NodeSet, error) { - // Short circuit if trie is not even loaded, don't bother with committing anything - if s.trie == nil { - s.origin = s.data.Copy() - return nil, nil - } - // Track the amount of time wasted on committing the storage trie - if metrics.EnabledExpensive { - defer func(start time.Time) { s.db.StorageCommits += time.Since(start) }(time.Now()) - } - // The trie is currently in an open state and could potentially contain - // cached mutations. Call commit to acquire a set of nodes that have been - // modified, the set can be nil if nothing to commit. - root, nodes, err := s.trie.Commit(false) - if err != nil { - return nil, err - } - s.data.Root = root - - // Update original account data after commit - s.origin = s.data.Copy() - return nodes, nil -} - -// AddBalance adds amount to s's balance. -// It is used to add funds to the destination account of a transfer. -func (s *stateObject) AddBalance(amount *uint256.Int) { - // EIP161: We must check emptiness for the objects such that the account - // clearing (0,0,0 objects) can take effect. - if amount.IsZero() { - if s.empty() { - s.touch() - } - return - } - s.SetBalance(new(uint256.Int).Add(s.Balance(), amount)) -} - -// SubBalance removes amount from s's balance. -// It is used to remove funds from the origin account of a transfer. -func (s *stateObject) SubBalance(amount *uint256.Int) { - if amount.IsZero() { - return - } - s.SetBalance(new(uint256.Int).Sub(s.Balance(), amount)) -} - -func (s *stateObject) SetBalance(amount *uint256.Int) { - s.db.journal.append(balanceChange{ - account: &s.address, - prev: new(uint256.Int).Set(s.data.Balance), - }) - s.setBalance(amount) -} - -// AddBalanceMultiCoin adds amount of coinID to s's balance. -// It is used to add multicoin funds to the destination account of a transfer. -func (s *stateObject) AddBalanceMultiCoin(coinID common.Hash, amount *big.Int, db Database) { - if amount.Sign() == 0 { - if s.empty() { - s.touch() - } - - return - } - s.SetBalanceMultiCoin(coinID, new(big.Int).Add(s.BalanceMultiCoin(coinID, db), amount), db) -} - -// SubBalanceMultiCoin removes amount of coinID from s's balance. -// It is used to remove multicoin funds from the origin account of a transfer. 
-func (s *stateObject) SubBalanceMultiCoin(coinID common.Hash, amount *big.Int, db Database) { - if amount.Sign() == 0 { - return - } - s.SetBalanceMultiCoin(coinID, new(big.Int).Sub(s.BalanceMultiCoin(coinID, db), amount), db) -} - -func (s *stateObject) SetBalanceMultiCoin(coinID common.Hash, amount *big.Int, db Database) { - s.EnableMultiCoin() - NormalizeCoinID(&coinID) - s.SetState(coinID, common.BigToHash(amount)) -} - -func (s *stateObject) setBalance(amount *uint256.Int) { - s.data.Balance = amount -} - -func (s *stateObject) enableMultiCoin() { - types.EnableMultiCoin(&s.data) -} - -func (s *stateObject) deepCopy(db *StateDB) *stateObject { - obj := &stateObject{ - db: db, - address: s.address, - addrHash: s.addrHash, - origin: s.origin, - data: s.data, - } - if s.trie != nil { - obj.trie = db.db.CopyTrie(s.trie) - } - obj.code = s.code - obj.dirtyStorage = s.dirtyStorage.Copy() - obj.originStorage = s.originStorage.Copy() - obj.pendingStorage = s.pendingStorage.Copy() - obj.selfDestructed = s.selfDestructed - obj.dirtyCode = s.dirtyCode - obj.deleted = s.deleted - return obj -} - -// -// Attribute accessors -// - -// Address returns the address of the contract/account -func (s *stateObject) Address() common.Address { - return s.address -} - -// Code returns the contract code associated with this object, if any. -func (s *stateObject) Code() []byte { - if s.code != nil { - return s.code - } - if bytes.Equal(s.CodeHash(), types.EmptyCodeHash.Bytes()) { - return nil - } - code, err := s.db.db.ContractCode(s.address, common.BytesToHash(s.CodeHash())) - if err != nil { - s.db.setError(fmt.Errorf("can't load code hash %x: %v", s.CodeHash(), err)) - } - s.code = code - return code -} - -// CodeSize returns the size of the contract code associated with this object, -// or zero if none. This method is an almost mirror of Code, but uses a cache -// inside the database to avoid loading codes seen recently. -func (s *stateObject) CodeSize() int { - if s.code != nil { - return len(s.code) - } - if bytes.Equal(s.CodeHash(), types.EmptyCodeHash.Bytes()) { - return 0 - } - size, err := s.db.db.ContractCodeSize(s.address, common.BytesToHash(s.CodeHash())) - if err != nil { - s.db.setError(fmt.Errorf("can't load code size %x: %v", s.CodeHash(), err)) - } - return size -} - -func (s *stateObject) SetCode(codeHash common.Hash, code []byte) { - prevcode := s.Code() - s.db.journal.append(codeChange{ - account: &s.address, - prevhash: s.CodeHash(), - prevcode: prevcode, - }) - s.setCode(codeHash, code) -} - -func (s *stateObject) setCode(codeHash common.Hash, code []byte) { - s.code = code - s.data.CodeHash = codeHash[:] - s.dirtyCode = true -} - -func (s *stateObject) SetNonce(nonce uint64) { - s.db.journal.append(nonceChange{ - account: &s.address, - prev: s.data.Nonce, - }) - s.setNonce(nonce) -} - -func (s *stateObject) setNonce(nonce uint64) { - s.data.Nonce = nonce -} - -func (s *stateObject) CodeHash() []byte { - return s.data.CodeHash -} - -func (s *stateObject) Balance() *uint256.Int { - return s.data.Balance -} - -// NormalizeCoinID ORs the 0th bit of the first byte in -// [coinID], which ensures this bit will be 1 and all other -// bits are left the same. -// This partitions multicoin storage from normal state storage. -func NormalizeCoinID(coinID *common.Hash) { - coinID[0] |= 0x01 -} - -// NormalizeStateKey ANDs the 0th bit of the first byte in -// [key], which ensures this bit will be 0 and all other bits -// are left the same. 
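NormalizeCoinID above forces bit 0 of the leading byte to 1, while NormalizeStateKey (continued below) forces it to 0, so the two keyspaces can never overlap. A self-contained demonstration (not part of the patch; the helpers are local mirrors of the functions shown here):

package main

import "fmt"

type hash [32]byte

// Mirrors NormalizeCoinID: force bit 0 of the first byte to 1.
func normalizeCoinID(id *hash) { id[0] |= 0x01 }

// Mirrors NormalizeStateKey: force bit 0 of the first byte to 0.
func normalizeStateKey(key *hash) { key[0] &= 0xfe }

func main() {
	var k hash
	k[0], k[31] = 0xab, 0x01 // an arbitrary key/coin ID

	coin, state := k, k
	normalizeCoinID(&coin)
	normalizeStateKey(&state)

	// The normalized forms always differ in bit 0 of the first byte, so a
	// multicoin balance slot can never collide with a regular storage slot.
	fmt.Printf("coin slot byte 0:  %#02x\nstate slot byte 0: %#02x\ndisjoint: %v\n",
		coin[0], state[0], coin != state)
}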
-// This partitions normal state storage from multicoin storage. -func NormalizeStateKey(key *common.Hash) { - key[0] &= 0xfe -} - -func (s *stateObject) BalanceMultiCoin(coinID common.Hash, db Database) *big.Int { - NormalizeCoinID(&coinID) - return s.GetState(coinID).Big() -} - -func (s *stateObject) EnableMultiCoin() bool { - if types.IsMultiCoin(&s.data) { - return false - } - s.db.journal.append(multiCoinEnable{ - account: &s.address, - }) - s.enableMultiCoin() - return true -} - -func (s *stateObject) Nonce() uint64 { - return s.data.Nonce -} - -func (s *stateObject) Root() common.Hash { - return s.data.Root -} diff --git a/core/state/state_test.go b/core/state/state_test.go index b4f17983eb..1b9484514b 100644 --- a/core/state/state_test.go +++ b/core/state/state_test.go @@ -27,17 +27,12 @@ package state import ( - "bytes" - "encoding/json" "testing" "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/libevm/common" - "github.com/ava-labs/libevm/crypto" "github.com/ava-labs/libevm/ethdb" - "github.com/ava-labs/libevm/triedb" - "github.com/holiman/uint256" ) type stateEnv struct { @@ -51,103 +46,6 @@ func newStateEnv() *stateEnv { return &stateEnv{db: db, state: sdb} } -func TestDump(t *testing.T) { - db := rawdb.NewMemoryDatabase() - tdb := NewDatabaseWithConfig(db, &triedb.Config{Preimages: true}) - sdb, _ := New(types.EmptyRootHash, tdb, nil) - s := &stateEnv{db: db, state: sdb} - - // generate a few entries - obj1 := s.state.getOrNewStateObject(common.BytesToAddress([]byte{0x01})) - obj1.AddBalance(uint256.NewInt(22)) - obj2 := s.state.getOrNewStateObject(common.BytesToAddress([]byte{0x01, 0x02})) - obj2.SetCode(crypto.Keccak256Hash([]byte{3, 3, 3, 3, 3, 3, 3}), []byte{3, 3, 3, 3, 3, 3, 3}) - obj3 := s.state.getOrNewStateObject(common.BytesToAddress([]byte{0x02})) - obj3.SetBalance(uint256.NewInt(44)) - - // write some of them to the trie - s.state.updateStateObject(obj1) - s.state.updateStateObject(obj2) - root, _ := s.state.Commit(0, false) - - // check that DumpToCollector contains the state objects that are in trie - s.state, _ = New(root, tdb, nil) - got := string(s.state.Dump(nil)) - want := `{ - "root": "1d75ab73e172edb7c3b3c0fd004d9896992fb96b617f6f954641d7618159e5e4", - "accounts": { - "0x0000000000000000000000000000000000000001": { - "balance": "22", - "nonce": 0, - "root": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470", - "isMultiCoin": false, - "address": "0x0000000000000000000000000000000000000001", - "key": "0x1468288056310c82aa4c01a7e12a10f8111a0560e72b700555479031b86c357d" - }, - "0x0000000000000000000000000000000000000002": { - "balance": "44", - "nonce": 0, - "root": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470", - "isMultiCoin": false, - "address": "0x0000000000000000000000000000000000000002", - "key": "0xd52688a8f926c816ca1e079067caba944f158e764817b83fc43594370ca9cf62" - }, - "0x0000000000000000000000000000000000000102": { - "balance": "0", - "nonce": 0, - "root": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "codeHash": "0x87874902497a5bb968da31a2998d8f22e949d1ef6214bcdedd8bae24cca4b9e3", - "code": "0x03030303030303", - "isMultiCoin": false, - "address": "0x0000000000000000000000000000000000000102", - "key": 
"0xa17eacbc25cda025e81db9c5c62868822c73ce097cee2a63e33a2e41268358a1" - } - } -}` - if got != want { - t.Errorf("DumpToCollector mismatch:\ngot: %s\nwant: %s\n", got, want) - } -} - -func TestIterativeDump(t *testing.T) { - db := rawdb.NewMemoryDatabase() - tdb := NewDatabaseWithConfig(db, &triedb.Config{Preimages: true}) - sdb, _ := New(types.EmptyRootHash, tdb, nil) - s := &stateEnv{db: db, state: sdb} - - // generate a few entries - obj1 := s.state.getOrNewStateObject(common.BytesToAddress([]byte{0x01})) - obj1.AddBalance(uint256.NewInt(22)) - obj2 := s.state.getOrNewStateObject(common.BytesToAddress([]byte{0x01, 0x02})) - obj2.SetCode(crypto.Keccak256Hash([]byte{3, 3, 3, 3, 3, 3, 3}), []byte{3, 3, 3, 3, 3, 3, 3}) - obj3 := s.state.getOrNewStateObject(common.BytesToAddress([]byte{0x02})) - obj3.SetBalance(uint256.NewInt(44)) - obj4 := s.state.getOrNewStateObject(common.BytesToAddress([]byte{0x00})) - obj4.AddBalance(uint256.NewInt(1337)) - - // write some of them to the trie - s.state.updateStateObject(obj1) - s.state.updateStateObject(obj2) - root, _ := s.state.Commit(0, false) - s.state, _ = New(root, tdb, nil) - - b := &bytes.Buffer{} - s.state.IterativeDump(nil, json.NewEncoder(b)) - // check that DumpToCollector contains the state objects that are in trie - got := b.String() - want := `{"root":"0x0ffca661efa3b7504ac015083994c94fd7d0d24db60354c717c936afcced762a"} -{"balance":"22","nonce":0,"root":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","codeHash":"0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470","isMultiCoin":false,"address":"0x0000000000000000000000000000000000000001","key":"0x1468288056310c82aa4c01a7e12a10f8111a0560e72b700555479031b86c357d"} -{"balance":"1337","nonce":0,"root":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","codeHash":"0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470","isMultiCoin":false,"address":"0x0000000000000000000000000000000000000000","key":"0x5380c7b7ae81a58eb98d9c78de4a1fd7fd9535fc953ed2be602daaa41767312a"} -{"balance":"0","nonce":0,"root":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","codeHash":"0x87874902497a5bb968da31a2998d8f22e949d1ef6214bcdedd8bae24cca4b9e3","code":"0x03030303030303","isMultiCoin":false,"address":"0x0000000000000000000000000000000000000102","key":"0xa17eacbc25cda025e81db9c5c62868822c73ce097cee2a63e33a2e41268358a1"} -{"balance":"44","nonce":0,"root":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","codeHash":"0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470","isMultiCoin":false,"address":"0x0000000000000000000000000000000000000002","key":"0xd52688a8f926c816ca1e079067caba944f158e764817b83fc43594370ca9cf62"} -` - if got != want { - t.Errorf("DumpToCollector mismatch:\ngot: %s\nwant: %s\n", got, want) - } -} - func TestNull(t *testing.T) { s := newStateEnv() address := common.HexToAddress("0x823140710bf13990e4500136726d8b55") @@ -205,107 +103,3 @@ func TestSnapshotEmpty(t *testing.T) { s := newStateEnv() s.state.RevertToSnapshot(s.state.Snapshot()) } - -func TestSnapshot2(t *testing.T) { - state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil) - - stateobjaddr0 := common.BytesToAddress([]byte("so0")) - stateobjaddr1 := common.BytesToAddress([]byte("so1")) - var storageaddr common.Hash - - data0 := common.BytesToHash([]byte{17}) - data1 := common.BytesToHash([]byte{18}) - - state.SetState(stateobjaddr0, storageaddr, data0) - state.SetState(stateobjaddr1, storageaddr, 
data1) - - // db, trie are already non-empty values - so0 := state.getStateObject(stateobjaddr0) - so0.SetBalance(uint256.NewInt(42)) - so0.SetNonce(43) - so0.SetCode(crypto.Keccak256Hash([]byte{'c', 'a', 'f', 'e'}), []byte{'c', 'a', 'f', 'e'}) - so0.selfDestructed = false - so0.deleted = false - state.setStateObject(so0) - - root, _ := state.Commit(0, false) - state, _ = New(root, state.db, state.snaps) - - // and one with deleted == true - so1 := state.getStateObject(stateobjaddr1) - so1.SetBalance(uint256.NewInt(52)) - so1.SetNonce(53) - so1.SetCode(crypto.Keccak256Hash([]byte{'c', 'a', 'f', 'e', '2'}), []byte{'c', 'a', 'f', 'e', '2'}) - so1.selfDestructed = true - so1.deleted = true - state.setStateObject(so1) - - so1 = state.getStateObject(stateobjaddr1) - if so1 != nil { - t.Fatalf("deleted object not nil when getting") - } - - snapshot := state.Snapshot() - state.RevertToSnapshot(snapshot) - - so0Restored := state.getStateObject(stateobjaddr0) - // Update lazily-loaded values before comparing. - so0Restored.GetState(storageaddr) - so0Restored.Code() - // non-deleted is equal (restored) - compareStateObjects(so0Restored, so0, t) - - // deleted should be nil, both before and after restore of state copy - so1Restored := state.getStateObject(stateobjaddr1) - if so1Restored != nil { - t.Fatalf("deleted object not nil after restoring snapshot: %+v", so1Restored) - } -} - -func compareStateObjects(so0, so1 *stateObject, t *testing.T) { - if so0.Address() != so1.Address() { - t.Fatalf("Address mismatch: have %v, want %v", so0.address, so1.address) - } - if so0.Balance().Cmp(so1.Balance()) != 0 { - t.Fatalf("Balance mismatch: have %v, want %v", so0.Balance(), so1.Balance()) - } - if so0.Nonce() != so1.Nonce() { - t.Fatalf("Nonce mismatch: have %v, want %v", so0.Nonce(), so1.Nonce()) - } - if so0.data.Root != so1.data.Root { - t.Errorf("Root mismatch: have %x, want %x", so0.data.Root[:], so1.data.Root[:]) - } - if !bytes.Equal(so0.CodeHash(), so1.CodeHash()) { - t.Fatalf("CodeHash mismatch: have %v, want %v", so0.CodeHash(), so1.CodeHash()) - } - if !bytes.Equal(so0.code, so1.code) { - t.Fatalf("Code mismatch: have %v, want %v", so0.code, so1.code) - } - - if len(so1.dirtyStorage) != len(so0.dirtyStorage) { - t.Errorf("Dirty storage size mismatch: have %d, want %d", len(so1.dirtyStorage), len(so0.dirtyStorage)) - } - for k, v := range so1.dirtyStorage { - if so0.dirtyStorage[k] != v { - t.Errorf("Dirty storage key %x mismatch: have %v, want %v", k, so0.dirtyStorage[k], v) - } - } - for k, v := range so0.dirtyStorage { - if so1.dirtyStorage[k] != v { - t.Errorf("Dirty storage key %x mismatch: have %v, want none.", k, v) - } - } - if len(so1.originStorage) != len(so0.originStorage) { - t.Errorf("Origin storage size mismatch: have %d, want %d", len(so1.originStorage), len(so0.originStorage)) - } - for k, v := range so1.originStorage { - if so0.originStorage[k] != v { - t.Errorf("Origin storage key %x mismatch: have %v, want %v", k, so0.originStorage[k], v) - } - } - for k, v := range so0.originStorage { - if so1.originStorage[k] != v { - t.Errorf("Origin storage key %x mismatch: have %v, want none.", k, v) - } - } -} diff --git a/core/state/statedb.go b/core/state/statedb.go index ecde88fb66..ebb0dcac89 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -28,48 +28,14 @@ package state import ( - "fmt" "math/big" - "reflect" - "sort" - "time" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/state/snapshot" 
"github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/metrics" "github.com/ava-labs/libevm/common" - "github.com/ava-labs/libevm/crypto" - "github.com/ava-labs/libevm/log" - "github.com/ava-labs/libevm/params" - "github.com/ava-labs/libevm/trie" - "github.com/ava-labs/libevm/trie/trienode" - "github.com/ava-labs/libevm/trie/triestate" + ethstate "github.com/ava-labs/libevm/core/state" "github.com/holiman/uint256" ) -const ( - storageDeleteLimit = 512 * 1024 * 1024 -) - -type revision struct { - id int - journalIndex int -} - -type snapshotTree interface { - Snapshot(root common.Hash) snapshot.Snapshot - Update( - blockRoot common.Hash, - parentRoot common.Hash, - destructs map[common.Hash]struct{}, - accounts map[common.Hash][]byte, - storage map[common.Hash]map[common.Hash][]byte, - ) error - StorageIterator(root common.Hash, account common.Hash, seek common.Hash) (snapshot.StorageIterator, error) - Cap(root common.Hash, layers int) error -} - // StateDB structs within the ethereum protocol are used to store anything // within the merkle trie. StateDBs take care of caching and storing // nested states. It's the general query interface to retrieve: @@ -82,917 +48,82 @@ type snapshotTree interface { // must be created with new root and updated database for accessing post- // commit states. type StateDB struct { - db Database - prefetcher *triePrefetcher - trie Trie - hasher crypto.KeccakState - snaps snapshotTree // Nil if snapshot is not available - snap snapshot.Snapshot // Nil if snapshot is not available - - // originalRoot is the pre-state root, before any changes were made. - // It will be updated when the Commit is called. - originalRoot common.Hash - - // These maps hold the state changes (including the corresponding - // original value) that occurred in this **block**. - accounts map[common.Hash][]byte // The mutated accounts in 'slim RLP' encoding - storages map[common.Hash]map[common.Hash][]byte // The mutated slots in prefix-zero trimmed rlp format - accountsOrigin map[common.Address][]byte // The original value of mutated accounts in 'slim RLP' encoding - storagesOrigin map[common.Address]map[common.Hash][]byte // The original value of mutated slots in prefix-zero trimmed rlp format - - // This map holds 'live' objects, which will get modified while processing - // a state transition. - stateObjects map[common.Address]*stateObject - stateObjectsPending map[common.Address]struct{} // State objects finalized but not yet written to the trie - stateObjectsDirty map[common.Address]struct{} // State objects modified in the current execution - stateObjectsDestruct map[common.Address]*types.StateAccount // State objects destructed in the block along with its previous value - - // DB error. - // State objects are used by the consensus core and VM which are - // unable to deal with database-level errors. Any error that occurs - // during a database read is memoized here and will eventually be - // returned by StateDB.Commit. Notably, this error is also shared - // by all cached state objects in case the database failure occurs - // when accessing state of accounts. - dbErr error + *ethstate.StateDB - // The refund counter, also used by state transitioning. - refund uint64 - - // The tx context and all occurred logs in the scope of transaction. 
+ // The tx context thash common.Hash txIndex int - logs map[common.Hash][]*types.Log - logSize uint - - // Preimages occurred seen by VM in the scope of block. - preimages map[common.Hash][]byte - - // Per-transaction access list - accessList *accessList - - // Transient storage - transientStorage transientStorage - - // Journal of state modifications. This is the backbone of - // Snapshot and RevertToSnapshot. - journal *journal - validRevisions []revision - nextRevisionId int - // Measurements gathered during execution for debugging purposes - AccountReads time.Duration - AccountHashes time.Duration - AccountUpdates time.Duration - AccountCommits time.Duration - StorageReads time.Duration - StorageHashes time.Duration - StorageUpdates time.Duration - StorageCommits time.Duration - SnapshotAccountReads time.Duration - SnapshotStorageReads time.Duration - SnapshotCommits time.Duration - TrieDBCommits time.Duration - - AccountUpdated int - StorageUpdated int - AccountDeleted int - StorageDeleted int - - // Testing hooks - onCommit func(states *triestate.Set) // Hook invoked when commit is performed + // Some fields remembered as they are used in tests + db Database + snaps ethstate.SnapshotTree } // New creates a new state from a given trie. -func New(root common.Hash, db Database, snaps snapshotTree) (*StateDB, error) { - tr, err := db.OpenTrie(root) +func New(root common.Hash, db Database, snaps ethstate.SnapshotTree) (*StateDB, error) { + stateDB, err := ethstate.New(root, db, snaps) if err != nil { return nil, err } - sdb := &StateDB{ - db: db, - trie: tr, - originalRoot: root, - snaps: snaps, - accounts: make(map[common.Hash][]byte), - storages: make(map[common.Hash]map[common.Hash][]byte), - accountsOrigin: make(map[common.Address][]byte), - storagesOrigin: make(map[common.Address]map[common.Hash][]byte), - stateObjects: make(map[common.Address]*stateObject), - stateObjectsPending: make(map[common.Address]struct{}), - stateObjectsDirty: make(map[common.Address]struct{}), - stateObjectsDestruct: make(map[common.Address]*types.StateAccount), - logs: make(map[common.Hash][]*types.Log), - preimages: make(map[common.Hash][]byte), - journal: newJournal(), - accessList: newAccessList(), - transientStorage: newTransientStorage(), - hasher: crypto.NewKeccakState(), - } - if sdb.snaps != nil { - // XXX: Make sure we treat incoming `nil` ptrs as `nil` values, not an - // interface to a nil ptr - v := reflect.ValueOf(sdb.snaps) - if v.Kind() == reflect.Ptr && v.IsNil() { - sdb.snaps = nil - } - } - if sdb.snaps != nil { - sdb.snap = sdb.snaps.Snapshot(root) - } - return sdb, nil + return &StateDB{ + StateDB: stateDB, + db: db, + snaps: snaps, + }, nil } // StartPrefetcher initializes a new trie prefetcher to pull in nodes from the // state trie concurrently while the state is mutated so that when we reach the // commit phase, most of the needed data is already hot. func (s *StateDB) StartPrefetcher(namespace string, maxConcurrency int) { - if s.prefetcher != nil { - s.prefetcher.close() - s.prefetcher = nil - } - if s.snap != nil { - s.prefetcher = newTriePrefetcher(s.db, s.originalRoot, namespace, maxConcurrency) - } -} - -// StopPrefetcher terminates a running prefetcher and reports any leftover stats -// from the gathered metrics. -func (s *StateDB) StopPrefetcher() { - if s.prefetcher != nil { - s.prefetcher.close() - s.prefetcher = nil - } -} - -// setError remembers the first non-nil error it is called with. 
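The replacement StateDB keeps only a thin coreth-specific layer: it embeds the upstream ethstate.StateDB, so every method not redefined here is promoted from the embedded type, and the overrides simply adjust their arguments before delegating. A minimal sketch of that embedding pattern (not part of the patch; upstreamDB and wrappedDB are hypothetical stand-ins, and addresses are omitted for brevity):

package main

import "fmt"

// upstreamDB stands in for the embedded ethstate.StateDB.
type upstreamDB struct{ slots map[[32]byte][32]byte }

func (u *upstreamDB) SetState(key, value [32]byte)   { u.slots[key] = value }
func (u *upstreamDB) GetState(key [32]byte) [32]byte { return u.slots[key] }

// wrappedDB mirrors the pattern used here: embed the upstream implementation
// and override only what needs coreth behaviour (key normalization), then delegate.
type wrappedDB struct {
	*upstreamDB
}

func (w *wrappedDB) SetState(key, value [32]byte) {
	key[0] &= 0xfe // NormalizeStateKey-style adjustment before delegating
	w.upstreamDB.SetState(key, value)
}

func main() {
	db := &wrappedDB{upstreamDB: &upstreamDB{slots: map[[32]byte][32]byte{}}}
	var key, val [32]byte
	key[0], val[31] = 0x01, 0x2a

	db.SetState(key, val) // goes through the override, which clears bit 0 of the key

	// GetState is promoted from the embedded type unchanged (the real wrapper
	// also normalizes on reads).
	fmt.Println(db.GetState([32]byte{}) == val) // true
}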
-func (s *StateDB) setError(err error) { - if s.dbErr == nil { - s.dbErr = err - } -} - -// Error returns the memorized database failure occurred earlier. -func (s *StateDB) Error() error { - return s.dbErr -} - -func (s *StateDB) AddLog(log *types.Log) { - s.journal.append(addLogChange{txhash: s.thash}) - - log.TxHash = s.thash - log.TxIndex = uint(s.txIndex) - log.Index = s.logSize - s.logs[s.thash] = append(s.logs[s.thash], log) - s.logSize++ -} - -// GetLogs returns the logs matching the specified transaction hash, and annotates -// them with the given blockNumber and blockHash. -func (s *StateDB) GetLogs(hash common.Hash, blockNumber uint64, blockHash common.Hash) []*types.Log { - logs := s.logs[hash] - for _, l := range logs { - l.BlockNumber = blockNumber - l.BlockHash = blockHash - } - return logs -} - -func (s *StateDB) Logs() []*types.Log { - var logs []*types.Log - for _, lgs := range s.logs { - logs = append(logs, lgs...) - } - return logs -} - -// AddPreimage records a SHA3 preimage seen by the VM. -func (s *StateDB) AddPreimage(hash common.Hash, preimage []byte) { - if _, ok := s.preimages[hash]; !ok { - s.journal.append(addPreimageChange{hash: hash}) - pi := make([]byte, len(preimage)) - copy(pi, preimage) - s.preimages[hash] = pi - } -} - -// Preimages returns a list of SHA3 preimages that have been submitted. -func (s *StateDB) Preimages() map[common.Hash][]byte { - return s.preimages -} - -// AddRefund adds gas to the refund counter -func (s *StateDB) AddRefund(gas uint64) { - s.journal.append(refundChange{prev: s.refund}) - s.refund += gas -} - -// SubRefund removes gas from the refund counter. -// This method will set the refund counter to 0 if the gas is greater than the current refund. -func (s *StateDB) SubRefund(gas uint64) { - s.journal.append(refundChange{prev: s.refund}) - if gas > s.refund { - log.Warn("Setting refund to 0", "currentRefund", s.refund, "gas", gas) - s.refund = 0 - return - } - s.refund -= gas -} - -// Exist reports whether the given account address exists in the state. -// Notably this also returns true for self-destructed accounts. -func (s *StateDB) Exist(addr common.Address) bool { - return s.getStateObject(addr) != nil -} - -// Empty returns whether the state object is either non-existent -// or empty according to the EIP161 specification (balance = nonce = code = 0) -func (s *StateDB) Empty(addr common.Address) bool { - so := s.getStateObject(addr) - return so == nil || so.empty() -} - -// GetBalance retrieves the balance from the given address or 0 if object not found -func (s *StateDB) GetBalance(addr common.Address) *uint256.Int { - stateObject := s.getStateObject(addr) - if stateObject != nil { - return stateObject.Balance() - } - return common.U2560 + s.StateDB.StartPrefetcher(namespace) // XXX: Trie prefetcher parallelism should be added back } // Retrieve the balance from the given address or 0 if object not found func (s *StateDB) GetBalanceMultiCoin(addr common.Address, coinID common.Hash) *big.Int { - stateObject := s.getStateObject(addr) - if stateObject != nil { - return stateObject.BalanceMultiCoin(coinID, s.db) - } - return new(big.Int).Set(common.Big0) -} - -// GetNonce retrieves the nonce from the given address or 0 if object not found -func (s *StateDB) GetNonce(addr common.Address) uint64 { - stateObject := s.getStateObject(addr) - if stateObject != nil { - return stateObject.Nonce() - } - - return 0 -} - -// GetStorageRoot retrieves the storage root from the given address or empty -// if object not found. 
-func (s *StateDB) GetStorageRoot(addr common.Address) common.Hash { - stateObject := s.getStateObject(addr) - if stateObject != nil { - return stateObject.Root() - } - return common.Hash{} -} - -// TxIndex returns the current transaction index set by Prepare. -func (s *StateDB) TxIndex() int { - return s.txIndex -} - -func (s *StateDB) GetCode(addr common.Address) []byte { - stateObject := s.getStateObject(addr) - if stateObject != nil { - return stateObject.Code() - } - return nil -} - -func (s *StateDB) GetCodeSize(addr common.Address) int { - stateObject := s.getStateObject(addr) - if stateObject != nil { - return stateObject.CodeSize() - } - return 0 -} - -func (s *StateDB) GetCodeHash(addr common.Address) common.Hash { - stateObject := s.getStateObject(addr) - if stateObject != nil { - return common.BytesToHash(stateObject.CodeHash()) - } - return common.Hash{} + NormalizeCoinID(&coinID) + return s.StateDB.GetState(addr, coinID).Big() } // GetState retrieves a value from the given account's storage trie. func (s *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash { - stateObject := s.getStateObject(addr) - if stateObject != nil { - NormalizeStateKey(&hash) - return stateObject.GetState(hash) - } - return common.Hash{} -} - -// GetCommittedState retrieves a value from the given account's committed storage trie. -func (s *StateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash { - stateObject := s.getStateObject(addr) - if stateObject != nil { - return stateObject.GetCommittedState(hash) - } - return common.Hash{} -} - -// Database retrieves the low level database supporting the lower level trie ops. -func (s *StateDB) Database() Database { - return s.db -} - -func (s *StateDB) HasSelfDestructed(addr common.Address) bool { - stateObject := s.getStateObject(addr) - if stateObject != nil { - return stateObject.selfDestructed - } - return false -} - -/* - * SETTERS - */ - -// AddBalance adds amount to the account associated with addr. -func (s *StateDB) AddBalance(addr common.Address, amount *uint256.Int) { - stateObject := s.getOrNewStateObject(addr) - if stateObject != nil { - stateObject.AddBalance(amount) - } -} - -// SubBalance subtracts amount from the account associated with addr. -func (s *StateDB) SubBalance(addr common.Address, amount *uint256.Int) { - stateObject := s.getOrNewStateObject(addr) - if stateObject != nil { - stateObject.SubBalance(amount) - } -} - -func (s *StateDB) SetBalance(addr common.Address, amount *uint256.Int) { - stateObject := s.getOrNewStateObject(addr) - if stateObject != nil { - stateObject.SetBalance(amount) - } + NormalizeStateKey(&hash) + return s.StateDB.GetState(addr, hash) } // AddBalance adds amount to the account associated with addr. func (s *StateDB) AddBalanceMultiCoin(addr common.Address, coinID common.Hash, amount *big.Int) { - stateObject := s.getOrNewStateObject(addr) - if stateObject != nil { - stateObject.AddBalanceMultiCoin(coinID, amount, s.db) - } -} - -// SubBalance subtracts amount from the account associated with addr. 
-func (s *StateDB) SubBalanceMultiCoin(addr common.Address, coinID common.Hash, amount *big.Int) { - stateObject := s.getOrNewStateObject(addr) - if stateObject != nil { - stateObject.SubBalanceMultiCoin(coinID, amount, s.db) - } -} - -func (s *StateDB) SetBalanceMultiCoin(addr common.Address, coinID common.Hash, amount *big.Int) { - stateObject := s.getOrNewStateObject(addr) - if stateObject != nil { - stateObject.SetBalanceMultiCoin(coinID, amount, s.db) - } -} - -func (s *StateDB) SetNonce(addr common.Address, nonce uint64) { - stateObject := s.getOrNewStateObject(addr) - if stateObject != nil { - stateObject.SetNonce(nonce) - } -} - -func (s *StateDB) SetCode(addr common.Address, code []byte) { - stateObject := s.getOrNewStateObject(addr) - if stateObject != nil { - stateObject.SetCode(crypto.Keccak256Hash(code), code) - } -} - -func (s *StateDB) SetState(addr common.Address, key, value common.Hash) { - stateObject := s.getOrNewStateObject(addr) - if stateObject != nil { - NormalizeStateKey(&key) - stateObject.SetState(key, value) - } -} - -// SetStorage replaces the entire storage for the specified account with given -// storage. This function should only be used for debugging and the mutations -// must be discarded afterwards. -func (s *StateDB) SetStorage(addr common.Address, storage map[common.Hash]common.Hash) { - // SetStorage needs to wipe existing storage. We achieve this by pretending - // that the account self-destructed earlier in this block, by flagging - // it in stateObjectsDestruct. The effect of doing so is that storage lookups - // will not hit disk, since it is assumed that the disk-data is belonging - // to a previous incarnation of the object. - // - // TODO(rjl493456442) this function should only be supported by 'unwritable' - // state and all mutations made should all be discarded afterwards. - if _, ok := s.stateObjectsDestruct[addr]; !ok { - s.stateObjectsDestruct[addr] = nil - } - stateObject := s.getOrNewStateObject(addr) - for k, v := range storage { - stateObject.SetState(k, v) - } -} - -// SelfDestruct marks the given account as selfdestructed. -// This clears the account balance. -// -// The account's state object is still available until the state is committed, -// getStateObject will return a non-nil account after SelfDestruct. -func (s *StateDB) SelfDestruct(addr common.Address) { - stateObject := s.getStateObject(addr) - if stateObject == nil { + if amount.Sign() == 0 { + s.AddBalance(addr, new(uint256.Int)) // used to cause touch return } - s.journal.append(selfDestructChange{ - account: &addr, - prev: stateObject.selfDestructed, - prevbalance: new(uint256.Int).Set(stateObject.Balance()), - }) - stateObject.markSelfdestructed() - stateObject.data.Balance = new(uint256.Int) -} - -func (s *StateDB) Selfdestruct6780(addr common.Address) { - stateObject := s.getStateObject(addr) - if stateObject == nil { - return - } - - if stateObject.created { - s.SelfDestruct(addr) + if !ethstate.GetExtra(s.StateDB, types.IsMultiCoinPayloads, addr) { + ethstate.SetExtra(s.StateDB, types.IsMultiCoinPayloads, addr, true) } + newAmount := new(big.Int).Add(s.GetBalanceMultiCoin(addr, coinID), amount) + NormalizeCoinID(&coinID) + s.StateDB.SetState(addr, coinID, common.BigToHash(newAmount)) } -// SetTransientState sets transient storage for a given account. It -// adds the change to the journal so that it can be rolled back -// to its previous value if there is a revert. 
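The new AddBalanceMultiCoin above no longer goes through a state object: it marks the account as multicoin via ethstate.SetExtra and then stores the updated balance directly in the normalized coin slot as a 32-byte big-endian value, which GetBalanceMultiCoin reads back with Hash.Big. A round trip of just that encoding (not part of the patch; it assumes a non-negative balance that fits in 32 bytes, and elides coin-ID normalization and the extras flag):

package main

import (
	"fmt"
	"math/big"

	"github.com/ava-labs/libevm/common"
)

func main() {
	balance := big.NewInt(1_000_000)
	delta := big.NewInt(42)

	// AddBalanceMultiCoin-style update: read the current balance out of the
	// slot, add the delta, and write the sum back as a 32-byte hash.
	slot := common.BigToHash(balance)
	newAmount := new(big.Int).Add(slot.Big(), delta)
	slot = common.BigToHash(newAmount)

	fmt.Println(slot.Big()) // 1000042
}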
-func (s *StateDB) SetTransientState(addr common.Address, key, value common.Hash) { - prev := s.GetTransientState(addr, key) - if prev == value { +// SubBalance subtracts amount from the account associated with addr. +func (s *StateDB) SubBalanceMultiCoin(addr common.Address, coinID common.Hash, amount *big.Int) { + if amount.Sign() == 0 { return } - s.journal.append(transientStorageChange{ - account: &addr, - key: key, - prevalue: prev, - }) - s.setTransientState(addr, key, value) -} - -// setTransientState is a lower level setter for transient storage. It -// is called during a revert to prevent modifications to the journal. -func (s *StateDB) setTransientState(addr common.Address, key, value common.Hash) { - s.transientStorage.Set(addr, key, value) -} - -// GetTransientState gets transient storage for a given account. -func (s *StateDB) GetTransientState(addr common.Address, key common.Hash) common.Hash { - return s.transientStorage.Get(addr, key) -} - -// -// Setting, updating & deleting state object methods. -// - -// updateStateObject writes the given object to the trie. -func (s *StateDB) updateStateObject(obj *stateObject) { - // Track the amount of time wasted on updating the account from the trie - if metrics.EnabledExpensive { - defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now()) - } - // Encode the account and update the account trie - addr := obj.Address() - if err := s.trie.UpdateAccount(addr, &obj.data); err != nil { - s.setError(fmt.Errorf("updateStateObject (%x) error: %v", addr[:], err)) - } - if obj.dirtyCode { - s.trie.UpdateContractCode(obj.Address(), common.BytesToHash(obj.CodeHash()), obj.code) - } - // Cache the data until commit. Note, this update mechanism is not symmetric - // to the deletion, because whereas it is enough to track account updates - // at commit time, deletions need tracking at transaction boundary level to - // ensure we capture state clearing. - s.accounts[obj.addrHash] = types.SlimAccountRLP(obj.data) - - // Track the original value of mutated account, nil means it was not present. - // Skip if it has been tracked (because updateStateObject may be called - // multiple times in a block). - if _, ok := s.accountsOrigin[obj.address]; !ok { - if obj.origin == nil { - s.accountsOrigin[obj.address] = nil - } else { - s.accountsOrigin[obj.address] = types.SlimAccountRLP(*obj.origin) - } + // Note: It's not needed to set the IsMultiCoin (extras) flag here, as this + // call would always be preceded by a call to AddBalanceMultiCoin, which would + // set the extra flag. Seems we should remove the redundant code. + if !ethstate.GetExtra(s.StateDB, types.IsMultiCoinPayloads, addr) { + ethstate.SetExtra(s.StateDB, types.IsMultiCoinPayloads, addr, true) } + newAmount := new(big.Int).Sub(s.GetBalanceMultiCoin(addr, coinID), amount) + NormalizeCoinID(&coinID) + s.StateDB.SetState(addr, coinID, common.BigToHash(newAmount)) } -// deleteStateObject removes the given object from the state trie. 
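The deleted transient-storage helpers above journal per-transaction values keyed by account and slot; that behaviour now comes from the embedded upstream StateDB. As a reminder of the semantics only (a toy model, not the upstream implementation), the values survive within a transaction and are simply discarded afterwards:

package main

import "fmt"

// transientStorage is a pared-down model: values keyed by (address, slot),
// living only for the duration of one transaction.
type transientStorage map[[20]byte]map[[32]byte][32]byte

func (t transientStorage) Set(addr [20]byte, key, value [32]byte) {
	if _, ok := t[addr]; !ok {
		t[addr] = map[[32]byte][32]byte{}
	}
	t[addr][key] = value
}

func (t transientStorage) Get(addr [20]byte, key [32]byte) [32]byte {
	return t[addr][key] // zero value if unset
}

func main() {
	ts := transientStorage{}
	var addr [20]byte
	var key, val [32]byte
	val[31] = 0x07

	ts.Set(addr, key, val)
	fmt.Println(ts.Get(addr, key) == val) // true within the same transaction

	// Between transactions the structure is simply replaced, which is what
	// makes the storage transient.
	ts = transientStorage{}
	fmt.Println(ts.Get(addr, key) == [32]byte{}) // true: cleared
}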
-func (s *StateDB) deleteStateObject(obj *stateObject) { - // Track the amount of time wasted on deleting the account from the trie - if metrics.EnabledExpensive { - defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now()) - } - // Delete the account from the trie - addr := obj.Address() - if err := s.trie.DeleteAccount(addr); err != nil { - s.setError(fmt.Errorf("deleteStateObject (%x) error: %v", addr[:], err)) - } -} - -// getStateObject retrieves a state object given by the address, returning nil if -// the object is not found or was deleted in this execution context. If you need -// to differentiate between non-existent/just-deleted, use getDeletedStateObject. -func (s *StateDB) getStateObject(addr common.Address) *stateObject { - if obj := s.getDeletedStateObject(addr); obj != nil && !obj.deleted { - return obj - } - return nil -} - -// getDeletedStateObject is similar to getStateObject, but instead of returning -// nil for a deleted state object, it returns the actual object with the deleted -// flag set. This is needed by the state journal to revert to the correct s- -// destructed object instead of wiping all knowledge about the state object. -func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject { - // Prefer live objects if any is available - if obj := s.stateObjects[addr]; obj != nil { - return obj - } - // If no live objects are available, attempt to use snapshots - var data *types.StateAccount - if s.snap != nil { - start := time.Now() - acc, err := s.snap.AccountRLP(crypto.HashData(s.hasher, addr.Bytes())) - if metrics.EnabledExpensive { - s.SnapshotAccountReads += time.Since(start) - } - if err == nil { - if len(acc) == 0 { - return nil - } - // XXX: This is temporary until using the upstream statedb. - // Otherwise we must set IsMultiCoin based on - data, err = types.FullAccount(acc) - if err != nil { - s.setError(fmt.Errorf("getDeletedStateObject (%x) error: %w", addr.Bytes(), err)) - return nil - } - } - } - // If snapshot unavailable or reading from it failed, load from the database - if data == nil { - start := time.Now() - var err error - data, err = s.trie.GetAccount(addr) - if metrics.EnabledExpensive { - s.AccountReads += time.Since(start) - } - if err != nil { - s.setError(fmt.Errorf("getDeleteStateObject (%x) error: %w", addr.Bytes(), err)) - return nil - } - if data == nil { - return nil - } - } - // Insert into the live set - obj := newObject(s, addr, data) - s.setStateObject(obj) - return obj -} - -func (s *StateDB) setStateObject(object *stateObject) { - s.stateObjects[object.Address()] = object -} - -func (s *StateDB) GetOrNewStateObject(addr common.Address) *stateObject { - return s.getOrNewStateObject(addr) -} - -// getOrNewStateObject retrieves a state object or create a new state object if nil. -func (s *StateDB) getOrNewStateObject(addr common.Address) *stateObject { - stateObject := s.getStateObject(addr) - if stateObject == nil { - stateObject, _ = s.createObject(addr) - } - return stateObject -} - -// createObject creates a new state object. If there is an existing account with -// the given address, it is overwritten and returned as the second return value. -func (s *StateDB) createObject(addr common.Address) (newobj, prev *stateObject) { - prev = s.getDeletedStateObject(addr) // Note, prev might have been deleted, we need that! 
- newobj = newObject(s, addr, nil) - if prev == nil { - s.journal.append(createObjectChange{account: &addr}) - } else { - // The original account should be marked as destructed and all cached - // account and storage data should be cleared as well. Note, it must - // be done here, otherwise the destruction event of "original account" - // will be lost. - _, prevdestruct := s.stateObjectsDestruct[prev.address] - if !prevdestruct { - s.stateObjectsDestruct[prev.address] = prev.origin - } - // There may be some cached account/storage data already since IntermediateRoot - // will be called for each transaction before byzantium fork which will always - // cache the latest account/storage data. - prevAccount, ok := s.accountsOrigin[prev.address] - s.journal.append(resetObjectChange{ - account: &addr, - prev: prev, - prevdestruct: prevdestruct, - prevAccount: s.accounts[prev.addrHash], - prevStorage: s.storages[prev.addrHash], - prevAccountOriginExist: ok, - prevAccountOrigin: prevAccount, - prevStorageOrigin: s.storagesOrigin[prev.address], - }) - delete(s.accounts, prev.addrHash) - delete(s.storages, prev.addrHash) - delete(s.accountsOrigin, prev.address) - delete(s.storagesOrigin, prev.address) - } - s.setStateObject(newobj) - if prev != nil && !prev.deleted { - return newobj, prev - } - return newobj, nil -} - -// CreateAccount explicitly creates a state object. If a state object with the address -// already exists the balance is carried over to the new account. -// -// CreateAccount is called during the EVM CREATE operation. The situation might arise that -// a contract does the following: -// -// 1. sends funds to sha(account ++ (nonce + 1)) -// 2. tx_create(sha(account ++ nonce)) (note that this gets the address of 1) -// -// Carrying over the balance ensures that Ether doesn't disappear. -func (s *StateDB) CreateAccount(addr common.Address) { - newObj, prev := s.createObject(addr) - if prev != nil { - newObj.setBalance(prev.data.Balance) - } -} - -// Copy creates a deep, independent copy of the state. -// Snapshots of the copied state cannot be applied to the copy. -func (s *StateDB) Copy() *StateDB { - // Copy all the basic fields, initialize the memory ones - state := &StateDB{ - db: s.db, - trie: s.db.CopyTrie(s.trie), - originalRoot: s.originalRoot, - accounts: make(map[common.Hash][]byte), - storages: make(map[common.Hash]map[common.Hash][]byte), - accountsOrigin: make(map[common.Address][]byte), - storagesOrigin: make(map[common.Address]map[common.Hash][]byte), - stateObjects: make(map[common.Address]*stateObject, len(s.journal.dirties)), - stateObjectsPending: make(map[common.Address]struct{}, len(s.stateObjectsPending)), - stateObjectsDirty: make(map[common.Address]struct{}, len(s.journal.dirties)), - stateObjectsDestruct: make(map[common.Address]*types.StateAccount, len(s.stateObjectsDestruct)), - refund: s.refund, - logs: make(map[common.Hash][]*types.Log, len(s.logs)), - logSize: s.logSize, - preimages: make(map[common.Hash][]byte, len(s.preimages)), - journal: newJournal(), - hasher: crypto.NewKeccakState(), - - // In order for the block producer to be able to use and make additions - // to the snapshot tree, we need to copy that as well. Otherwise, any - // block mined by ourselves will cause gaps in the tree, and force the - // miner to operate trie-backed only. 
- snaps: s.snaps, - snap: s.snap, - } - // Copy the dirty states, logs, and preimages - for addr := range s.journal.dirties { - // As documented [here](https://github.com/ethereum/go-ethereum/pull/16485#issuecomment-380438527), - // and in the Finalise-method, there is a case where an object is in the journal but not - // in the stateObjects: OOG after touch on ripeMD prior to Byzantium. Thus, we need to check for - // nil - if object, exist := s.stateObjects[addr]; exist { - // Even though the original object is dirty, we are not copying the journal, - // so we need to make sure that any side-effect the journal would have caused - // during a commit (or similar op) is already applied to the copy. - state.stateObjects[addr] = object.deepCopy(state) - - state.stateObjectsDirty[addr] = struct{}{} // Mark the copy dirty to force internal (code/state) commits - state.stateObjectsPending[addr] = struct{}{} // Mark the copy pending to force external (account) commits - } - } - // Above, we don't copy the actual journal. This means that if the copy - // is copied, the loop above will be a no-op, since the copy's journal - // is empty. Thus, here we iterate over stateObjects, to enable copies - // of copies. - for addr := range s.stateObjectsPending { - if _, exist := state.stateObjects[addr]; !exist { - state.stateObjects[addr] = s.stateObjects[addr].deepCopy(state) - } - state.stateObjectsPending[addr] = struct{}{} - } - for addr := range s.stateObjectsDirty { - if _, exist := state.stateObjects[addr]; !exist { - state.stateObjects[addr] = s.stateObjects[addr].deepCopy(state) - } - state.stateObjectsDirty[addr] = struct{}{} - } - // Deep copy the destruction markers. - for addr, value := range s.stateObjectsDestruct { - state.stateObjectsDestruct[addr] = value - } - // Deep copy the state changes made in the scope of block - // along with their original values. - state.accounts = copySet(s.accounts) - state.storages = copy2DSet(s.storages) - state.accountsOrigin = copySet(state.accountsOrigin) - state.storagesOrigin = copy2DSet(state.storagesOrigin) - - // Deep copy the logs occurred in the scope of block - for hash, logs := range s.logs { - cpy := make([]*types.Log, len(logs)) - for i, l := range logs { - cpy[i] = new(types.Log) - *cpy[i] = *l - } - state.logs[hash] = cpy - } - // Deep copy the preimages occurred in the scope of block - for hash, preimage := range s.preimages { - state.preimages[hash] = preimage - } - // Do we need to copy the access list and transient storage? - // In practice: No. At the start of a transaction, these two lists are empty. - // In practice, we only ever copy state _between_ transactions/blocks, never - // in the middle of a transaction. However, it doesn't cost us much to copy - // empty lists, so we do it anyway to not blow up if we ever decide copy them - // in the middle of a transaction. - state.accessList = s.accessList.Copy() - state.transientStorage = s.transientStorage.Copy() - - // If there's a prefetcher running, make an inactive copy of it that can - // only access data but does not actively preload (since the user will not - // know that they need to explicitly terminate an active copy). - if s.prefetcher != nil { - state.prefetcher = s.prefetcher.copy() - } - return state -} - -// Snapshot returns an identifier for the current revision of the state. 
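Snapshot and RevertToSnapshot, whose bodies follow, are built on the journal kept by the deleted code: Snapshot records the current journal length under a fresh revision id, and a revert replays the journal backwards down to that point. A toy model of the idea (not part of the patch; revision ids and journal indices are collapsed into one number here, unlike the real code):

package main

import "fmt"

// journalEntry undoes a single mutation.
type journalEntry func()

type toyState struct {
	balance int
	journal []journalEntry
}

func (s *toyState) SetBalance(v int) {
	prev := s.balance
	s.journal = append(s.journal, func() { s.balance = prev }) // record the undo first
	s.balance = v
}

// Snapshot returns the current journal length as the revision identifier.
func (s *toyState) Snapshot() int { return len(s.journal) }

// RevertToSnapshot replays the journal backwards down to the recorded length.
func (s *toyState) RevertToSnapshot(id int) {
	for i := len(s.journal) - 1; i >= id; i-- {
		s.journal[i]()
	}
	s.journal = s.journal[:id]
}

func main() {
	s := &toyState{}
	s.SetBalance(10)
	id := s.Snapshot()
	s.SetBalance(99)
	s.RevertToSnapshot(id)
	fmt.Println(s.balance) // 10
}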
-func (s *StateDB) Snapshot() int { - id := s.nextRevisionId - s.nextRevisionId++ - s.validRevisions = append(s.validRevisions, revision{id, s.journal.length()}) - return id -} - -// RevertToSnapshot reverts all state changes made since the given revision. -func (s *StateDB) RevertToSnapshot(revid int) { - // Find the snapshot in the stack of valid snapshots. - idx := sort.Search(len(s.validRevisions), func(i int) bool { - return s.validRevisions[i].id >= revid - }) - if idx == len(s.validRevisions) || s.validRevisions[idx].id != revid { - panic(fmt.Errorf("revision id %v cannot be reverted", revid)) - } - snapshot := s.validRevisions[idx].journalIndex - - // Replay the journal to undo changes and remove invalidated snapshots - s.journal.revert(s, snapshot) - s.validRevisions = s.validRevisions[:idx] -} - -// GetRefund returns the current value of the refund counter. -func (s *StateDB) GetRefund() uint64 { - return s.refund -} - -// Finalise finalises the state by removing the destructed objects and clears -// the journal as well as the refunds. Finalise, however, will not push any updates -// into the tries just yet. Only IntermediateRoot or Commit will do that. -func (s *StateDB) Finalise(deleteEmptyObjects bool) { - addressesToPrefetch := make([][]byte, 0, len(s.journal.dirties)) - for addr := range s.journal.dirties { - obj, exist := s.stateObjects[addr] - if !exist { - // ripeMD is 'touched' at block 1714175, in tx 0x1237f737031e40bcde4a8b7e717b2d15e3ecadfe49bb1bbc71ee9deb09c6fcf2 - // That tx goes out of gas, and although the notion of 'touched' does not exist there, the - // touch-event will still be recorded in the journal. Since ripeMD is a special snowflake, - // it will persist in the journal even though the journal is reverted. In this special circumstance, - // it may exist in `s.journal.dirties` but not in `s.stateObjects`. - // Thus, we can safely ignore it here - continue - } - if obj.selfDestructed || (deleteEmptyObjects && obj.empty()) { - obj.deleted = true - - // We need to maintain account deletions explicitly (will remain - // set indefinitely). Note only the first occurred self-destruct - // event is tracked. - if _, ok := s.stateObjectsDestruct[obj.address]; !ok { - s.stateObjectsDestruct[obj.address] = obj.origin - } - // Note, we can't do this only at the end of a block because multiple - // transactions within the same block might self destruct and then - // resurrect an account; but the snapshotter needs both events. - delete(s.accounts, obj.addrHash) // Clear out any previously updated account data (may be recreated via a resurrect) - delete(s.storages, obj.addrHash) // Clear out any previously updated storage data (may be recreated via a resurrect) - delete(s.accountsOrigin, obj.address) // Clear out any previously updated account data (may be recreated via a resurrect) - delete(s.storagesOrigin, obj.address) // Clear out any previously updated storage data (may be recreated via a resurrect) - } else { - obj.finalise(true) // Prefetch slots in the background - } - obj.created = false - s.stateObjectsPending[addr] = struct{}{} - s.stateObjectsDirty[addr] = struct{}{} - - // At this point, also ship the address off to the precacher. 
The precacher - // will start loading tries, and when the change is eventually committed, - // the commit-phase will be a lot faster - addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure - } - if s.prefetcher != nil && len(addressesToPrefetch) > 0 { - s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, addressesToPrefetch) - } - // Invalidate journal because reverting across transactions is not allowed. - s.clearJournalAndRefund() -} - -// IntermediateRoot computes the current root hash of the state trie. -// It is called in between transactions to get the root hash that -// goes into transaction receipts. -func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { - // Finalise all the dirty storage states and write them into the tries - s.Finalise(deleteEmptyObjects) - - // If there was a trie prefetcher operating, it gets aborted and irrevocably - // modified after we start retrieving tries. Remove it from the statedb after - // this round of use. - // - // This is weird pre-byzantium since the first tx runs with a prefetcher and - // the remainder without, but pre-byzantium even the initial prefetcher is - // useless, so no sleep lost. - prefetcher := s.prefetcher - if s.prefetcher != nil { - defer func() { - s.prefetcher.close() - s.prefetcher = nil - }() - } - // Although naively it makes sense to retrieve the account trie and then do - // the contract storage and account updates sequentially, that short circuits - // the account prefetcher. Instead, let's process all the storage updates - // first, giving the account prefetches just a few more milliseconds of time - // to pull useful data from disk. - for addr := range s.stateObjectsPending { - if obj := s.stateObjects[addr]; !obj.deleted { - obj.updateRoot() - } - } - // Now we're about to start to write changes to the trie. The trie is so far - // _untouched_. We can check with the prefetcher, if it can give us a trie - // which has the same root, but also has some content loaded into it. 
- if prefetcher != nil { - if trie := prefetcher.trie(common.Hash{}, s.originalRoot); trie != nil { - s.trie = trie - } - } - usedAddrs := make([][]byte, 0, len(s.stateObjectsPending)) - for addr := range s.stateObjectsPending { - if obj := s.stateObjects[addr]; obj.deleted { - s.deleteStateObject(obj) - s.AccountDeleted += 1 - } else { - s.updateStateObject(obj) - s.AccountUpdated += 1 - } - usedAddrs = append(usedAddrs, common.CopyBytes(addr[:])) // Copy needed for closure - } - if prefetcher != nil { - prefetcher.used(common.Hash{}, s.originalRoot, usedAddrs) - } - if len(s.stateObjectsPending) > 0 { - s.stateObjectsPending = make(map[common.Address]struct{}) - } - // Track the amount of time wasted on hashing the account trie - if metrics.EnabledExpensive { - defer func(start time.Time) { s.AccountHashes += time.Since(start) }(time.Now()) - } - return s.trie.Hash() +func (s *StateDB) SetState(addr common.Address, key, value common.Hash) { + NormalizeStateKey(&key) + s.StateDB.SetState(addr, key, value) } // SetTxContext sets the current transaction hash and index which are @@ -1001,443 +132,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { func (s *StateDB) SetTxContext(thash common.Hash, ti int) { s.thash = thash s.txIndex = ti -} - -func (s *StateDB) clearJournalAndRefund() { - if len(s.journal.entries) > 0 { - s.journal = newJournal() - s.refund = 0 - } - s.validRevisions = s.validRevisions[:0] // Snapshots can be created without journal entries -} - -// fastDeleteStorage is the function that efficiently deletes the storage trie -// of a specific account. It leverages the associated state snapshot for fast -// storage iteration and constructs trie node deletion markers by creating -// stack trie with iterated slots. -func (s *StateDB) fastDeleteStorage(addrHash common.Hash, root common.Hash) (bool, common.StorageSize, map[common.Hash][]byte, *trienode.NodeSet, error) { - iter, err := s.snaps.StorageIterator(s.originalRoot, addrHash, common.Hash{}) - if err != nil { - return false, 0, nil, nil, err - } - defer iter.Release() - - var ( - size common.StorageSize - nodes = trienode.NewNodeSet(addrHash) - slots = make(map[common.Hash][]byte) - ) - options := trie.NewStackTrieOptions() - options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) { - nodes.AddNode(path, trienode.NewDeleted()) - size += common.StorageSize(len(path)) - }) - stack := trie.NewStackTrie(options) - for iter.Next() { - if size > storageDeleteLimit { - return true, size, nil, nil, nil - } - slot := common.CopyBytes(iter.Slot()) - if err := iter.Error(); err != nil { // error might occur after Slot function - return false, 0, nil, nil, err - } - size += common.StorageSize(common.HashLength + len(slot)) - slots[iter.Hash()] = slot - - if err := stack.Update(iter.Hash().Bytes(), slot); err != nil { - return false, 0, nil, nil, err - } - } - if err := iter.Error(); err != nil { // error might occur during iteration - return false, 0, nil, nil, err - } - if stack.Hash() != root { - return false, 0, nil, nil, fmt.Errorf("snapshot is not matched, exp %x, got %x", root, stack.Hash()) - } - return false, size, slots, nodes, nil -} - -// slowDeleteStorage serves as a less-efficient alternative to "fastDeleteStorage," -// employed when the associated state snapshot is not available. It iterates the -// storage slots along with all internal trie nodes via trie directly. 
-func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (bool, common.StorageSize, map[common.Hash][]byte, *trienode.NodeSet, error) { - tr, err := s.db.OpenStorageTrie(s.originalRoot, addr, root, s.trie) - if err != nil { - return false, 0, nil, nil, fmt.Errorf("failed to open storage trie, err: %w", err) - } - it, err := tr.NodeIterator(nil) - if err != nil { - return false, 0, nil, nil, fmt.Errorf("failed to open storage iterator, err: %w", err) - } - var ( - size common.StorageSize - nodes = trienode.NewNodeSet(addrHash) - slots = make(map[common.Hash][]byte) - ) - for it.Next(true) { - if size > storageDeleteLimit { - return true, size, nil, nil, nil - } - if it.Leaf() { - slots[common.BytesToHash(it.LeafKey())] = common.CopyBytes(it.LeafBlob()) - size += common.StorageSize(common.HashLength + len(it.LeafBlob())) - continue - } - if it.Hash() == (common.Hash{}) { - continue - } - size += common.StorageSize(len(it.Path())) - nodes.AddNode(it.Path(), trienode.NewDeleted()) - } - if err := it.Error(); err != nil { - return false, 0, nil, nil, err - } - return false, size, slots, nodes, nil -} - -// deleteStorage is designed to delete the storage trie of a designated account. -// It could potentially be terminated if the storage size is excessively large, -// potentially leading to an out-of-memory panic. The function will make an attempt -// to utilize an efficient strategy if the associated state snapshot is reachable; -// otherwise, it will resort to a less-efficient approach. -func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (bool, map[common.Hash][]byte, *trienode.NodeSet, error) { - var ( - start = time.Now() - err error - aborted bool - size common.StorageSize - slots map[common.Hash][]byte - nodes *trienode.NodeSet - ) - // The fast approach can be failed if the snapshot is not fully - // generated, or it's internally corrupted. Fallback to the slow - // one just in case. - if s.snap != nil { - aborted, size, slots, nodes, err = s.fastDeleteStorage(addrHash, root) - } - if s.snap == nil || err != nil { - aborted, size, slots, nodes, err = s.slowDeleteStorage(addr, addrHash, root) - } - if err != nil { - return false, nil, nil, err - } - if metrics.EnabledExpensive { - if aborted { - slotDeletionSkip.Inc(1) - } - n := int64(len(slots)) - - slotDeletionMaxCount.UpdateIfGt(int64(len(slots))) - slotDeletionMaxSize.UpdateIfGt(int64(size)) - - slotDeletionTimer.UpdateSince(start) - slotDeletionCount.Mark(n) - slotDeletionSize.Mark(int64(size)) - } - return aborted, slots, nodes, nil -} - -// handleDestruction processes all destruction markers and deletes the account -// and associated storage slots if necessary. There are four possible situations -// here: -// -// - the account was not existent and be marked as destructed -// -// - the account was not existent and be marked as destructed, -// however, it's resurrected later in the same block. -// -// - the account was existent and be marked as destructed -// -// - the account was existent and be marked as destructed, -// however it's resurrected later in the same block. -// -// In case (a), nothing needs be deleted, nil to nil transition can be ignored. -// -// In case (b), nothing needs be deleted, nil is used as the original value for -// newly created account and storages -// -// In case (c), **original** account along with its storages should be deleted, -// with their values be tracked as original value. 
-// -// In case (d), **original** account along with its storages should be deleted, -// with their values be tracked as original value. -func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) (map[common.Address]struct{}, error) { - // Short circuit if geth is running with hash mode. This procedure can consume - // considerable time and storage deletion isn't supported in hash mode, thus - // preemptively avoiding unnecessary expenses. - incomplete := make(map[common.Address]struct{}) - if s.db.TrieDB().Scheme() == rawdb.HashScheme { - return incomplete, nil - } - for addr, prev := range s.stateObjectsDestruct { - // The original account was non-existing, and it's marked as destructed - // in the scope of block. It can be case (a) or (b). - // - for (a), skip it without doing anything. - // - for (b), track account's original value as nil. It may overwrite - // the data cached in s.accountsOrigin set by 'updateStateObject'. - addrHash := crypto.Keccak256Hash(addr[:]) - if prev == nil { - if _, ok := s.accounts[addrHash]; ok { - s.accountsOrigin[addr] = nil // case (b) - } - continue - } - // It can overwrite the data in s.accountsOrigin set by 'updateStateObject'. - s.accountsOrigin[addr] = types.SlimAccountRLP(*prev) // case (c) or (d) - - // Short circuit if the storage was empty. - if prev.Root == types.EmptyRootHash { - continue - } - // Remove storage slots belong to the account. - aborted, slots, set, err := s.deleteStorage(addr, addrHash, prev.Root) - if err != nil { - return nil, fmt.Errorf("failed to delete storage, err: %w", err) - } - // The storage is too huge to handle, skip it but mark as incomplete. - // For case (d), the account is resurrected might with a few slots - // created. In this case, wipe the entire storage state diff because - // of aborted deletion. - if aborted { - incomplete[addr] = struct{}{} - delete(s.storagesOrigin, addr) - continue - } - if s.storagesOrigin[addr] == nil { - s.storagesOrigin[addr] = slots - } else { - // It can overwrite the data in s.storagesOrigin[addrHash] set by - // 'object.updateTrie'. - for key, val := range slots { - s.storagesOrigin[addr][key] = val - } - } - if err := nodes.Merge(set); err != nil { - return nil, err - } - } - return incomplete, nil -} - -// Commit writes the state to the underlying in-memory trie database. -// Once the state is committed, tries cached in stateDB (including account -// trie, storage tries) will no longer be functional. A new state instance -// must be created with new root and updated database for accessing post- -// commit states. -// -// The associated block number of the state transition is also provided -// for more chain context. -func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, error) { - // Short circuit in case any database failure occurred earlier. 
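// deleteStorage, called from handleDestruction above, prefers the
// snapshot-backed fast path and only falls back to direct trie iteration when
// no snapshot is available or the fast path errors. The control flow reduced
// to its essentials; fastDelete and slowDelete are hypothetical stand-ins for
// the two methods being removed here.
func deleteWithFallbackSketch(haveSnapshot bool,
	fastDelete, slowDelete func() (aborted bool, err error)) (bool, error) {
	var (
		aborted bool
		err     error
	)
	if haveSnapshot {
		aborted, err = fastDelete()
	}
	if !haveSnapshot || err != nil {
		// The fast path can fail if the snapshot is not fully generated or is
		// corrupted; retry with the slower but always-available trie walk.
		aborted, err = slowDelete()
	}
	return aborted, err
}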
- if s.dbErr != nil { - return common.Hash{}, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr) - } - // Finalize any pending changes and merge everything into the tries - s.IntermediateRoot(deleteEmptyObjects) - - // Commit objects to the trie, measuring the elapsed time - var ( - accountTrieNodesUpdated int - accountTrieNodesDeleted int - storageTrieNodesUpdated int - storageTrieNodesDeleted int - nodes = trienode.NewMergedNodeSet() - codeWriter = s.db.DiskDB().NewBatch() - ) - // Handle all state deletions first - incomplete, err := s.handleDestruction(nodes) - if err != nil { - return common.Hash{}, err - } - // Handle all state updates afterwards - for addr := range s.stateObjectsDirty { - obj := s.stateObjects[addr] - if obj.deleted { - continue - } - // Write any contract code associated with the state object - if obj.code != nil && obj.dirtyCode { - rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code) - obj.dirtyCode = false - } - // Write any storage changes in the state object to its storage trie - set, err := obj.commit() - if err != nil { - return common.Hash{}, err - } - // Merge the dirty nodes of storage trie into global set. It is possible - // that the account was destructed and then resurrected in the same block. - // In this case, the node set is shared by both accounts. - if set != nil { - if err := nodes.Merge(set); err != nil { - return common.Hash{}, err - } - updates, deleted := set.Size() - storageTrieNodesUpdated += updates - storageTrieNodesDeleted += deleted - } - } - if codeWriter.ValueSize() > 0 { - if err := codeWriter.Write(); err != nil { - log.Crit("Failed to commit dirty codes", "error", err) - } - } - // Write the account trie changes, measuring the amount of wasted time - var start time.Time - if metrics.EnabledExpensive { - start = time.Now() - } - root, set, err := s.trie.Commit(true) - if err != nil { - return common.Hash{}, err - } - // Merge the dirty nodes of account trie into global set - if set != nil { - if err := nodes.Merge(set); err != nil { - return common.Hash{}, err - } - accountTrieNodesUpdated, accountTrieNodesDeleted = set.Size() - } - if metrics.EnabledExpensive { - s.AccountCommits += time.Since(start) - - accountUpdatedMeter.Mark(int64(s.AccountUpdated)) - storageUpdatedMeter.Mark(int64(s.StorageUpdated)) - accountDeletedMeter.Mark(int64(s.AccountDeleted)) - storageDeletedMeter.Mark(int64(s.StorageDeleted)) - accountTrieUpdatedMeter.Mark(int64(accountTrieNodesUpdated)) - accountTrieDeletedMeter.Mark(int64(accountTrieNodesDeleted)) - storageTriesUpdatedMeter.Mark(int64(storageTrieNodesUpdated)) - storageTriesDeletedMeter.Mark(int64(storageTrieNodesDeleted)) - s.AccountUpdated, s.AccountDeleted = 0, 0 - s.StorageUpdated, s.StorageDeleted = 0, 0 - } - // If snapshotting is enabled, update the snapshot tree with this new version - if s.snap != nil { - start := time.Now() - // Only update if there's a state transition (skip empty Clique blocks) - if parent := s.snap.Root(); parent != root { - if err := s.snaps.Update(root, parent, s.convertAccountSet(s.stateObjectsDestruct), s.accounts, s.storages); err != nil { - log.Warn("Failed to update snapshot tree", "from", parent, "to", root, "err", err) - } - // Keep 128 diff layers in the memory, persistent layer is 129th. 
- // - head layer is paired with HEAD state - // - head-1 layer is paired with HEAD-1 state - // - head-127 layer(bottom-most diff layer) is paired with HEAD-127 state - if err := s.snaps.Cap(root, 128); err != nil { - log.Warn("Failed to cap snapshot tree", "root", root, "layers", 128, "err", err) - } - } - if metrics.EnabledExpensive { - s.SnapshotCommits += time.Since(start) - } - s.snap = nil - } - if root == (common.Hash{}) { - root = types.EmptyRootHash - } - origin := s.originalRoot - if origin == (common.Hash{}) { - origin = types.EmptyRootHash - } - if root != origin { - start := time.Now() - set := triestate.New(s.accountsOrigin, s.storagesOrigin, incomplete) - if err := s.db.TrieDB().Update(root, origin, block, nodes, set); err != nil { - return common.Hash{}, err - } - s.originalRoot = root - if metrics.EnabledExpensive { - s.TrieDBCommits += time.Since(start) - } - if s.onCommit != nil { - s.onCommit(set) - } - } - // Clear all internal flags at the end of commit operation. - s.accounts = make(map[common.Hash][]byte) - s.storages = make(map[common.Hash]map[common.Hash][]byte) - s.accountsOrigin = make(map[common.Address][]byte) - s.storagesOrigin = make(map[common.Address]map[common.Hash][]byte) - s.stateObjectsDirty = make(map[common.Address]struct{}) - s.stateObjectsDestruct = make(map[common.Address]*types.StateAccount) - return root, nil -} - -// Prepare handles the preparatory steps for executing a state transition with. -// This method must be invoked before state transition. -// -// Berlin fork: -// - Add sender to access list (2929) -// - Add destination to access list (2929) -// - Add precompiles to access list (2929) -// - Add the contents of the optional tx access list (2930) -// -// Potential EIPs: -// - Reset access list (Berlin) -// - Add coinbase to access list (EIP-3651) -// - Reset transient storage (EIP-1153) -func (s *StateDB) Prepare(rules params.Rules, sender, coinbase common.Address, dst *common.Address, precompiles []common.Address, list types.AccessList) { - if rules.IsBerlin { - // Clear out any leftover from previous executions - al := newAccessList() - s.accessList = al - - al.AddAddress(sender) - if dst != nil { - al.AddAddress(*dst) - // If it's a create-tx, the destination will be added inside evm.create - } - for _, addr := range precompiles { - al.AddAddress(addr) - } - for _, el := range list { - al.AddAddress(el.Address) - for _, key := range el.StorageKeys { - al.AddSlot(el.Address, key) - } - } - if rules.IsShanghai { // EIP-3651: warm coinbase - al.AddAddress(coinbase) - } - } - // Reset transient storage at the beginning of transaction execution - s.transientStorage = newTransientStorage() -} - -// AddAddressToAccessList adds the given address to the access list -func (s *StateDB) AddAddressToAccessList(addr common.Address) { - if s.accessList.AddAddress(addr) { - s.journal.append(accessListAddAccountChange{&addr}) - } -} - -// AddSlotToAccessList adds the given (address, slot)-tuple to the access list -func (s *StateDB) AddSlotToAccessList(addr common.Address, slot common.Hash) { - addrMod, slotMod := s.accessList.AddSlot(addr, slot) - if addrMod { - // In practice, this should not happen, since there is no way to enter the - // scope of 'address' without having the 'address' become already added - // to the access list (via call-variant, create, etc). 
- // Better safe than sorry, though - s.journal.append(accessListAddAccountChange{&addr}) - } - if slotMod { - s.journal.append(accessListAddSlotChange{ - address: &addr, - slot: &slot, - }) - } -} - -// AddressInAccessList returns true if the given address is in the access list. -func (s *StateDB) AddressInAccessList(addr common.Address) bool { - return s.accessList.ContainsAddress(addr) -} - -// SlotInAccessList returns true if the given (address, slot)-tuple is in the access list. -func (s *StateDB) SlotInAccessList(addr common.Address, slot common.Hash) (addressPresent bool, slotPresent bool) { - return s.accessList.Contains(addr, slot) + s.StateDB.SetTxContext(thash, ti) } // GetTxHash returns the current tx hash on the StateDB set by SetTxContext. @@ -1445,37 +140,28 @@ func (s *StateDB) GetTxHash() common.Hash { return s.thash } -// convertAccountSet converts a provided account set from address keyed to hash keyed. -func (s *StateDB) convertAccountSet(set map[common.Address]*types.StateAccount) map[common.Hash]struct{} { - ret := make(map[common.Hash]struct{}, len(set)) - for addr := range set { - obj, exist := s.stateObjects[addr] - if !exist { - ret[crypto.Keccak256Hash(addr[:])] = struct{}{} - } else { - ret[obj.addrHash] = struct{}{} - } +func (s *StateDB) Copy() *StateDB { + return &StateDB{ + StateDB: s.StateDB.Copy(), + db: s.db, + snaps: s.snaps, + thash: s.thash, + txIndex: s.txIndex, } - return ret } -// copySet returns a deep-copied set. -func copySet[k comparable](set map[k][]byte) map[k][]byte { - copied := make(map[k][]byte, len(set)) - for key, val := range set { - copied[key] = common.CopyBytes(val) - } - return copied +// NormalizeCoinID ORs the 0th bit of the first byte in +// [coinID], which ensures this bit will be 1 and all other +// bits are left the same. +// This partitions multicoin storage from normal state storage. +func NormalizeCoinID(coinID *common.Hash) { + coinID[0] |= 0x01 } -// copy2DSet returns a two-dimensional deep-copied set. -func copy2DSet[k comparable](set map[k]map[common.Hash][]byte) map[k]map[common.Hash][]byte { - copied := make(map[k]map[common.Hash][]byte, len(set)) - for addr, subset := range set { - copied[addr] = make(map[common.Hash][]byte, len(subset)) - for key, val := range subset { - copied[addr][key] = common.CopyBytes(val) - } - } - return copied +// NormalizeStateKey ANDs the 0th bit of the first byte in +// [key], which ensures this bit will be 0 and all other bits +// are left the same. +// This partitions normal state storage from multicoin storage. +func NormalizeStateKey(key *common.Hash) { + key[0] &= 0xfe } diff --git a/core/state/statedb_fuzz_test.go b/core/state/statedb_fuzz_test.go deleted file mode 100644 index 8d66eafe82..0000000000 --- a/core/state/statedb_fuzz_test.go +++ /dev/null @@ -1,405 +0,0 @@ -// (c) 2024, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2023 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see - -package state - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "math" - "math/rand" - "reflect" - "strings" - "testing" - "testing/quick" - - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/state/snapshot" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/triedb/pathdb" - "github.com/ava-labs/libevm/common" - "github.com/ava-labs/libevm/crypto" - "github.com/ava-labs/libevm/rlp" - "github.com/ava-labs/libevm/trie" - "github.com/ava-labs/libevm/trie/triestate" - "github.com/ava-labs/libevm/triedb" - "github.com/holiman/uint256" -) - -// A stateTest checks that the state changes are correctly captured. Instances -// of this test with pseudorandom content are created by Generate. -// -// The test works as follows: -// -// A list of states are created by applying actions. The state changes between -// each state instance are tracked and be verified. -type stateTest struct { - addrs []common.Address // all account addresses - actions [][]testAction // modifications to the state, grouped by block - chunk int // The number of actions per chunk - err error // failure details are reported through this field -} - -// newStateTestAction creates a random action that changes state. -func newStateTestAction(addr common.Address, r *rand.Rand, index int) testAction { - actions := []testAction{ - { - name: "SetBalance", - fn: func(a testAction, s *StateDB) { - s.SetBalance(addr, uint256.NewInt(uint64(a.args[0]))) - }, - args: make([]int64, 1), - }, - { - name: "SetNonce", - fn: func(a testAction, s *StateDB) { - s.SetNonce(addr, uint64(a.args[0])) - }, - args: make([]int64, 1), - }, - { - name: "SetState", - fn: func(a testAction, s *StateDB) { - var key, val common.Hash - binary.BigEndian.PutUint16(key[:], uint16(a.args[0])) - binary.BigEndian.PutUint16(val[:], uint16(a.args[1])) - s.SetState(addr, key, val) - }, - args: make([]int64, 2), - }, - { - name: "SetCode", - fn: func(a testAction, s *StateDB) { - code := make([]byte, 16) - binary.BigEndian.PutUint64(code, uint64(a.args[0])) - binary.BigEndian.PutUint64(code[8:], uint64(a.args[1])) - s.SetCode(addr, code) - }, - args: make([]int64, 2), - }, - { - name: "CreateAccount", - fn: func(a testAction, s *StateDB) { - s.CreateAccount(addr) - }, - }, - { - name: "Selfdestruct", - fn: func(a testAction, s *StateDB) { - s.SelfDestruct(addr) - }, - }, - } - var nonRandom = index != -1 - if index == -1 { - index = r.Intn(len(actions)) - } - action := actions[index] - var names []string - if !action.noAddr { - names = append(names, addr.Hex()) - } - for i := range action.args { - if nonRandom { - action.args[i] = rand.Int63n(10000) + 1 // set balance to non-zero - } else { - action.args[i] = rand.Int63n(10000) - } - names = append(names, fmt.Sprint(action.args[i])) - } - action.name += " " + strings.Join(names, ", ") - return action -} - -// Generate returns a new snapshot test of the given size. All randomness is -// derived from r. 
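// This fuzz harness plugs into testing/quick: Generate (below) builds a
// pseudorandom stateTest and run replays it, so quick.Check can search for
// failing inputs. A stripped-down version of the same pattern, assuming the
// math/rand, reflect, testing and testing/quick imports; the counter type is
// purely illustrative.
type counter struct{ steps int }

// Generate satisfies quick.Generator, mirroring how stateTest seeds itself.
func (*counter) Generate(r *rand.Rand, size int) reflect.Value {
	return reflect.ValueOf(&counter{steps: r.Intn(size + 1)})
}

// run is the property under test; here it is trivially true.
func (c *counter) run() bool { return c.steps >= 0 }

func TestCounterQuick(t *testing.T) {
	if err := quick.Check((*counter).run, &quick.Config{MaxCount: 100}); err != nil {
		t.Error(err)
	}
}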
-func (*stateTest) Generate(r *rand.Rand, size int) reflect.Value { - addrs := make([]common.Address, 5) - for i := range addrs { - addrs[i][0] = byte(i) - } - actions := make([][]testAction, rand.Intn(5)+1) - - for i := 0; i < len(actions); i++ { - actions[i] = make([]testAction, size) - for j := range actions[i] { - if j == 0 { - // Always include a set balance action to make sure - // the state changes are not empty. - actions[i][j] = newStateTestAction(common.HexToAddress("0xdeadbeef"), r, 0) - continue - } - actions[i][j] = newStateTestAction(addrs[r.Intn(len(addrs))], r, -1) - } - } - chunk := int(math.Sqrt(float64(size))) - if size > 0 && chunk == 0 { - chunk = 1 - } - return reflect.ValueOf(&stateTest{ - addrs: addrs, - actions: actions, - chunk: chunk, - }) -} - -func (test *stateTest) String() string { - out := new(bytes.Buffer) - for i, actions := range test.actions { - fmt.Fprintf(out, "---- block %d ----\n", i) - for j, action := range actions { - if j%test.chunk == 0 { - fmt.Fprintf(out, "---- transaction %d ----\n", j/test.chunk) - } - fmt.Fprintf(out, "%4d: %s\n", j%test.chunk, action.name) - } - } - return out.String() -} - -func (test *stateTest) run() bool { - var ( - roots []common.Hash - accountList []map[common.Address][]byte - storageList []map[common.Address]map[common.Hash][]byte - onCommit = func(states *triestate.Set) { - accountList = append(accountList, copySet(states.Accounts)) - storageList = append(storageList, copy2DSet(states.Storages)) - } - disk = rawdb.NewMemoryDatabase() - tdb = triedb.NewDatabase(disk, &triedb.Config{DBOverride: pathdb.Defaults.BackendConstructor}) - sdb = NewDatabaseWithNodeDB(disk, tdb) - byzantium = rand.Intn(2) == 0 - ) - defer disk.Close() - defer tdb.Close() - - var snaps *snapshot.Tree - if rand.Intn(3) == 0 { - snaps, _ = snapshot.New(snapshot.Config{ - CacheSize: 1, - NoBuild: false, - AsyncBuild: false, - }, disk, tdb, common.Hash{}, types.EmptyRootHash) - } - for i, actions := range test.actions { - root := types.EmptyRootHash - if i != 0 { - root = roots[len(roots)-1] - } - state, err := New(root, sdb, snaps) - if err != nil { - panic(err) - } - state.onCommit = onCommit - - for i, action := range actions { - if i%test.chunk == 0 && i != 0 { - if byzantium { - state.Finalise(true) // call finalise at the transaction boundary - } else { - state.IntermediateRoot(true) // call intermediateRoot at the transaction boundary - } - } - action.fn(action, state) - } - if byzantium { - state.Finalise(true) // call finalise at the transaction boundary - } else { - state.IntermediateRoot(true) // call intermediateRoot at the transaction boundary - } - if snaps != nil { - snaps.WithBlockHashes(common.Hash{}, common.Hash{}) - } - nroot, err := state.Commit(0, true) // call commit at the block boundary - if err != nil { - panic(err) - } - if nroot == root { - return true // filter out non-change state transition - } - roots = append(roots, nroot) - } - for i := 0; i < len(test.actions); i++ { - root := types.EmptyRootHash - if i != 0 { - root = roots[i-1] - } - test.err = test.verify(root, roots[i], tdb, accountList[i], storageList[i]) - if test.err != nil { - return false - } - } - return true -} - -// verifyAccountCreation this function is called once the state diff says that -// specific account was not present. 
A serial of checks will be performed to -// ensure the state diff is correct, includes: -// -// - the account was indeed not present in trie -// - the account is present in new trie, nil->nil is regarded as invalid -// - the slots transition is correct -func (test *stateTest) verifyAccountCreation(next common.Hash, db *triedb.Database, otr, ntr *trie.Trie, addr common.Address, slots map[common.Hash][]byte) error { - // Verify account change - addrHash := crypto.Keccak256Hash(addr.Bytes()) - oBlob, err := otr.Get(addrHash.Bytes()) - if err != nil { - return err - } - nBlob, err := ntr.Get(addrHash.Bytes()) - if err != nil { - return err - } - if len(oBlob) != 0 { - return fmt.Errorf("unexpected account in old trie, %x", addrHash) - } - if len(nBlob) == 0 { - return fmt.Errorf("missing account in new trie, %x", addrHash) - } - - // Verify storage changes - var nAcct types.StateAccount - if err := rlp.DecodeBytes(nBlob, &nAcct); err != nil { - return err - } - // Account has no slot, empty slot set is expected - if nAcct.Root == types.EmptyRootHash { - if len(slots) != 0 { - return fmt.Errorf("unexpected slot changes %x", addrHash) - } - return nil - } - // Account has slots, ensure all new slots are contained - st, err := trie.New(trie.StorageTrieID(next, addrHash, nAcct.Root), db) - if err != nil { - return err - } - for key, val := range slots { - st.Update(key.Bytes(), val) - } - if st.Hash() != types.EmptyRootHash { - return errors.New("invalid slot changes") - } - return nil -} - -// verifyAccountUpdate this function is called once the state diff says that -// specific account was present. A serial of checks will be performed to -// ensure the state diff is correct, includes: -// -// - the account was indeed present in trie -// - the account in old trie matches the provided value -// - the slots transition is correct -func (test *stateTest) verifyAccountUpdate(next common.Hash, db *triedb.Database, otr, ntr *trie.Trie, addr common.Address, origin []byte, slots map[common.Hash][]byte) error { - // Verify account change - addrHash := crypto.Keccak256Hash(addr.Bytes()) - oBlob, err := otr.Get(addrHash.Bytes()) - if err != nil { - return err - } - nBlob, err := ntr.Get(addrHash.Bytes()) - if err != nil { - return err - } - if len(oBlob) == 0 { - return fmt.Errorf("missing account in old trie, %x", addrHash) - } - full, err := types.FullAccountRLP(origin) - if err != nil { - return err - } - if !bytes.Equal(full, oBlob) { - return fmt.Errorf("account value is not matched, %x", addrHash) - } - - // Decode accounts - var ( - oAcct types.StateAccount - nAcct types.StateAccount - nRoot common.Hash - ) - if err := rlp.DecodeBytes(oBlob, &oAcct); err != nil { - return err - } - if len(nBlob) == 0 { - nRoot = types.EmptyRootHash - } else { - if err := rlp.DecodeBytes(nBlob, &nAcct); err != nil { - return err - } - nRoot = nAcct.Root - } - - // Verify storage - st, err := trie.New(trie.StorageTrieID(next, addrHash, nRoot), db) - if err != nil { - return err - } - for key, val := range slots { - st.Update(key.Bytes(), val) - } - if st.Hash() != oAcct.Root { - return errors.New("invalid slot changes") - } - return nil -} - -func (test *stateTest) verify(root common.Hash, next common.Hash, db *triedb.Database, accountsOrigin map[common.Address][]byte, storagesOrigin map[common.Address]map[common.Hash][]byte) error { - otr, err := trie.New(trie.StateTrieID(root), db) - if err != nil { - return err - } - ntr, err := trie.New(trie.StateTrieID(next), db) - if err != nil { - return err - } - for addr, 
account := range accountsOrigin { - var err error - if len(account) == 0 { - err = test.verifyAccountCreation(next, db, otr, ntr, addr, storagesOrigin[addr]) - } else { - err = test.verifyAccountUpdate(next, db, otr, ntr, addr, accountsOrigin[addr], storagesOrigin[addr]) - } - if err != nil { - return err - } - } - return nil -} - -func TestStateChanges(t *testing.T) { - config := &quick.Config{MaxCount: 1000} - err := quick.Check((*stateTest).run, config) - if cerr, ok := err.(*quick.CheckError); ok { - test := cerr.In[0].(*stateTest) - t.Errorf("%v:\n%s", test.err, test) - } else if err != nil { - t.Error(err) - } -} diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go index 50a3a01bb3..a21c2f57de 100644 --- a/core/state/statedb_test.go +++ b/core/state/statedb_test.go @@ -36,7 +36,6 @@ import ( "math/rand" "reflect" "strings" - "sync" "testing" "testing/quick" @@ -49,7 +48,6 @@ import ( "github.com/ava-labs/libevm/crypto" "github.com/ava-labs/libevm/rlp" "github.com/ava-labs/libevm/trie" - "github.com/ava-labs/libevm/trie/trienode" "github.com/ava-labs/libevm/triedb" "github.com/holiman/uint256" ) @@ -169,72 +167,6 @@ func TestIntermediateLeaks(t *testing.T) { } } -// TestCopy tests that copying a StateDB object indeed makes the original and -// the copy independent of each other. This test is a regression test against -// https://github.com/ethereum/go-ethereum/pull/15549. -func TestCopy(t *testing.T) { - // Create a random state test to copy and modify "independently" - orig, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil) - - for i := byte(0); i < 255; i++ { - obj := orig.getOrNewStateObject(common.BytesToAddress([]byte{i})) - obj.AddBalance(uint256.NewInt(uint64(i))) - orig.updateStateObject(obj) - } - orig.Finalise(false) - - // Copy the state - copy := orig.Copy() - - // Copy the copy state - ccopy := copy.Copy() - - // modify all in memory - for i := byte(0); i < 255; i++ { - origObj := orig.getOrNewStateObject(common.BytesToAddress([]byte{i})) - copyObj := copy.getOrNewStateObject(common.BytesToAddress([]byte{i})) - ccopyObj := ccopy.getOrNewStateObject(common.BytesToAddress([]byte{i})) - - origObj.AddBalance(uint256.NewInt(2 * uint64(i))) - copyObj.AddBalance(uint256.NewInt(3 * uint64(i))) - ccopyObj.AddBalance(uint256.NewInt(4 * uint64(i))) - - orig.updateStateObject(origObj) - copy.updateStateObject(copyObj) - ccopy.updateStateObject(copyObj) - } - - // Finalise the changes on all concurrently - finalise := func(wg *sync.WaitGroup, db *StateDB) { - defer wg.Done() - db.Finalise(true) - } - - var wg sync.WaitGroup - wg.Add(3) - go finalise(&wg, orig) - go finalise(&wg, copy) - go finalise(&wg, ccopy) - wg.Wait() - - // Verify that the three states have been updated independently - for i := byte(0); i < 255; i++ { - origObj := orig.getOrNewStateObject(common.BytesToAddress([]byte{i})) - copyObj := copy.getOrNewStateObject(common.BytesToAddress([]byte{i})) - ccopyObj := ccopy.getOrNewStateObject(common.BytesToAddress([]byte{i})) - - if want := uint256.NewInt(3 * uint64(i)); origObj.Balance().Cmp(want) != 0 { - t.Errorf("orig obj %d: balance mismatch: have %v, want %v", i, origObj.Balance(), want) - } - if want := uint256.NewInt(4 * uint64(i)); copyObj.Balance().Cmp(want) != 0 { - t.Errorf("copy obj %d: balance mismatch: have %v, want %v", i, copyObj.Balance(), want) - } - if want := uint256.NewInt(5 * uint64(i)); ccopyObj.Balance().Cmp(want) != 0 { - t.Errorf("copy obj %d: balance 
mismatch: have %v, want %v", i, ccopyObj.Balance(), want) - } - } -} - func TestSnapshotRandom(t *testing.T) { config := &quick.Config{MaxCount: 1000} err := quick.Check((*snapshotTest).run, config) @@ -459,43 +391,6 @@ func (test *snapshotTest) run() bool { return true } -func forEachStorage(s *StateDB, addr common.Address, cb func(key, value common.Hash) bool) error { - so := s.getStateObject(addr) - if so == nil { - return nil - } - tr, err := so.getTrie() - if err != nil { - return err - } - trieIt, err := tr.NodeIterator(nil) - if err != nil { - return err - } - it := trie.NewIterator(trieIt) - - for it.Next() { - key := common.BytesToHash(s.trie.GetKey(it.Key)) - if value, dirty := so.dirtyStorage[key]; dirty { - if !cb(key, value) { - return nil - } - continue - } - - if len(it.Value) > 0 { - _, content, _, err := rlp.Split(it.Value) - if err != nil { - return err - } - if !cb(key, common.BytesToHash(content)) { - return nil - } - } - } - return nil -} - // checkEqual checks that methods of state and checkstate return the same values. func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error { for _, addr := range test.addrs { @@ -515,15 +410,16 @@ func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error { checkeq("GetCode", state.GetCode(addr), checkstate.GetCode(addr)) checkeq("GetCodeHash", state.GetCodeHash(addr), checkstate.GetCodeHash(addr)) checkeq("GetCodeSize", state.GetCodeSize(addr), checkstate.GetCodeSize(addr)) + // XXX: Can this be restored? // Check storage. - if obj := state.getStateObject(addr); obj != nil { - forEachStorage(state, addr, func(key, value common.Hash) bool { - return checkeq("GetState("+key.Hex()+")", checkstate.GetState(addr, key), value) - }) - forEachStorage(checkstate, addr, func(key, value common.Hash) bool { - return checkeq("GetState("+key.Hex()+")", checkstate.GetState(addr, key), value) - }) - } + // if obj := state.getStateObject(addr); obj != nil { + // forEachStorage(state, addr, func(key, value common.Hash) bool { + // return checkeq("GetState("+key.Hex()+")", checkstate.GetState(addr, key), value) + // }) + // forEachStorage(checkstate, addr, func(key, value common.Hash) bool { + // return checkeq("GetState("+key.Hex()+")", checkstate.GetState(addr, key), value) + // }) + // } if err != nil { return err } @@ -540,24 +436,6 @@ func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error { return nil } -func TestTouchDelete(t *testing.T) { - s := newStateEnv() - s.state.getOrNewStateObject(common.Address{}) - root, _ := s.state.Commit(0, false) - s.state, _ = New(root, s.state.db, s.state.snaps) - - snapshot := s.state.Snapshot() - s.state.AddBalance(common.Address{}, new(uint256.Int)) - - if len(s.state.journal.dirties) != 1 { - t.Fatal("expected one dirty state object") - } - s.state.RevertToSnapshot(snapshot) - if len(s.state.journal.dirties) != 0 { - t.Fatal("expected no dirty state object") - } -} - // TestCopyOfCopy tests that modified objects are carried over to the copy, and the copy of the copy. // See https://github.com/ethereum/go-ethereum/pull/15225#issuecomment-380191512 func TestCopyOfCopy(t *testing.T) { @@ -764,41 +642,6 @@ func TestCommitCopy(t *testing.T) { } } -// TestDeleteCreateRevert tests a weird state transition corner case that we hit -// while changing the internals of StateDB. The workflow is that a contract is -// self-destructed, then in a follow-up transaction (but same block) it's created -// again and the transaction reverted. 
-// -// The original StateDB implementation flushed dirty objects to the tries after -// each transaction, so this works ok. The rework accumulated writes in memory -// first, but the journal wiped the entire state object on create-revert. -func TestDeleteCreateRevert(t *testing.T) { - // Create an initial state with a single contract - state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil) - - addr := common.BytesToAddress([]byte("so")) - state.SetBalance(addr, uint256.NewInt(1)) - - root, _ := state.Commit(0, false) - state, _ = New(root, state.db, state.snaps) - - // Simulate self-destructing in one transaction, then create-reverting in another - state.SelfDestruct(addr) - state.Finalise(true) - - id := state.Snapshot() - state.SetBalance(addr, uint256.NewInt(2)) - state.RevertToSnapshot(id) - - // Commit the entire state and make sure we don't crash and have the correct state - root, _ = state.Commit(0, true) - state, _ = New(root, state.db, state.snaps) - - if state.getStateObject(addr) != nil { - t.Fatalf("self-destructed contract came alive") - } -} - // TestMissingTrieNodes tests that if the StateDB fails to load parts of the trie, // the Commit operation fails with an error // If we are missing trie nodes, we should not continue writing to the trie @@ -864,186 +707,11 @@ func testMissingTrieNodes(t *testing.T, scheme string) { } } -func TestStateDBAccessList(t *testing.T) { - // Some helpers - addr := func(a string) common.Address { - return common.HexToAddress(a) - } - slot := func(a string) common.Hash { - return common.HexToHash(a) - } - - memDb := rawdb.NewMemoryDatabase() - db := NewDatabase(memDb) - state, _ := New(types.EmptyRootHash, db, nil) - state.accessList = newAccessList() - - verifyAddrs := func(astrings ...string) { - t.Helper() - // convert to common.Address form - var addresses []common.Address - var addressMap = make(map[common.Address]struct{}) - for _, astring := range astrings { - address := addr(astring) - addresses = append(addresses, address) - addressMap[address] = struct{}{} - } - // Check that the given addresses are in the access list - for _, address := range addresses { - if !state.AddressInAccessList(address) { - t.Fatalf("expected %x to be in access list", address) - } - } - // Check that only the expected addresses are present in the access list - for address := range state.accessList.addresses { - if _, exist := addressMap[address]; !exist { - t.Fatalf("extra address %x in access list", address) - } - } - } - verifySlots := func(addrString string, slotStrings ...string) { - if !state.AddressInAccessList(addr(addrString)) { - t.Fatalf("scope missing address/slots %v", addrString) - } - var address = addr(addrString) - // convert to common.Hash form - var slots []common.Hash - var slotMap = make(map[common.Hash]struct{}) - for _, slotString := range slotStrings { - s := slot(slotString) - slots = append(slots, s) - slotMap[s] = struct{}{} - } - // Check that the expected items are in the access list - for i, s := range slots { - if _, slotPresent := state.SlotInAccessList(address, s); !slotPresent { - t.Fatalf("input %d: scope missing slot %v (address %v)", i, s, addrString) - } - } - // Check that no extra elements are in the access list - index := state.accessList.addresses[address] - if index >= 0 { - stateSlots := state.accessList.slots[index] - for s := range stateSlots { - if _, slotPresent := slotMap[s]; !slotPresent { - t.Fatalf("scope has extra slot %v (address %v)", s, addrString) - } - } - } - } - - 
state.AddAddressToAccessList(addr("aa")) // 1 - state.AddSlotToAccessList(addr("bb"), slot("01")) // 2,3 - state.AddSlotToAccessList(addr("bb"), slot("02")) // 4 - verifyAddrs("aa", "bb") - verifySlots("bb", "01", "02") - - // Make a copy - stateCopy1 := state.Copy() - if exp, got := 4, state.journal.length(); exp != got { - t.Fatalf("journal length mismatch: have %d, want %d", got, exp) - } - - // same again, should cause no journal entries - state.AddSlotToAccessList(addr("bb"), slot("01")) - state.AddSlotToAccessList(addr("bb"), slot("02")) - state.AddAddressToAccessList(addr("aa")) - if exp, got := 4, state.journal.length(); exp != got { - t.Fatalf("journal length mismatch: have %d, want %d", got, exp) - } - // some new ones - state.AddSlotToAccessList(addr("bb"), slot("03")) // 5 - state.AddSlotToAccessList(addr("aa"), slot("01")) // 6 - state.AddSlotToAccessList(addr("cc"), slot("01")) // 7,8 - state.AddAddressToAccessList(addr("cc")) - if exp, got := 8, state.journal.length(); exp != got { - t.Fatalf("journal length mismatch: have %d, want %d", got, exp) - } - - verifyAddrs("aa", "bb", "cc") - verifySlots("aa", "01") - verifySlots("bb", "01", "02", "03") - verifySlots("cc", "01") - - // now start rolling back changes - state.journal.revert(state, 7) - if _, ok := state.SlotInAccessList(addr("cc"), slot("01")); ok { - t.Fatalf("slot present, expected missing") - } - verifyAddrs("aa", "bb", "cc") - verifySlots("aa", "01") - verifySlots("bb", "01", "02", "03") - - state.journal.revert(state, 6) - if state.AddressInAccessList(addr("cc")) { - t.Fatalf("addr present, expected missing") - } - verifyAddrs("aa", "bb") - verifySlots("aa", "01") - verifySlots("bb", "01", "02", "03") - - state.journal.revert(state, 5) - if _, ok := state.SlotInAccessList(addr("aa"), slot("01")); ok { - t.Fatalf("slot present, expected missing") - } - verifyAddrs("aa", "bb") - verifySlots("bb", "01", "02", "03") - - state.journal.revert(state, 4) - if _, ok := state.SlotInAccessList(addr("bb"), slot("03")); ok { - t.Fatalf("slot present, expected missing") - } - verifyAddrs("aa", "bb") - verifySlots("bb", "01", "02") - - state.journal.revert(state, 3) - if _, ok := state.SlotInAccessList(addr("bb"), slot("02")); ok { - t.Fatalf("slot present, expected missing") - } - verifyAddrs("aa", "bb") - verifySlots("bb", "01") - - state.journal.revert(state, 2) - if _, ok := state.SlotInAccessList(addr("bb"), slot("01")); ok { - t.Fatalf("slot present, expected missing") - } - verifyAddrs("aa", "bb") - - state.journal.revert(state, 1) - if state.AddressInAccessList(addr("bb")) { - t.Fatalf("addr present, expected missing") - } - verifyAddrs("aa") - - state.journal.revert(state, 0) - if state.AddressInAccessList(addr("aa")) { - t.Fatalf("addr present, expected missing") - } - if got, exp := len(state.accessList.addresses), 0; got != exp { - t.Fatalf("expected empty, got %d", got) - } - if got, exp := len(state.accessList.slots), 0; got != exp { - t.Fatalf("expected empty, got %d", got) - } - // Check the copy - // Make a copy - state = stateCopy1 - verifyAddrs("aa", "bb") - verifySlots("bb", "01", "02") - if got, exp := len(state.accessList.addresses), 2; got != exp { - t.Fatalf("expected empty, got %d", got) - } - if got, exp := len(state.accessList.slots), 1; got != exp { - t.Fatalf("expected empty, got %d", got) - } -} - func TestMultiCoinOperations(t *testing.T) { s := newStateEnv() addr := common.Address{1} assetID := common.Hash{2} - s.state.getOrNewStateObject(addr) root, _ := s.state.Commit(0, false) s.state, _ = 
New(root, s.state.db, s.state.snaps) @@ -1054,7 +722,7 @@ func TestMultiCoinOperations(t *testing.T) { t.Fatal("expected zero multicoin balance") } - s.state.SetBalanceMultiCoin(addr, assetID, big.NewInt(10)) + s.state.AddBalanceMultiCoin(addr, assetID, big.NewInt(10)) s.state.SubBalanceMultiCoin(addr, assetID, big.NewInt(5)) s.state.AddBalanceMultiCoin(addr, assetID, big.NewInt(3)) @@ -1101,16 +769,14 @@ func TestMultiCoinSnapshot(t *testing.T) { assertBalances(10, 0, 0) // Commit and get the new root - snapTree.WithBlockHashes(common.Hash{}, common.Hash{}) - root, _ = stateDB.Commit(0, false) + root, _ = stateDB.Commit(0, false, snapshot.WithBlockHashes(common.Hash{}, common.Hash{})) assertBalances(10, 0, 0) // Create a new state from the latest root, add a multicoin balance, and // commit it to the tree. stateDB, _ = New(root, sdb, snapTree) stateDB.AddBalanceMultiCoin(addr, assetID1, big.NewInt(10)) - snapTree.WithBlockHashes(common.Hash{}, common.Hash{}) - root, _ = stateDB.Commit(0, false) + root, _ = stateDB.Commit(0, false, snapshot.WithBlockHashes(common.Hash{}, common.Hash{})) assertBalances(10, 10, 0) // Add more layers than the cap and ensure the balances and layers are correct @@ -1118,8 +784,7 @@ func TestMultiCoinSnapshot(t *testing.T) { stateDB, _ = New(root, sdb, snapTree) stateDB.AddBalanceMultiCoin(addr, assetID1, big.NewInt(1)) stateDB.AddBalanceMultiCoin(addr, assetID2, big.NewInt(2)) - snapTree.WithBlockHashes(common.Hash{}, common.Hash{}) - root, _ = stateDB.Commit(0, false) + root, _ = stateDB.Commit(0, false, snapshot.WithBlockHashes(common.Hash{}, common.Hash{})) } assertBalances(10, 266, 512) @@ -1128,8 +793,7 @@ func TestMultiCoinSnapshot(t *testing.T) { stateDB, _ = New(root, sdb, snapTree) stateDB.AddBalance(addr, uint256.NewInt(1)) stateDB.AddBalanceMultiCoin(addr, assetID1, big.NewInt(1)) - snapTree.WithBlockHashes(common.Hash{}, common.Hash{}) - root, _ = stateDB.Commit(0, false) + root, _ = stateDB.Commit(0, false, snapshot.WithBlockHashes(common.Hash{}, common.Hash{})) stateDB, _ = New(root, sdb, snapTree) assertBalances(11, 267, 512) } @@ -1150,7 +814,7 @@ func TestGenerateMultiCoinAccounts(t *testing.T) { if err != nil { t.Fatal(err) } - stateDB.SetBalanceMultiCoin(addr, assetID, assetBalance) + stateDB.AddBalanceMultiCoin(addr, assetID, assetBalance) root, err := stateDB.Commit(0, false) if err != nil { t.Fatal(err) @@ -1240,40 +904,6 @@ func TestFlushOrderDataLoss(t *testing.T) { } } -func TestStateDBTransientStorage(t *testing.T) { - memDb := rawdb.NewMemoryDatabase() - db := NewDatabase(memDb) - state, _ := New(types.EmptyRootHash, db, nil) - - key := common.Hash{0x01} - value := common.Hash{0x02} - addr := common.Address{} - - state.SetTransientState(addr, key, value) - if exp, got := 1, state.journal.length(); exp != got { - t.Fatalf("journal length mismatch: have %d, want %d", got, exp) - } - // the retrieved value should equal what was set - if got := state.GetTransientState(addr, key); got != value { - t.Fatalf("transient storage mismatch: have %x, want %x", got, value) - } - - // revert the transient state being set and then check that the - // value is now the empty hash - state.journal.revert(state, 0) - if got, exp := state.GetTransientState(addr, key), (common.Hash{}); exp != got { - t.Fatalf("transient storage mismatch: have %x, want %x", got, exp) - } - - // set transient state and then copy the statedb and ensure that - // the transient state is copied - state.SetTransientState(addr, key, value) - cpy := state.Copy() - if got := 
cpy.GetTransientState(addr, key); got != value { - t.Fatalf("transient storage mismatch: have %x, want %x", got, value) - } -} - func TestResetObject(t *testing.T) { var ( disk = rawdb.NewMemoryDatabase() @@ -1294,8 +924,7 @@ func TestResetObject(t *testing.T) { state.CreateAccount(addr) state.SetBalance(addr, uint256.NewInt(2)) state.SetState(addr, slotB, common.BytesToHash([]byte{0x2})) - snaps.WithBlockHashes(common.Hash{}, common.Hash{}) - root, _ := state.Commit(0, true) + root, _ := state.Commit(0, true, snapshot.WithBlockHashes(common.Hash{}, common.Hash{})) // Ensure the original account is wiped properly snap := snaps.Snapshot(root) @@ -1308,58 +937,3 @@ func TestResetObject(t *testing.T) { t.Fatalf("Unexpected storage slot value %v", slot) } } - -func TestDeleteStorage(t *testing.T) { - var ( - disk = rawdb.NewMemoryDatabase() - tdb = triedb.NewDatabase(disk, nil) - db = NewDatabaseWithNodeDB(disk, tdb) - snaps, _ = snapshot.New(snapshot.Config{CacheSize: 10}, disk, tdb, common.Hash{}, types.EmptyRootHash) - state, _ = New(types.EmptyRootHash, db, snaps) - addr = common.HexToAddress("0x1") - ) - // Initialize account and populate storage - state.SetBalance(addr, uint256.NewInt(1)) - state.CreateAccount(addr) - for i := 0; i < 1000; i++ { - slot := common.Hash(uint256.NewInt(uint64(i)).Bytes32()) - value := common.Hash(uint256.NewInt(uint64(10 * i)).Bytes32()) - state.SetState(addr, slot, value) - } - snaps.WithBlockHashes(common.Hash{}, common.Hash{}) - root, _ := state.Commit(0, true) - // Init phase done, create two states, one with snap and one without - fastState, _ := New(root, db, snaps) - slowState, _ := New(root, db, nil) - - obj := fastState.getOrNewStateObject(addr) - storageRoot := obj.data.Root - - _, _, fastNodes, err := fastState.deleteStorage(addr, crypto.Keccak256Hash(addr[:]), storageRoot) - if err != nil { - t.Fatal(err) - } - - _, _, slowNodes, err := slowState.deleteStorage(addr, crypto.Keccak256Hash(addr[:]), storageRoot) - if err != nil { - t.Fatal(err) - } - check := func(set *trienode.NodeSet) string { - var a []string - set.ForEachWithOrder(func(path string, n *trienode.Node) { - if n.Hash != (common.Hash{}) { - t.Fatal("delete should have empty hashes") - } - if len(n.Blob) != 0 { - t.Fatal("delete should have have empty blobs") - } - a = append(a, fmt.Sprintf("%x", path)) - }) - return strings.Join(a, ",") - } - slowRes := check(slowNodes) - fastRes := check(fastNodes) - if slowRes != fastRes { - t.Fatalf("difference found:\nfast: %v\nslow: %v\n", fastRes, slowRes) - } -} diff --git a/core/state/sync_test.go b/core/state/sync_test.go deleted file mode 100644 index 560f3a96fd..0000000000 --- a/core/state/sync_test.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
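// The updated tests above (TestResetObject, TestMultiCoinSnapshot) no longer
// call snaps.WithBlockHashes before committing; the block hashes now travel
// through Commit itself as an option. A minimal helper showing that call
// shape, assuming the coreth core/state and core/state/snapshot imports; the
// commitWithHashes name is illustrative.
func commitWithHashes(db *state.StateDB, height uint64, blockHash, parentHash common.Hash) (common.Hash, error) {
	return db.Commit(height, true, snapshot.WithBlockHashes(blockHash, parentHash))
}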
- -package state - -import ( - "math/big" - - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/triedb/hashdb" - "github.com/ava-labs/coreth/triedb/pathdb" - "github.com/ava-labs/libevm/common" - "github.com/ava-labs/libevm/crypto" - "github.com/ava-labs/libevm/ethdb" - "github.com/ava-labs/libevm/triedb" - "github.com/holiman/uint256" -) - -// testAccount is the data associated with an account used by the state tests. -type testAccount struct { - address common.Address - balance *big.Int - nonce uint64 - code []byte -} - -// makeTestState create a sample test state to test node-wise reconstruction. -func makeTestState(scheme string) (ethdb.Database, Database, *triedb.Database, common.Hash, []*testAccount) { - // Create an empty state - config := &triedb.Config{Preimages: true} - if scheme == rawdb.PathScheme { - config.DBOverride = pathdb.Defaults.BackendConstructor - } else { - config.DBOverride = hashdb.Defaults.BackendConstructor - } - db := rawdb.NewMemoryDatabase() - nodeDb := triedb.NewDatabase(db, config) - sdb := NewDatabaseWithNodeDB(db, nodeDb) - state, _ := New(types.EmptyRootHash, sdb, nil) - - // Fill it with some arbitrary data - var accounts []*testAccount - for i := byte(0); i < 96; i++ { - obj := state.getOrNewStateObject(common.BytesToAddress([]byte{i})) - acc := &testAccount{address: common.BytesToAddress([]byte{i})} - - obj.AddBalance(uint256.NewInt(uint64(11 * i))) - acc.balance = big.NewInt(int64(11 * i)) - - obj.SetNonce(uint64(42 * i)) - acc.nonce = uint64(42 * i) - - if i%3 == 0 { - obj.SetCode(crypto.Keccak256Hash([]byte{i, i, i, i, i}), []byte{i, i, i, i, i}) - acc.code = []byte{i, i, i, i, i} - } - if i%5 == 0 { - for j := byte(0); j < 5; j++ { - hash := crypto.Keccak256Hash([]byte{i, i, i, i, i, j, j}) - obj.SetState(hash, hash) - } - } - accounts = append(accounts, acc) - } - root, _ := state.Commit(0, false) - - // Return the generated state - return db, sdb, nodeDb, root, accounts -} diff --git a/core/state/transient_storage.go b/core/state/transient_storage.go deleted file mode 100644 index 285ebbc727..0000000000 --- a/core/state/transient_storage.go +++ /dev/null @@ -1,65 +0,0 @@ -// (c) 2023, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2022 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
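// makeTestState above selects the trie backend by overriding the triedb
// constructor. The same switch in isolation, assuming the coreth core/rawdb,
// triedb/hashdb, triedb/pathdb and libevm/triedb imports; the
// newTrieDBForScheme name is illustrative.
func newTrieDBForScheme(scheme string) *triedb.Database {
	config := &triedb.Config{Preimages: true}
	if scheme == rawdb.PathScheme {
		config.DBOverride = pathdb.Defaults.BackendConstructor
	} else {
		config.DBOverride = hashdb.Defaults.BackendConstructor
	}
	return triedb.NewDatabase(rawdb.NewMemoryDatabase(), config)
}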
- -package state - -import ( - "github.com/ava-labs/libevm/common" -) - -// transientStorage is a representation of EIP-1153 "Transient Storage". -type transientStorage map[common.Address]Storage - -// newTransientStorage creates a new instance of a transientStorage. -func newTransientStorage() transientStorage { - return make(transientStorage) -} - -// Set sets the transient-storage `value` for `key` at the given `addr`. -func (t transientStorage) Set(addr common.Address, key, value common.Hash) { - if _, ok := t[addr]; !ok { - t[addr] = make(Storage) - } - t[addr][key] = value -} - -// Get gets the transient storage for `key` at the given `addr`. -func (t transientStorage) Get(addr common.Address, key common.Hash) common.Hash { - val, ok := t[addr] - if !ok { - return common.Hash{} - } - return val[key] -} - -// Copy does a deep copy of the transientStorage -func (t transientStorage) Copy() transientStorage { - storage := make(transientStorage) - for key, value := range t { - storage[key] = value.Copy() - } - return storage -} diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go deleted file mode 100644 index d053c78751..0000000000 --- a/core/state/trie_prefetcher.go +++ /dev/null @@ -1,640 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2020 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package state - -import ( - "sync" - "time" - - "github.com/ava-labs/coreth/metrics" - "github.com/ava-labs/coreth/utils" - "github.com/ava-labs/libevm/common" - "github.com/ava-labs/libevm/log" -) - -// triePrefetchMetricsPrefix is the prefix under which to publish the metrics. -const triePrefetchMetricsPrefix = "trie/prefetch/" - -// triePrefetcher is an active prefetcher, which receives accounts or storage -// items and does trie-loading of them. The goal is to get as much useful content -// into the caches as possible. -// -// Note, the prefetcher's API is not thread safe. -type triePrefetcher struct { - db Database // Database to fetch trie nodes through - root common.Hash // Root hash of the account trie for metrics - fetches map[string]Trie // Partially or fully fetched tries. Only populated for inactive copies. 
- fetchers map[string]*subfetcher // Subfetchers for each trie - - maxConcurrency int - workers *utils.BoundedWorkers - - subfetcherWorkersMeter metrics.Meter - subfetcherWaitTimer metrics.Counter - subfetcherCopiesMeter metrics.Meter - - accountLoadMeter metrics.Meter - accountDupMeter metrics.Meter - accountSkipMeter metrics.Meter - accountWasteMeter metrics.Meter - - storageFetchersMeter metrics.Meter - storageLoadMeter metrics.Meter - storageLargestLoadMeter metrics.Meter - storageDupMeter metrics.Meter - storageSkipMeter metrics.Meter - storageWasteMeter metrics.Meter -} - -func newTriePrefetcher(db Database, root common.Hash, namespace string, maxConcurrency int) *triePrefetcher { - prefix := triePrefetchMetricsPrefix + namespace - return &triePrefetcher{ - db: db, - root: root, - fetchers: make(map[string]*subfetcher), // Active prefetchers use the fetchers map - - maxConcurrency: maxConcurrency, - workers: utils.NewBoundedWorkers(maxConcurrency), // Scale up as needed to [maxConcurrency] - - subfetcherWorkersMeter: metrics.GetOrRegisterMeter(prefix+"/subfetcher/workers", nil), - subfetcherWaitTimer: metrics.GetOrRegisterCounter(prefix+"/subfetcher/wait", nil), - subfetcherCopiesMeter: metrics.GetOrRegisterMeter(prefix+"/subfetcher/copies", nil), - - accountLoadMeter: metrics.GetOrRegisterMeter(prefix+"/account/load", nil), - accountDupMeter: metrics.GetOrRegisterMeter(prefix+"/account/dup", nil), - accountSkipMeter: metrics.GetOrRegisterMeter(prefix+"/account/skip", nil), - accountWasteMeter: metrics.GetOrRegisterMeter(prefix+"/account/waste", nil), - - storageFetchersMeter: metrics.GetOrRegisterMeter(prefix+"/storage/fetchers", nil), - storageLoadMeter: metrics.GetOrRegisterMeter(prefix+"/storage/load", nil), - storageLargestLoadMeter: metrics.GetOrRegisterMeter(prefix+"/storage/lload", nil), - storageDupMeter: metrics.GetOrRegisterMeter(prefix+"/storage/dup", nil), - storageSkipMeter: metrics.GetOrRegisterMeter(prefix+"/storage/skip", nil), - storageWasteMeter: metrics.GetOrRegisterMeter(prefix+"/storage/waste", nil), - } -} - -// close iterates over all the subfetchers, aborts any that were left spinning -// and reports the stats to the metrics subsystem. 
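// newTriePrefetcher above registers every meter under a caller-supplied
// namespace so that, for example, block-processing and miner prefetchers
// report separately. The registration pattern in isolation, assuming the
// coreth metrics import; the "example/prefetch/" prefix is illustrative.
func newNamespacedMeters(namespace string) (loads, skips metrics.Meter) {
	prefix := "example/prefetch/" + namespace
	loads = metrics.GetOrRegisterMeter(prefix+"/account/load", nil)
	skips = metrics.GetOrRegisterMeter(prefix+"/account/skip", nil)
	return loads, skips
}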
-func (p *triePrefetcher) close() { - // If the prefetcher is an inactive one, bail out - if p.fetches != nil { - return - } - - // Collect stats from all fetchers - var ( - storageFetchers int64 - largestLoad int64 - ) - for _, fetcher := range p.fetchers { - fetcher.abort() // safe to call multiple times (should be a no-op on happy path) - - if metrics.Enabled { - p.subfetcherCopiesMeter.Mark(int64(fetcher.copies())) - - if fetcher.root == p.root { - p.accountLoadMeter.Mark(int64(len(fetcher.seen))) - p.accountDupMeter.Mark(int64(fetcher.dups)) - p.accountSkipMeter.Mark(int64(fetcher.skips())) - - for _, key := range fetcher.used { - delete(fetcher.seen, string(key)) - } - p.accountWasteMeter.Mark(int64(len(fetcher.seen))) - } else { - storageFetchers++ - oseen := int64(len(fetcher.seen)) - if oseen > largestLoad { - largestLoad = oseen - } - p.storageLoadMeter.Mark(oseen) - p.storageDupMeter.Mark(int64(fetcher.dups)) - p.storageSkipMeter.Mark(int64(fetcher.skips())) - - for _, key := range fetcher.used { - delete(fetcher.seen, string(key)) - } - p.storageWasteMeter.Mark(int64(len(fetcher.seen))) - } - } - } - if metrics.Enabled { - p.storageFetchersMeter.Mark(storageFetchers) - p.storageLargestLoadMeter.Mark(largestLoad) - } - - // Stop all workers once fetchers are aborted (otherwise - // could stop while waiting) - // - // Record number of workers that were spawned during this run - workersUsed := int64(p.workers.Wait()) - if metrics.Enabled { - p.subfetcherWorkersMeter.Mark(workersUsed) - } - - // Clear out all fetchers (will crash on a second call, deliberate) - p.fetchers = nil -} - -// copy creates a deep-but-inactive copy of the trie prefetcher. Any trie data -// already loaded will be copied over, but no goroutines will be started. This -// is mostly used in the miner which creates a copy of it's actively mutated -// state to be sealed while it may further mutate the state. -func (p *triePrefetcher) copy() *triePrefetcher { - copy := &triePrefetcher{ - db: p.db, - root: p.root, - fetches: make(map[string]Trie), // Active prefetchers use the fetchers map - - subfetcherWorkersMeter: p.subfetcherWorkersMeter, - subfetcherWaitTimer: p.subfetcherWaitTimer, - subfetcherCopiesMeter: p.subfetcherCopiesMeter, - - accountLoadMeter: p.accountLoadMeter, - accountDupMeter: p.accountDupMeter, - accountSkipMeter: p.accountSkipMeter, - accountWasteMeter: p.accountWasteMeter, - - storageFetchersMeter: p.storageFetchersMeter, - storageLoadMeter: p.storageLoadMeter, - storageLargestLoadMeter: p.storageLargestLoadMeter, - storageDupMeter: p.storageDupMeter, - storageSkipMeter: p.storageSkipMeter, - storageWasteMeter: p.storageWasteMeter, - } - // If the prefetcher is already a copy, duplicate the data - if p.fetches != nil { - for root, fetch := range p.fetches { - if fetch == nil { - continue - } - copy.fetches[root] = p.db.CopyTrie(fetch) - } - return copy - } - // Otherwise we're copying an active fetcher, retrieve the current states - for id, fetcher := range p.fetchers { - copy.fetches[id] = fetcher.peek() - } - return copy -} - -// prefetch schedules a batch of trie items to prefetch. 
-func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, addr common.Address, keys [][]byte) { - // If the prefetcher is an inactive one, bail out - if p.fetches != nil { - return - } - - // Active fetcher, schedule the retrievals - id := p.trieID(owner, root) - fetcher := p.fetchers[id] - if fetcher == nil { - fetcher = newSubfetcher(p, owner, root, addr) - p.fetchers[id] = fetcher - } - fetcher.schedule(keys) -} - -// trie returns the trie matching the root hash, or nil if the prefetcher doesn't -// have it. -func (p *triePrefetcher) trie(owner common.Hash, root common.Hash) Trie { - // If the prefetcher is inactive, return from existing deep copies - id := p.trieID(owner, root) - if p.fetches != nil { - trie := p.fetches[id] - if trie == nil { - return nil - } - return p.db.CopyTrie(trie) - } - - // Otherwise the prefetcher is active, bail if no trie was prefetched for this root - fetcher := p.fetchers[id] - if fetcher == nil { - return nil - } - - // Wait for the fetcher to finish and shutdown orchestrator, if it exists - start := time.Now() - fetcher.wait() - if metrics.Enabled { - p.subfetcherWaitTimer.Inc(time.Since(start).Milliseconds()) - } - - // Return a copy of one of the prefetched tries - trie := fetcher.peek() - if trie == nil { - return nil - } - return trie -} - -// used marks a batch of state items used to allow creating statistics as to -// how useful or wasteful the prefetcher is. -func (p *triePrefetcher) used(owner common.Hash, root common.Hash, used [][]byte) { - if fetcher := p.fetchers[p.trieID(owner, root)]; fetcher != nil { - fetcher.used = used - } -} - -// trieID returns an unique trie identifier consists the trie owner and root hash. -func (p *triePrefetcher) trieID(owner common.Hash, root common.Hash) string { - trieID := make([]byte, common.HashLength*2) - copy(trieID, owner.Bytes()) - copy(trieID[common.HashLength:], root.Bytes()) - return string(trieID) -} - -// subfetcher is a trie fetcher goroutine responsible for pulling entries for a -// single trie. It is spawned when a new root is encountered and lives until the -// main prefetcher is paused and either all requested items are processed or if -// the trie being worked on is retrieved from the prefetcher. -type subfetcher struct { - p *triePrefetcher - - db Database // Database to load trie nodes through - state common.Hash // Root hash of the state to prefetch - owner common.Hash // Owner of the trie, usually account hash - root common.Hash // Root hash of the trie to prefetch - addr common.Address // Address of the account that the trie belongs to - - to *trieOrchestrator // Orchestrate concurrent fetching of a single trie - - seen map[string]struct{} // Tracks the entries already loaded - dups int // Number of duplicate preload tasks - used [][]byte // Tracks the entries used in the end -} - -// newSubfetcher creates a goroutine to prefetch state items belonging to a -// particular root hash. -func newSubfetcher(p *triePrefetcher, owner common.Hash, root common.Hash, addr common.Address) *subfetcher { - sf := &subfetcher{ - p: p, - db: p.db, - state: p.root, - owner: owner, - root: root, - addr: addr, - seen: make(map[string]struct{}), - } - sf.to = newTrieOrchestrator(sf) - if sf.to != nil { - go sf.to.processTasks() - } - // We return [sf] here to ensure we don't try to re-create if - // we aren't able to setup a [newTrieOrchestrator] the first time. - return sf -} - -// schedule adds a batch of trie keys to the queue to prefetch. 
-// This should never block, so an array is used instead of a channel. -// -// This is not thread-safe. -func (sf *subfetcher) schedule(keys [][]byte) { - // Append the tasks to the current queue - tasks := make([][]byte, 0, len(keys)) - for _, key := range keys { - // Check if keys already seen - sk := string(key) - if _, ok := sf.seen[sk]; ok { - sf.dups++ - continue - } - sf.seen[sk] = struct{}{} - tasks = append(tasks, key) - } - - // After counting keys, exit if they can't be prefetched - if sf.to == nil { - return - } - - // Add tasks to queue for prefetching - sf.to.enqueueTasks(tasks) -} - -// peek tries to retrieve a deep copy of the fetcher's trie in whatever form it -// is currently. -func (sf *subfetcher) peek() Trie { - if sf.to == nil { - return nil - } - return sf.to.copyBase() -} - -// wait must only be called if [triePrefetcher] has not been closed. If this happens, -// workers will not finish. -func (sf *subfetcher) wait() { - if sf.to == nil { - // Unable to open trie - return - } - sf.to.wait() -} - -func (sf *subfetcher) abort() { - if sf.to == nil { - // Unable to open trie - return - } - sf.to.abort() -} - -func (sf *subfetcher) skips() int { - if sf.to == nil { - // Unable to open trie - return 0 - } - return sf.to.skipCount() -} - -func (sf *subfetcher) copies() int { - if sf.to == nil { - // Unable to open trie - return 0 - } - return sf.to.copies -} - -// trieOrchestrator is not thread-safe. -type trieOrchestrator struct { - sf *subfetcher - - // base is an unmodified Trie we keep for - // creating copies for each worker goroutine. - // - // We care more about quick copies than good copies - // because most (if not all) of the nodes that will be populated - // in the copy will come from the underlying triedb cache. Ones - // that don't come from this cache probably had to be fetched - // from disk anyways. - base Trie - baseLock sync.Mutex - - tasksAllowed bool - skips int // number of tasks skipped - pendingTasks [][]byte - taskLock sync.Mutex - - processingTasks sync.WaitGroup - - wake chan struct{} - stop chan struct{} - stopOnce sync.Once - loopTerm chan struct{} - - copies int - copyChan chan Trie - copySpawner chan struct{} -} - -func newTrieOrchestrator(sf *subfetcher) *trieOrchestrator { - // Start by opening the trie and stop processing if it fails - var ( - base Trie - err error - ) - if sf.owner == (common.Hash{}) { - base, err = sf.db.OpenTrie(sf.root) - if err != nil { - log.Warn("Trie prefetcher failed opening trie", "root", sf.root, "err", err) - return nil - } - } else { - // The trie argument can be nil as verkle doesn't support prefetching - // yet. TODO FIX IT(rjl493456442), otherwise code will panic here. 
- base, err = sf.db.OpenStorageTrie(sf.state, sf.addr, sf.root, nil) - if err != nil { - log.Warn("Trie prefetcher failed opening trie", "root", sf.root, "err", err) - return nil - } - } - - // Instantiate trieOrchestrator - to := &trieOrchestrator{ - sf: sf, - base: base, - - tasksAllowed: true, - wake: make(chan struct{}, 1), - stop: make(chan struct{}), - loopTerm: make(chan struct{}), - - copyChan: make(chan Trie, sf.p.maxConcurrency), - copySpawner: make(chan struct{}, sf.p.maxConcurrency), - } - - // Create initial trie copy - to.copies++ - to.copySpawner <- struct{}{} - to.copyChan <- to.copyBase() - return to -} - -func (to *trieOrchestrator) copyBase() Trie { - to.baseLock.Lock() - defer to.baseLock.Unlock() - - return to.sf.db.CopyTrie(to.base) -} - -func (to *trieOrchestrator) skipCount() int { - to.taskLock.Lock() - defer to.taskLock.Unlock() - - return to.skips -} - -func (to *trieOrchestrator) enqueueTasks(tasks [][]byte) { - to.taskLock.Lock() - defer to.taskLock.Unlock() - - if len(tasks) == 0 { - return - } - - // Add tasks to [pendingTasks] - if !to.tasksAllowed { - to.skips += len(tasks) - return - } - to.processingTasks.Add(len(tasks)) - to.pendingTasks = append(to.pendingTasks, tasks...) - - // Wake up processor - select { - case to.wake <- struct{}{}: - default: - } -} - -func (to *trieOrchestrator) handleStop(remaining int) { - to.taskLock.Lock() - to.skips += remaining - to.taskLock.Unlock() - to.processingTasks.Add(-remaining) -} - -func (to *trieOrchestrator) processTasks() { - defer close(to.loopTerm) - - for { - // Determine if we should process or exit - select { - case <-to.wake: - case <-to.stop: - return - } - - // Get current tasks - to.taskLock.Lock() - tasks := to.pendingTasks - to.pendingTasks = nil - to.taskLock.Unlock() - - // Enqueue more work as soon as trie copies are available - lt := len(tasks) - for i := 0; i < lt; i++ { - // Try to stop as soon as possible, if channel is closed - remaining := lt - i - select { - case <-to.stop: - to.handleStop(remaining) - return - default: - } - - // Try to create to get an active copy first (select is non-deterministic, - // so we may end up creating a new copy when we don't need to) - var t Trie - select { - case t = <-to.copyChan: - default: - // Wait for an available copy or create one, if we weren't - // able to get a previously created copy - select { - case <-to.stop: - to.handleStop(remaining) - return - case t = <-to.copyChan: - case to.copySpawner <- struct{}{}: - to.copies++ - t = to.copyBase() - } - } - - // Enqueue work, unless stopped. - fTask := tasks[i] - f := func() { - // Perform task - var err error - if len(fTask) == common.AddressLength { - _, err = t.GetAccount(common.BytesToAddress(fTask)) - } else { - _, err = t.GetStorage(to.sf.addr, fTask) - } - if err != nil { - log.Error("Trie prefetcher failed fetching", "root", to.sf.root, "err", err) - } - to.processingTasks.Done() - - // Return copy when we are done with it, so someone else can use it - // - // channel is buffered and will not block - to.copyChan <- t - } - - // Enqueue task for processing (may spawn new goroutine - // if not at [maxConcurrency]) - // - // If workers are stopped before calling [Execute], this function may - // panic. 
- to.sf.p.workers.Execute(f) - } - } -} - -func (to *trieOrchestrator) stopAcceptingTasks() { - to.taskLock.Lock() - defer to.taskLock.Unlock() - - if !to.tasksAllowed { - return - } - to.tasksAllowed = false - - // We don't clear [to.pendingTasks] here because - // it will be faster to prefetch them even though we - // are still waiting. -} - -// wait stops accepting new tasks and waits for ongoing tasks to complete. If -// wait is called, it is not necessary to call [abort]. -// -// It is safe to call wait multiple times. -func (to *trieOrchestrator) wait() { - // Prevent more tasks from being enqueued - to.stopAcceptingTasks() - - // Wait for processing tasks to complete - to.processingTasks.Wait() - - // Stop orchestrator loop - to.stopOnce.Do(func() { - close(to.stop) - }) - <-to.loopTerm -} - -// abort stops any ongoing tasks and shuts down the orchestrator loop. If abort -// is called, it is not necessary to call [wait]. -// -// It is safe to call abort multiple times. -func (to *trieOrchestrator) abort() { - // Prevent more tasks from being enqueued - to.stopAcceptingTasks() - - // Stop orchestrator loop - to.stopOnce.Do(func() { - close(to.stop) - }) - <-to.loopTerm - - // Capture any dangling pending tasks (processTasks - // may exit before enqueing all pendingTasks) - to.taskLock.Lock() - pendingCount := len(to.pendingTasks) - to.skips += pendingCount - to.pendingTasks = nil - to.taskLock.Unlock() - to.processingTasks.Add(-pendingCount) - - // Wait for processing tasks to complete - to.processingTasks.Wait() -} diff --git a/core/state/trie_prefetcher_test.go b/core/state/trie_prefetcher_test.go deleted file mode 100644 index c0da75c337..0000000000 --- a/core/state/trie_prefetcher_test.go +++ /dev/null @@ -1,124 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2021 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package state - -import ( - "math/big" - "testing" - "time" - - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/libevm/common" - "github.com/holiman/uint256" -) - -const maxConcurrency = 4 - -func filledStateDB() *StateDB { - state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil) - - // Create an account and check if the retrieved balance is correct - addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe") - skey := common.HexToHash("aaa") - sval := common.HexToHash("bbb") - - state.SetBalance(addr, uint256.NewInt(42)) // Change the account trie - state.SetCode(addr, []byte("hello")) // Change an external metadata - state.SetState(addr, skey, sval) // Change the storage trie - for i := 0; i < 100; i++ { - sk := common.BigToHash(big.NewInt(int64(i))) - state.SetState(addr, sk, sk) // Change the storage trie - } - return state -} - -func TestCopyAndClose(t *testing.T) { - db := filledStateDB() - prefetcher := newTriePrefetcher(db.db, db.originalRoot, "", maxConcurrency) - skey := common.HexToHash("aaa") - prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) - prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) - time.Sleep(1 * time.Second) - a := prefetcher.trie(common.Hash{}, db.originalRoot) - prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) - b := prefetcher.trie(common.Hash{}, db.originalRoot) - cpy := prefetcher.copy() - cpy.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) - cpy.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) - c := cpy.trie(common.Hash{}, db.originalRoot) - prefetcher.close() - cpy2 := cpy.copy() - cpy2.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) - d := cpy2.trie(common.Hash{}, db.originalRoot) - cpy.close() - cpy2.close() - if a.Hash() != b.Hash() || a.Hash() != c.Hash() || a.Hash() != d.Hash() { - t.Fatalf("Invalid trie, hashes should be equal: %v %v %v %v", a.Hash(), b.Hash(), c.Hash(), d.Hash()) - } -} - -func TestUseAfterClose(t *testing.T) { - db := filledStateDB() - prefetcher := newTriePrefetcher(db.db, db.originalRoot, "", maxConcurrency) - skey := common.HexToHash("aaa") - prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) - a := prefetcher.trie(common.Hash{}, db.originalRoot) - prefetcher.close() - b := prefetcher.trie(common.Hash{}, db.originalRoot) - if a == nil { - t.Fatal("Prefetching before close should not return nil") - } - if b != nil { - t.Fatal("Trie after close should return nil") - } -} - -func TestCopyClose(t *testing.T) { - db := filledStateDB() - prefetcher := newTriePrefetcher(db.db, db.originalRoot, "", maxConcurrency) - skey := common.HexToHash("aaa") - prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) - cpy := prefetcher.copy() - a := prefetcher.trie(common.Hash{}, db.originalRoot) - b := cpy.trie(common.Hash{}, db.originalRoot) - prefetcher.close() - c := prefetcher.trie(common.Hash{}, db.originalRoot) - d := cpy.trie(common.Hash{}, db.originalRoot) - if a == nil { - t.Fatal("Prefetching before close should not return nil") - } - if b == nil { - t.Fatal("Copy trie should return nil") - } - if c != nil { - t.Fatal("Trie after close should return nil") - } - if d == nil { - t.Fatal("Copy 
trie should not return nil") - } -} diff --git a/core/test_blockchain.go b/core/test_blockchain.go index 95065f276a..dacccd2e94 100644 --- a/core/test_blockchain.go +++ b/core/test_blockchain.go @@ -25,11 +25,11 @@ import ( var TestCallbacks = dummy.ConsensusCallbacks{ OnExtraStateChange: func(block *types.Block, sdb *state.StateDB) (*big.Int, *big.Int, error) { - sdb.SetBalanceMultiCoin(common.HexToAddress("0xdeadbeef"), common.HexToHash("0xdeadbeef"), big.NewInt(block.Number().Int64())) + sdb.AddBalanceMultiCoin(common.HexToAddress("0xdeadbeef"), common.HexToHash("0xdeadbeef"), big.NewInt(block.Number().Int64())) return nil, nil, nil }, OnFinalizeAndAssemble: func(header *types.Header, sdb *state.StateDB, txs []*types.Transaction) ([]byte, *big.Int, *big.Int, error) { - sdb.SetBalanceMultiCoin(common.HexToAddress("0xdeadbeef"), common.HexToHash("0xdeadbeef"), big.NewInt(header.Number.Int64())) + sdb.AddBalanceMultiCoin(common.HexToAddress("0xdeadbeef"), common.HexToHash("0xdeadbeef"), big.NewInt(header.Number.Int64())) return nil, nil, nil, nil }, } diff --git a/core/types/state_account.go b/core/types/state_account.go index d296a0902c..e864b90d0e 100644 --- a/core/types/state_account.go +++ b/core/types/state_account.go @@ -46,17 +46,8 @@ var ( type isMultiCoin bool -var isMultiCoinPayloads = ethtypes.RegisterExtras[isMultiCoin]() +var IsMultiCoinPayloads = ethtypes.RegisterExtras[isMultiCoin]() func IsMultiCoin(a *StateAccount) bool { - return bool(isMultiCoinPayloads.FromStateAccount(a)) -} - -func EnableMultiCoin(a *StateAccount) { - isMultiCoinPayloads.SetOnStateAccount(a, true) -} - -// XXX: Should be removed once we use the upstream statedb -func DisableMultiCoin(a *StateAccount) { - isMultiCoinPayloads.SetOnStateAccount(a, false) + return bool(IsMultiCoinPayloads.FromStateAccount(a)) } diff --git a/nativeasset/contract_test.go b/nativeasset/contract_test.go index c6446ab207..06e0636687 100644 --- a/nativeasset/contract_test.go +++ b/nativeasset/contract_test.go @@ -231,7 +231,7 @@ func TestStatefulPrecompile(t *testing.T) { t.Fatal(err) } statedb.SetBalance(userAddr1, u256Hundred) - statedb.SetBalanceMultiCoin(userAddr1, assetID, bigHundred) + statedb.AddBalanceMultiCoin(userAddr1, assetID, bigHundred) statedb.Finalise(true) return statedb }, @@ -264,7 +264,7 @@ func TestStatefulPrecompile(t *testing.T) { t.Fatal(err) } statedb.SetBalance(userAddr1, u256Hundred) - statedb.SetBalanceMultiCoin(userAddr1, assetID, bigHundred) + statedb.AddBalanceMultiCoin(userAddr1, assetID, bigHundred) statedb.Finalise(true) return statedb }, @@ -299,7 +299,7 @@ func TestStatefulPrecompile(t *testing.T) { t.Fatal(err) } statedb.SetBalance(userAddr1, u256Hundred) - statedb.SetBalanceMultiCoin(userAddr1, assetID, big.NewInt(50)) + statedb.AddBalanceMultiCoin(userAddr1, assetID, big.NewInt(50)) statedb.Finalise(true) return statedb }, @@ -331,7 +331,7 @@ func TestStatefulPrecompile(t *testing.T) { t.Fatal(err) } statedb.SetBalance(userAddr1, uint256.NewInt(50)) - statedb.SetBalanceMultiCoin(userAddr1, assetID, big.NewInt(50)) + statedb.AddBalanceMultiCoin(userAddr1, assetID, big.NewInt(50)) statedb.Finalise(true) return statedb }, @@ -363,7 +363,7 @@ func TestStatefulPrecompile(t *testing.T) { t.Fatal(err) } statedb.SetBalance(userAddr1, u256Hundred) - statedb.SetBalanceMultiCoin(userAddr1, assetID, bigHundred) + statedb.AddBalanceMultiCoin(userAddr1, assetID, bigHundred) statedb.Finalise(true) return statedb }, @@ -384,7 +384,7 @@ func TestStatefulPrecompile(t *testing.T) { t.Fatal(err) } 
statedb.SetBalance(userAddr1, u256Hundred) - statedb.SetBalanceMultiCoin(userAddr1, assetID, bigHundred) + statedb.AddBalanceMultiCoin(userAddr1, assetID, bigHundred) statedb.Finalise(true) return statedb }, @@ -416,7 +416,7 @@ func TestStatefulPrecompile(t *testing.T) { t.Fatal(err) } statedb.SetBalance(userAddr1, u256Hundred) - statedb.SetBalanceMultiCoin(userAddr1, assetID, bigHundred) + statedb.AddBalanceMultiCoin(userAddr1, assetID, bigHundred) statedb.Finalise(true) return statedb }, @@ -437,7 +437,7 @@ func TestStatefulPrecompile(t *testing.T) { t.Fatal(err) } statedb.SetBalance(userAddr1, u256Hundred) - statedb.SetBalanceMultiCoin(userAddr1, assetID, bigHundred) + statedb.AddBalanceMultiCoin(userAddr1, assetID, bigHundred) statedb.Finalise(true) return statedb }, diff --git a/scripts/eth-allowed-packages.txt b/scripts/eth-allowed-packages.txt index 27ebfcea55..6f69e52bcd 100644 --- a/scripts/eth-allowed-packages.txt +++ b/scripts/eth-allowed-packages.txt @@ -25,6 +25,7 @@ "github.com/ava-labs/libevm/ethdb/pebble" "github.com/ava-labs/libevm/event" "github.com/ava-labs/libevm/libevm" +"github.com/ava-labs/libevm/libevm/stateconf" "github.com/ava-labs/libevm/log" "github.com/ava-labs/libevm/params" "github.com/ava-labs/libevm/rlp" diff --git a/sync/statesync/state_syncer.go b/sync/statesync/state_syncer.go index ba1203de12..7031979208 100644 --- a/sync/statesync/state_syncer.go +++ b/sync/statesync/state_syncer.go @@ -35,12 +35,12 @@ type StateSyncerConfig struct { // stateSync keeps the state of the entire state sync operation. type stateSync struct { - db ethdb.Database // database we are syncing - root common.Hash // root of the EVM state we are syncing to - trieDB *triedb.Database // trieDB on top of db we are syncing. used to restore any existing tries. - snapshot snapshot.Snapshot // used to access the database we are syncing as a snapshot. - batchSize int // write batches when they reach this size - client syncclient.Client // used to contact peers over the network + db ethdb.Database // database we are syncing + root common.Hash // root of the EVM state we are syncing to + trieDB *triedb.Database // trieDB on top of db we are syncing. used to restore any existing tries. + snapshot snapshot.SnapshotIterable // used to access the database we are syncing as a snapshot. + batchSize int // write batches when they reach this size + client syncclient.Client // used to contact peers over the network segments chan syncclient.LeafSyncTask // channel of tasks to sync syncer *syncclient.CallbackLeafSyncer // performs the sync, looping over each task's range and invoking specified callbacks