From 3da6b1cc24fd7f4f3b14fe12a01e000cf7317117 Mon Sep 17 00:00:00 2001 From: Jared Wasinger Date: Tue, 20 Feb 2024 05:52:57 -0800 Subject: [PATCH 1/9] core/state: trie prefetcher change: calling trie() doesn't stop the associated subfetcher MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Martin HS Co-authored-by: Péter Szilágyi --- core/state/statedb.go | 7 - core/state/trie_prefetcher.go | 217 +++++++++-------------------- core/state/trie_prefetcher_test.go | 53 +------ 3 files changed, 68 insertions(+), 209 deletions(-) diff --git a/core/state/statedb.go b/core/state/statedb.go index 66cfc8f05a32..f4022f7f18da 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -739,13 +739,6 @@ func (s *StateDB) Copy() *StateDB { // in the middle of a transaction. state.accessList = s.accessList.Copy() state.transientStorage = s.transientStorage.Copy() - - // If there's a prefetcher running, make an inactive copy of it that can - // only access data but does not actively preload (since the user will not - // know that they need to explicitly terminate an active copy). - if s.prefetcher != nil { - state.prefetcher = s.prefetcher.copy() - } return state } diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go index c2a49417d458..a81e528e6cfe 100644 --- a/core/state/trie_prefetcher.go +++ b/core/state/trie_prefetcher.go @@ -37,8 +37,8 @@ var ( type triePrefetcher struct { db Database // Database to fetch trie nodes through root common.Hash // Root hash of the account trie for metrics - fetches map[string]Trie // Partially or fully fetched tries. Only populated for inactive copies. 
fetchers map[string]*subfetcher // Subfetchers for each trie + closed bool deliveryMissMeter metrics.Meter accountLoadMeter metrics.Meter @@ -71,11 +71,12 @@ func newTriePrefetcher(db Database, root common.Hash, namespace string) *triePre return p } -// close iterates over all the subfetchers, aborts any that were left spinning -// and reports the stats to the metrics subsystem. +// close iterates over all the subfetchers, waits on any that were left spinning +// and reports the stats to the metrics subsystem. close should not be called +// more than once on a triePrefetcher instance. func (p *triePrefetcher) close() { for _, fetcher := range p.fetchers { - fetcher.abort() // safe to do multiple times + fetcher.close() if metrics.Enabled { if fetcher.root == p.root { @@ -99,54 +100,18 @@ func (p *triePrefetcher) close() { } } } - // Clear out all fetchers (will crash on a second call, deliberate) - p.fetchers = nil + p.closed = true } -// copy creates a deep-but-inactive copy of the trie prefetcher. Any trie data -// already loaded will be copied over, but no goroutines will be started. This -// is mostly used in the miner which creates a copy of it's actively mutated -// state to be sealed while it may further mutate the state. 
-func (p *triePrefetcher) copy() *triePrefetcher { - copy := &triePrefetcher{ - db: p.db, - root: p.root, - fetches: make(map[string]Trie), // Active prefetchers use the fetches map - - deliveryMissMeter: p.deliveryMissMeter, - accountLoadMeter: p.accountLoadMeter, - accountDupMeter: p.accountDupMeter, - accountSkipMeter: p.accountSkipMeter, - accountWasteMeter: p.accountWasteMeter, - storageLoadMeter: p.storageLoadMeter, - storageDupMeter: p.storageDupMeter, - storageSkipMeter: p.storageSkipMeter, - storageWasteMeter: p.storageWasteMeter, - } - // If the prefetcher is already a copy, duplicate the data - if p.fetches != nil { - for root, fetch := range p.fetches { - if fetch == nil { - continue - } - copy.fetches[root] = p.db.CopyTrie(fetch) - } - return copy - } - // Otherwise we're copying an active fetcher, retrieve the current states - for id, fetcher := range p.fetchers { - copy.fetches[id] = fetcher.peek() - } - return copy -} - -// prefetch schedules a batch of trie items to prefetch. +// prefetch schedules a batch of trie items to prefetch. After the prefetcher is closed, all the following tasks scheduled will not be executed. +// +// prefetch is called from two locations: +// 1. Finalize of the state-objects storage roots. This happens at the end +// of every transaction, meaning that if several transactions touches +// upon the same contract, the parameters invoking this method may be +// repeated. +// 2. Finalize of the main account trie. This happens only once per block. 
func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, addr common.Address, keys [][]byte) { - // If the prefetcher is an inactive one, bail out - if p.fetches != nil { - return - } - // Active fetcher, schedule the retrievals id := p.trieID(owner, root) fetcher := p.fetchers[id] if fetcher == nil { @@ -157,34 +122,24 @@ func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, addr comm } // trie returns the trie matching the root hash, or nil if the prefetcher doesn't -// have it. +// have it. trie is not safe to call concurrently func (p *triePrefetcher) trie(owner common.Hash, root common.Hash) Trie { - // If the prefetcher is inactive, return from existing deep copies - id := p.trieID(owner, root) - if p.fetches != nil { - trie := p.fetches[id] - if trie == nil { - p.deliveryMissMeter.Mark(1) - return nil - } - return p.db.CopyTrie(trie) - } - // Otherwise the prefetcher is active, bail if no trie was prefetched for this root - fetcher := p.fetchers[id] - if fetcher == nil { + // Bail if no trie was prefetched for this root + fetcher := p.fetchers[p.trieID(owner, root)] + if fetcher == nil || fetcher.trie == nil { p.deliveryMissMeter.Mark(1) return nil } - // Interrupt the prefetcher if it's by any chance still running and return - // a copy of any pre-loaded trie. 
- fetcher.abort() // safe to do multiple times - - trie := fetcher.peek() - if trie == nil { - p.deliveryMissMeter.Mark(1) - return nil + if p.closed { + return fetcher.db.CopyTrie(fetcher.trie) } - return trie + trieChan := make(chan Trie) + fetcher.copy <- trieChan + select { + case fetcher.wake <- true: + default: + } + return <-trieChan } // used marks a batch of state items used to allow creating statistics as to @@ -218,10 +173,9 @@ type subfetcher struct { tasks [][]byte // Items queued up for retrieval lock sync.Mutex // Lock protecting the task queue - wake chan struct{} // Wake channel if a new task is scheduled - stop chan struct{} // Channel to interrupt processing + wake chan bool // Wake channel if a new task is scheduled, true if the subfetcher should continue running when there are no pending tasks term chan struct{} // Channel to signal interruption - copy chan chan Trie // Channel to request a copy of the current trie + copy chan chan Trie // channel for retrieving copies of the subfetcher's trie seen map[string]struct{} // Tracks the entries already loaded dups int // Number of duplicate preload tasks @@ -237,10 +191,9 @@ func newSubfetcher(db Database, state common.Hash, owner common.Hash, root commo owner: owner, root: root, addr: addr, - wake: make(chan struct{}, 1), - stop: make(chan struct{}), + wake: make(chan bool, 1), + copy: make(chan chan Trie, 1), term: make(chan struct{}), - copy: make(chan chan Trie), seen: make(map[string]struct{}), } go sf.loop() @@ -251,52 +204,32 @@ func newSubfetcher(db Database, state common.Hash, owner common.Hash, root commo func (sf *subfetcher) schedule(keys [][]byte) { // Append the tasks to the current queue sf.lock.Lock() + sf.tasks = append(sf.tasks, keys...) 
sf.lock.Unlock() - // Notify the prefetcher, it's fine if it's already terminated select { - case sf.wake <- struct{}{}: + case sf.wake <- true: default: } } -// peek tries to retrieve a deep copy of the fetcher's trie in whatever form it -// is currently. -func (sf *subfetcher) peek() Trie { - ch := make(chan Trie) - select { - case sf.copy <- ch: - // Subfetcher still alive, return copy from it - return <-ch - - case <-sf.term: - // Subfetcher already terminated, return a copy directly - if sf.trie == nil { - return nil - } - return sf.db.CopyTrie(sf.trie) - } -} - -// abort interrupts the subfetcher immediately. It is safe to call abort multiple -// times but it is not thread safe. -func (sf *subfetcher) abort() { - select { - case <-sf.stop: - default: - close(sf.stop) - } +// close waits for the subfetcher to finish its tasks. It cannot be called multiple times +func (sf *subfetcher) close() { + // Notify the prefetcher. The wake-chan is buffered, so this is async. + sf.wake <- false + // Wait for it to terminate <-sf.term } -// loop waits for new tasks to be scheduled and keeps loading them until it runs -// out of tasks or its underlying trie is retrieved for committing. +// loop loads newly-scheduled trie tasks as they are received and loads them, stopping +// when requested. func (sf *subfetcher) loop() { // No matter how the loop stops, signal anyone waiting that it's terminated defer close(sf.term) - // Start by opening the trie and stop processing if it fails + // Any calls to trie + // start by opening the trie and stop processing if it fails. 
if sf.owner == (common.Hash{}) { trie, err := sf.db.OpenTrie(sf.root) if err != nil { @@ -315,50 +248,34 @@ func (sf *subfetcher) loop() { sf.trie = trie } // Trie opened successfully, keep prefetching items - for { - select { - case <-sf.wake: - // Subfetcher was woken up, retrieve any tasks to avoid spinning the lock - sf.lock.Lock() - tasks := sf.tasks - sf.tasks = nil - sf.lock.Unlock() - - // Prefetch any tasks until the loop is interrupted - for i, task := range tasks { - select { - case <-sf.stop: - // If termination is requested, add any leftover back and return - sf.lock.Lock() - sf.tasks = append(sf.tasks, tasks[i:]...) - sf.lock.Unlock() - return - - case ch := <-sf.copy: - // Somebody wants a copy of the current trie, grant them - ch <- sf.db.CopyTrie(sf.trie) - - default: - // No termination request yet, prefetch the next entry - if _, ok := sf.seen[string(task)]; ok { - sf.dups++ - } else { - if len(task) == common.AddressLength { - sf.trie.GetAccount(common.BytesToAddress(task)) - } else { - sf.trie.GetStorage(sf.addr, task) - } - sf.seen[string(task)] = struct{}{} - } - } + for keepRunning := range sf.wake { + // Subfetcher was woken up, retrieve any tasks to avoid spinning the lock + sf.lock.Lock() + tasks := sf.tasks + sf.tasks = nil + sf.lock.Unlock() + + // Prefetch all tasks + for _, task := range tasks { + if _, ok := sf.seen[string(task)]; ok { + sf.dups++ + continue } - + if len(task) == common.AddressLength { + sf.trie.GetAccount(common.BytesToAddress(task)) + } else { + sf.trie.GetStorage(sf.addr, task) + } + sf.seen[string(task)] = struct{}{} + } + // if any trie retrieval request is made, ensure it is completed + // after pending tasks have been processed. 
+ select { case ch := <-sf.copy: - // Somebody wants a copy of the current trie, grant them ch <- sf.db.CopyTrie(sf.trie) - - case <-sf.stop: - // Termination is requested, abort and leave remaining tasks + default: + } + if !keepRunning { return } } diff --git a/core/state/trie_prefetcher_test.go b/core/state/trie_prefetcher_test.go index a616adf98f3a..31a58b4a110f 100644 --- a/core/state/trie_prefetcher_test.go +++ b/core/state/trie_prefetcher_test.go @@ -19,7 +19,6 @@ package state import ( "math/big" "testing" - "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" @@ -46,31 +45,6 @@ func filledStateDB() *StateDB { return state } -func TestCopyAndClose(t *testing.T) { - db := filledStateDB() - prefetcher := newTriePrefetcher(db.db, db.originalRoot, "") - skey := common.HexToHash("aaa") - prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) - prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) - time.Sleep(1 * time.Second) - a := prefetcher.trie(common.Hash{}, db.originalRoot) - prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) - b := prefetcher.trie(common.Hash{}, db.originalRoot) - cpy := prefetcher.copy() - cpy.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) - cpy.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) - c := cpy.trie(common.Hash{}, db.originalRoot) - prefetcher.close() - cpy2 := cpy.copy() - cpy2.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) - d := cpy2.trie(common.Hash{}, db.originalRoot) - cpy.close() - cpy2.close() - if a.Hash() != b.Hash() || a.Hash() != c.Hash() || a.Hash() != d.Hash() { - t.Fatalf("Invalid trie, hashes should be equal: %v %v %v %v", a.Hash(), b.Hash(), c.Hash(), d.Hash()) - } -} - func TestUseAfterClose(t *testing.T) { db := filledStateDB() 
prefetcher := newTriePrefetcher(db.db, db.originalRoot, "") @@ -82,32 +56,7 @@ func TestUseAfterClose(t *testing.T) { if a == nil { t.Fatal("Prefetching before close should not return nil") } - if b != nil { - t.Fatal("Trie after close should return nil") - } -} - -func TestCopyClose(t *testing.T) { - db := filledStateDB() - prefetcher := newTriePrefetcher(db.db, db.originalRoot, "") - skey := common.HexToHash("aaa") - prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) - cpy := prefetcher.copy() - a := prefetcher.trie(common.Hash{}, db.originalRoot) - b := cpy.trie(common.Hash{}, db.originalRoot) - prefetcher.close() - c := prefetcher.trie(common.Hash{}, db.originalRoot) - d := cpy.trie(common.Hash{}, db.originalRoot) - if a == nil { - t.Fatal("Prefetching before close should not return nil") - } if b == nil { - t.Fatal("Copy trie should return nil") - } - if c != nil { - t.Fatal("Trie after close should return nil") - } - if d == nil { - t.Fatal("Copy trie should not return nil") + t.Fatal("Trie after close should not return nil") } } From 43babe4f66d6542b9902c1a0fc8c7a220b851115 Mon Sep 17 00:00:00 2001 From: Gary Rong Date: Tue, 5 Mar 2024 15:17:18 +0800 Subject: [PATCH 2/9] core/state: improve prefetcher --- core/state/trie_prefetcher.go | 153 ++++++++++++++++------------- core/state/trie_prefetcher_test.go | 4 +- 2 files changed, 87 insertions(+), 70 deletions(-) diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go index a81e528e6cfe..f9d52b9844b4 100644 --- a/core/state/trie_prefetcher.go +++ b/core/state/trie_prefetcher.go @@ -17,6 +17,7 @@ package state import ( + "errors" "sync" "github.com/ethereum/go-ethereum/common" @@ -27,6 +28,9 @@ import ( var ( // triePrefetchMetricsPrefix is the prefix under which to publish the metrics. triePrefetchMetricsPrefix = "trie/prefetch/" + + // errTerminated is returned if any invocation is applied on a terminated fetcher. 
+ errTerminated = errors.New("fetcher is already terminated") ) // triePrefetcher is an active prefetcher, which receives accounts or storage @@ -43,17 +47,15 @@ type triePrefetcher struct { deliveryMissMeter metrics.Meter accountLoadMeter metrics.Meter accountDupMeter metrics.Meter - accountSkipMeter metrics.Meter accountWasteMeter metrics.Meter storageLoadMeter metrics.Meter storageDupMeter metrics.Meter - storageSkipMeter metrics.Meter storageWasteMeter metrics.Meter } func newTriePrefetcher(db Database, root common.Hash, namespace string) *triePrefetcher { prefix := triePrefetchMetricsPrefix + namespace - p := &triePrefetcher{ + return &triePrefetcher{ db: db, root: root, fetchers: make(map[string]*subfetcher), // Active prefetchers use the fetchers map @@ -61,20 +63,20 @@ func newTriePrefetcher(db Database, root common.Hash, namespace string) *triePre deliveryMissMeter: metrics.GetOrRegisterMeter(prefix+"/deliverymiss", nil), accountLoadMeter: metrics.GetOrRegisterMeter(prefix+"/account/load", nil), accountDupMeter: metrics.GetOrRegisterMeter(prefix+"/account/dup", nil), - accountSkipMeter: metrics.GetOrRegisterMeter(prefix+"/account/skip", nil), accountWasteMeter: metrics.GetOrRegisterMeter(prefix+"/account/waste", nil), storageLoadMeter: metrics.GetOrRegisterMeter(prefix+"/storage/load", nil), storageDupMeter: metrics.GetOrRegisterMeter(prefix+"/storage/dup", nil), - storageSkipMeter: metrics.GetOrRegisterMeter(prefix+"/storage/skip", nil), storageWasteMeter: metrics.GetOrRegisterMeter(prefix+"/storage/waste", nil), } - return p } // close iterates over all the subfetchers, waits on any that were left spinning -// and reports the stats to the metrics subsystem. close should not be called -// more than once on a triePrefetcher instance. +// and reports the stats to the metrics subsystem. func (p *triePrefetcher) close() { + // Short circuit if the fetcher is already closed. 
+ if p.closed { + return + } for _, fetcher := range p.fetchers { fetcher.close() @@ -82,8 +84,6 @@ func (p *triePrefetcher) close() { if fetcher.root == p.root { p.accountLoadMeter.Mark(int64(len(fetcher.seen))) p.accountDupMeter.Mark(int64(fetcher.dups)) - p.accountSkipMeter.Mark(int64(len(fetcher.tasks))) - for _, key := range fetcher.used { delete(fetcher.seen, string(key)) } @@ -91,8 +91,6 @@ func (p *triePrefetcher) close() { } else { p.storageLoadMeter.Mark(int64(len(fetcher.seen))) p.storageDupMeter.Mark(int64(fetcher.dups)) - p.storageSkipMeter.Mark(int64(len(fetcher.tasks))) - for _, key := range fetcher.used { delete(fetcher.seen, string(key)) } @@ -101,50 +99,54 @@ func (p *triePrefetcher) close() { } } p.closed = true + p.fetchers = nil } -// prefetch schedules a batch of trie items to prefetch. After the prefetcher is closed, all the following tasks scheduled will not be executed. +// prefetch schedules a batch of trie items to prefetch. After the prefetcher is +// closed, all the following tasks scheduled will not be executed and an error +// will be returned. // // prefetch is called from two locations: +// // 1. Finalize of the state-objects storage roots. This happens at the end // of every transaction, meaning that if several transactions touches // upon the same contract, the parameters invoking this method may be // repeated. // 2. Finalize of the main account trie. This happens only once per block. 
-func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, addr common.Address, keys [][]byte) { +func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, addr common.Address, keys [][]byte) error { + if p.closed { + return errTerminated + } id := p.trieID(owner, root) fetcher := p.fetchers[id] if fetcher == nil { fetcher = newSubfetcher(p.db, p.root, owner, root, addr) p.fetchers[id] = fetcher } - fetcher.schedule(keys) + return fetcher.schedule(keys) } -// trie returns the trie matching the root hash, or nil if the prefetcher doesn't -// have it. trie is not safe to call concurrently +// trie returns the trie matching the root hash, or nil if either the fetcher +// is terminated or the trie is not available. func (p *triePrefetcher) trie(owner common.Hash, root common.Hash) Trie { + if p.closed { + return nil + } // Bail if no trie was prefetched for this root fetcher := p.fetchers[p.trieID(owner, root)] - if fetcher == nil || fetcher.trie == nil { + if fetcher == nil { p.deliveryMissMeter.Mark(1) return nil } - if p.closed { - return fetcher.db.CopyTrie(fetcher.trie) - } - trieChan := make(chan Trie) - fetcher.copy <- trieChan - select { - case fetcher.wake <- true: - default: - } - return <-trieChan + return fetcher.peek() } // used marks a batch of state items used to allow creating statistics as to -// how useful or wasteful the prefetcher is. +// how useful or wasteful the fetcher is. 
func (p *triePrefetcher) used(owner common.Hash, root common.Hash, used [][]byte) { + if p.closed { + return + } if fetcher := p.fetchers[p.trieID(owner, root)]; fetcher != nil { fetcher.used = used } @@ -173,7 +175,8 @@ type subfetcher struct { tasks [][]byte // Items queued up for retrieval lock sync.Mutex // Lock protecting the task queue - wake chan bool // Wake channel if a new task is scheduled, true if the subfetcher should continue running when there are no pending tasks + wake chan struct{} // Wake channel if a new task is scheduled + stop chan struct{} // Channel to interrupt processing term chan struct{} // Channel to signal interruption copy chan chan Trie // channel for retrieving copies of the subfetcher's trie @@ -191,9 +194,10 @@ func newSubfetcher(db Database, state common.Hash, owner common.Hash, root commo owner: owner, root: root, addr: addr, - wake: make(chan bool, 1), - copy: make(chan chan Trie, 1), + wake: make(chan struct{}), + stop: make(chan struct{}), term: make(chan struct{}), + copy: make(chan chan Trie), seen: make(map[string]struct{}), } go sf.loop() @@ -201,24 +205,41 @@ func newSubfetcher(db Database, state common.Hash, owner common.Hash, root commo } // schedule adds a batch of trie keys to the queue to prefetch. -func (sf *subfetcher) schedule(keys [][]byte) { +func (sf *subfetcher) schedule(keys [][]byte) error { // Append the tasks to the current queue sf.lock.Lock() - sf.tasks = append(sf.tasks, keys...) sf.lock.Unlock() - // Notify the prefetcher, it's fine if it's already terminated + + // Notify the background thread to execute scheduled tasks select { - case sf.wake <- true: - default: + case sf.wake <- struct{}{}: + return nil + case <-sf.term: + return errTerminated + } +} + +// peek tries to retrieve a deep copy of the fetcher's trie. Nil is returned +// if the fetcher is already terminated, or the associated trie is failing +// for opening. 
+func (sf *subfetcher) peek() Trie { + ch := make(chan Trie) + select { + case sf.copy <- ch: + return <-ch + case <-sf.term: + return nil } } // close waits for the subfetcher to finish its tasks. It cannot be called multiple times func (sf *subfetcher) close() { - // Notify the prefetcher. The wake-chan is buffered, so this is async. - sf.wake <- false - // Wait for it to terminate + select { + case <-sf.stop: + default: + close(sf.stop) + } <-sf.term } @@ -228,8 +249,7 @@ func (sf *subfetcher) loop() { // No matter how the loop stops, signal anyone waiting that it's terminated defer close(sf.term) - // Any calls to trie - // start by opening the trie and stop processing if it fails. + // Start by opening the trie and stop processing if it fails. if sf.owner == (common.Hash{}) { trie, err := sf.db.OpenTrie(sf.root) if err != nil { @@ -238,8 +258,6 @@ func (sf *subfetcher) loop() { } sf.trie = trie } else { - // The trie argument can be nil as verkle doesn't support prefetching - // yet. TODO FIX IT(rjl493456442), otherwise code will panic here. 
trie, err := sf.db.OpenStorageTrie(sf.state, sf.addr, sf.root, nil) if err != nil { log.Warn("Trie prefetcher failed opening trie", "root", sf.root, "err", err) @@ -248,34 +266,33 @@ func (sf *subfetcher) loop() { sf.trie = trie } // Trie opened successfully, keep prefetching items - for keepRunning := range sf.wake { - // Subfetcher was woken up, retrieve any tasks to avoid spinning the lock - sf.lock.Lock() - tasks := sf.tasks - sf.tasks = nil - sf.lock.Unlock() + for { + select { + case <-sf.wake: + // Execute all remaining tasks in single run + sf.lock.Lock() + tasks := sf.tasks + sf.tasks = nil + sf.lock.Unlock() - // Prefetch all tasks - for _, task := range tasks { - if _, ok := sf.seen[string(task)]; ok { - sf.dups++ - continue - } - if len(task) == common.AddressLength { - sf.trie.GetAccount(common.BytesToAddress(task)) - } else { - sf.trie.GetStorage(sf.addr, task) + for _, task := range tasks { + if _, ok := sf.seen[string(task)]; ok { + sf.dups++ + continue + } + if len(task) == common.AddressLength { + sf.trie.GetAccount(common.BytesToAddress(task)) + } else { + sf.trie.GetStorage(sf.addr, task) + } + sf.seen[string(task)] = struct{}{} } - sf.seen[string(task)] = struct{}{} - } - // if any trie retrieval request is made, ensure it is completed - // after pending tasks have been processed. - select { case ch := <-sf.copy: + // Somebody wants a copy of the current trie, grant them. 
ch <- sf.db.CopyTrie(sf.trie) - default: - } - if !keepRunning { + + case <-sf.stop: + // Termination is requested, abort return } } diff --git a/core/state/trie_prefetcher_test.go b/core/state/trie_prefetcher_test.go index 31a58b4a110f..8a54e4beab3d 100644 --- a/core/state/trie_prefetcher_test.go +++ b/core/state/trie_prefetcher_test.go @@ -56,7 +56,7 @@ func TestUseAfterClose(t *testing.T) { if a == nil { t.Fatal("Prefetching before close should not return nil") } - if b == nil { - t.Fatal("Trie after close should not return nil") + if b != nil { + t.Fatal("Trie after close should return nil") } } From f166ce17e79626814dfa2a28a2db358ad19c736b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 12 Apr 2024 15:04:06 +0300 Subject: [PATCH 3/9] core/state: restore async prefetcher stask scheduling --- core/blockchain.go | 8 +- core/state/state_object.go | 17 ++- core/state/statedb.go | 50 ++++++--- core/state/trie_prefetcher.go | 175 +++++++++++++++++++---------- core/state/trie_prefetcher_test.go | 23 ++-- 5 files changed, 180 insertions(+), 93 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 654b4fbdcac2..564410a1bc85 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1806,8 +1806,12 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) } statedb.SetLogger(bc.logger) - // Enable prefetching to pull in trie node paths while processing transactions - statedb.StartPrefetcher("chain") + // If we are past Byzantium, enable prefetching to pull in trie node paths + // while processing transactions. Before Byzantium the prefetcher is mostly + // useless due to the intermediate root hashing after each transaction. 
+ if bc.chainConfig.IsByzantium(block.Number()) { + statedb.StartPrefetcher("chain") + } activeState = statedb // If we have a followup block, run that against the current state to pre-cache diff --git a/core/state/state_object.go b/core/state/state_object.go index d75ba01376bd..e3200815f71c 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -126,7 +126,12 @@ func (s *stateObject) getTrie() (Trie, error) { // Try fetching from prefetcher first if s.data.Root != types.EmptyRootHash && s.db.prefetcher != nil { // When the miner is creating the pending state, there is no prefetcher - s.trie = s.db.prefetcher.trie(s.addrHash, s.data.Root) + trie, err := s.db.prefetcher.trie(s.addrHash, s.data.Root) + if err != nil { + log.Error("Failed to retrieve storage pre-fetcher trie", "addr", s.address, "err", err) + } else { + s.trie = trie + } } if s.trie == nil { tr, err := s.db.db.OpenStorageTrie(s.db.originalRoot, s.address, s.data.Root, s.db.trie) @@ -253,7 +258,7 @@ func (s *stateObject) setState(key common.Hash, value *common.Hash) { // finalise moves all dirty storage slots into the pending area to be hashed or // committed later. It is invoked at the end of every transaction. 
-func (s *stateObject) finalise(prefetch bool) { +func (s *stateObject) finalise() { slotsToPrefetch := make([][]byte, 0, len(s.dirtyStorage)) for key, value := range s.dirtyStorage { // If the slot is different from its original value, move it into the @@ -268,8 +273,10 @@ func (s *stateObject) finalise(prefetch bool) { delete(s.pendingStorage, key) } } - if s.db.prefetcher != nil && prefetch && len(slotsToPrefetch) > 0 && s.data.Root != types.EmptyRootHash { - s.db.prefetcher.prefetch(s.addrHash, s.data.Root, s.address, slotsToPrefetch) + if s.db.prefetcher != nil && len(slotsToPrefetch) > 0 && s.data.Root != types.EmptyRootHash { + if err := s.db.prefetcher.prefetch(s.addrHash, s.data.Root, s.address, slotsToPrefetch); err != nil { + log.Error("Failed to prefetch slots", "addr", s.address, "slots", len(slotsToPrefetch), "err", err) + } } if len(s.dirtyStorage) > 0 { s.dirtyStorage = make(Storage) @@ -288,7 +295,7 @@ func (s *stateObject) finalise(prefetch bool) { // storage change at all. func (s *stateObject) updateTrie() (Trie, error) { // Make sure all dirty slots are finalized into the pending storage area - s.finalise(false) + s.finalise() // Short circuit if nothing changed, don't bother with hashing anything if len(s.pendingStorage) == 0 { diff --git a/core/state/statedb.go b/core/state/statedb.go index f4022f7f18da..cd91c21196c2 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -210,7 +210,8 @@ func (s *StateDB) SetLogger(l *tracing.Hooks) { // commit phase, most of the needed data is already hot. func (s *StateDB) StartPrefetcher(namespace string) { if s.prefetcher != nil { - s.prefetcher.close() + s.prefetcher.terminate() + s.prefetcher.report() s.prefetcher = nil } if s.snap != nil { @@ -222,7 +223,8 @@ func (s *StateDB) StartPrefetcher(namespace string) { // from the gathered metrics. 
func (s *StateDB) StopPrefetcher() { if s.prefetcher != nil { - s.prefetcher.close() + s.prefetcher.terminate() + s.prefetcher.report() s.prefetcher = nil } } @@ -809,7 +811,7 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { delete(s.accountsOrigin, obj.address) // Clear out any previously updated account data (may be recreated via a resurrect) delete(s.storagesOrigin, obj.address) // Clear out any previously updated storage data (may be recreated via a resurrect) } else { - obj.finalise(true) // Prefetch slots in the background + obj.finalise() s.markUpdate(addr) } // At this point, also ship the address off to the precacher. The precacher @@ -818,7 +820,9 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure } if s.prefetcher != nil && len(addressesToPrefetch) > 0 { - s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, addressesToPrefetch) + if err := s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, addressesToPrefetch); err != nil { + log.Error("Failed to prefetch addresses", "addresses", len(addressesToPrefetch), "err", err) + } } // Invalidate journal because reverting across transactions is not allowed. s.clearJournalAndRefund() @@ -831,18 +835,14 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { // Finalise all the dirty storage states and write them into the tries s.Finalise(deleteEmptyObjects) - // If there was a trie prefetcher operating, it gets aborted and irrevocably - // modified after we start retrieving tries. Remove it from the statedb after - // this round of use. - // - // This is weird pre-byzantium since the first tx runs with a prefetcher and - // the remainder without, but pre-byzantium even the initial prefetcher is - // useless, so no sleep lost. 
- prefetcher := s.prefetcher + // If there was a trie prefetcher operating, terminate it (blocking until + // all tasks finish) and then proceed with the trie hashing. + var subfetchers chan *subfetcher if s.prefetcher != nil { + subfetchers = s.prefetcher.terminateAsync() defer func() { - s.prefetcher.close() - s.prefetcher = nil + s.prefetcher.report() + s.prefetcher = nil // Pre-byzantium, unset any used up prefetcher }() } // Although naively it makes sense to retrieve the account trie and then do @@ -851,6 +851,18 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { // first, giving the account prefetches just a few more milliseconds of time // to pull useful data from disk. start := time.Now() + + updated := make(map[common.Address]struct{}) + if subfetchers != nil { + for f := range subfetchers { + if op, ok := s.mutations[f.addr]; ok { + if !op.applied && !op.isDelete() { + s.stateObjects[f.addr].updateRoot() + } + updated[f.addr] = struct{}{} + } + } + } for addr, op := range s.mutations { if op.applied { continue @@ -865,8 +877,10 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { // Now we're about to start to write changes to the trie. The trie is so far // _untouched_. We can check with the prefetcher, if it can give us a trie // which has the same root, but also has some content loaded into it. 
- if prefetcher != nil { - if trie := prefetcher.trie(common.Hash{}, s.originalRoot); trie != nil { + if s.prefetcher != nil { + if trie, err := s.prefetcher.trie(common.Hash{}, s.originalRoot); err != nil { + log.Error("Failed to retrieve account pre-fetcher trie", "err", err) + } else if trie != nil { s.trie = trie } } @@ -902,8 +916,8 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { s.deleteStateObject(deletedAddr) s.AccountDeleted += 1 } - if prefetcher != nil { - prefetcher.used(common.Hash{}, s.originalRoot, usedAddrs) + if s.prefetcher != nil { + s.prefetcher.used(common.Hash{}, s.originalRoot, usedAddrs) } // Track the amount of time wasted on hashing the account trie defer func(start time.Time) { s.AccountHashes += time.Since(start) }(time.Now()) diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go index f9d52b9844b4..c20915cea109 100644 --- a/core/state/trie_prefetcher.go +++ b/core/state/trie_prefetcher.go @@ -29,8 +29,13 @@ var ( // triePrefetchMetricsPrefix is the prefix under which to publish the metrics. triePrefetchMetricsPrefix = "trie/prefetch/" - // errTerminated is returned if any invocation is applied on a terminated fetcher. + // errTerminated is returned if a fetcher is attempted to be operated after it + // has already terminated. errTerminated = errors.New("fetcher is already terminated") + + // errNotTerminated is returned if a fetchers data is attempted to be retrieved + // before it terminates. 
+ errNotTerminated = errors.New("fetcher is not yet terminated") ) // triePrefetcher is an active prefetcher, which receives accounts or storage @@ -42,7 +47,7 @@ type triePrefetcher struct { db Database // Database to fetch trie nodes through root common.Hash // Root hash of the account trie for metrics fetchers map[string]*subfetcher // Subfetchers for each trie - closed bool + term chan struct{} // Channel to signal interruption deliveryMissMeter metrics.Meter accountLoadMeter metrics.Meter @@ -59,6 +64,7 @@ func newTriePrefetcher(db Database, root common.Hash, namespace string) *triePre db: db, root: root, fetchers: make(map[string]*subfetcher), // Active prefetchers use the fetchers map + term: make(chan struct{}), deliveryMissMeter: metrics.GetOrRegisterMeter(prefix+"/deliverymiss", nil), accountLoadMeter: metrics.GetOrRegisterMeter(prefix+"/account/load", nil), @@ -70,36 +76,74 @@ func newTriePrefetcher(db Database, root common.Hash, namespace string) *triePre } } -// close iterates over all the subfetchers, waits on any that were left spinning -// and reports the stats to the metrics subsystem. -func (p *triePrefetcher) close() { - // Short circuit if the fetcher is already closed. - if p.closed { +// terminate iterates over all the subfetchers, waiting on any that still spin. 
+func (p *triePrefetcher) terminate() { + // Short circuit if the fetcher is already closed + select { + case <-p.term: return + default: } + // Termiante all sub-fetchers synchronously and close the main fetcher for _, fetcher := range p.fetchers { - fetcher.close() + fetcher.terminate() + } + close(p.term) +} - if metrics.Enabled { - if fetcher.root == p.root { - p.accountLoadMeter.Mark(int64(len(fetcher.seen))) - p.accountDupMeter.Mark(int64(fetcher.dups)) - for _, key := range fetcher.used { - delete(fetcher.seen, string(key)) - } - p.accountWasteMeter.Mark(int64(len(fetcher.seen))) - } else { - p.storageLoadMeter.Mark(int64(len(fetcher.seen))) - p.storageDupMeter.Mark(int64(fetcher.dups)) - for _, key := range fetcher.used { - delete(fetcher.seen, string(key)) - } - p.storageWasteMeter.Mark(int64(len(fetcher.seen))) +// terminateAsync iterates over all the subfetchers and terminates them async, +// feeding each into a result channel as they finish. +func (p *triePrefetcher) terminateAsync() chan *subfetcher { + // Short circuit if the fetcher is already closed + select { + case <-p.term: + return nil + default: + } + // Terminate all the sub-fetchers asynchronously and feed them into a result + // channel as they finish + var ( + res = make(chan *subfetcher, len(p.fetchers)) + pend sync.WaitGroup + ) + for _, fetcher := range p.fetchers { + pend.Add(1) + go func(f *subfetcher) { + f.terminate() + res <- f + pend.Done() + }(fetcher) + } + go func() { + pend.Wait() + close(res) + }() + close(p.term) + return res +} + +// report aggregates the pre-fetching and usage metrics and reports them. 
+func (p *triePrefetcher) report() { + if !metrics.Enabled { + return + } + for _, fetcher := range p.fetchers { + if fetcher.root == p.root { + p.accountLoadMeter.Mark(int64(len(fetcher.seen))) + p.accountDupMeter.Mark(int64(fetcher.dups)) + for _, key := range fetcher.used { + delete(fetcher.seen, string(key)) + } + p.accountWasteMeter.Mark(int64(len(fetcher.seen))) + } else { + p.storageLoadMeter.Mark(int64(len(fetcher.seen))) + p.storageDupMeter.Mark(int64(fetcher.dups)) + for _, key := range fetcher.used { + delete(fetcher.seen, string(key)) } + p.storageWasteMeter.Mark(int64(len(fetcher.seen))) } } - p.closed = true - p.fetchers = nil } // prefetch schedules a batch of trie items to prefetch. After the prefetcher is @@ -114,8 +158,11 @@ func (p *triePrefetcher) close() { // repeated. // 2. Finalize of the main account trie. This happens only once per block. func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, addr common.Address, keys [][]byte) error { - if p.closed { + // Ensure the subfetcher is still alive + select { + case <-p.term: return errTerminated + default: } id := p.trieID(owner, root) fetcher := p.fetchers[id] @@ -128,15 +175,13 @@ func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, addr comm // trie returns the trie matching the root hash, or nil if either the fetcher // is terminated or the trie is not available. 
-func (p *triePrefetcher) trie(owner common.Hash, root common.Hash) Trie { - if p.closed { - return nil - } +func (p *triePrefetcher) trie(owner common.Hash, root common.Hash) (Trie, error) { // Bail if no trie was prefetched for this root fetcher := p.fetchers[p.trieID(owner, root)] if fetcher == nil { + log.Warn("Prefetcher missed to load trie", "owner", owner, "root", root) p.deliveryMissMeter.Mark(1) - return nil + return nil, nil } return fetcher.peek() } @@ -144,9 +189,6 @@ func (p *triePrefetcher) trie(owner common.Hash, root common.Hash) Trie { // used marks a batch of state items used to allow creating statistics as to // how useful or wasteful the fetcher is. func (p *triePrefetcher) used(owner common.Hash, root common.Hash, used [][]byte) { - if p.closed { - return - } if fetcher := p.fetchers[p.trieID(owner, root)]; fetcher != nil { fetcher.used = used } @@ -175,10 +217,9 @@ type subfetcher struct { tasks [][]byte // Items queued up for retrieval lock sync.Mutex // Lock protecting the task queue - wake chan struct{} // Wake channel if a new task is scheduled - stop chan struct{} // Channel to interrupt processing - term chan struct{} // Channel to signal interruption - copy chan chan Trie // channel for retrieving copies of the subfetcher's trie + wake chan struct{} // Wake channel if a new task is scheduled + stop chan struct{} // Channel to interrupt processing + term chan struct{} // Channel to signal interruption seen map[string]struct{} // Tracks the entries already loaded dups int // Number of duplicate preload tasks @@ -194,10 +235,9 @@ func newSubfetcher(db Database, state common.Hash, owner common.Hash, root commo owner: owner, root: root, addr: addr, - wake: make(chan struct{}), + wake: make(chan struct{}, 1), stop: make(chan struct{}), term: make(chan struct{}), - copy: make(chan chan Trie), seen: make(map[string]struct{}), } go sf.loop() @@ -206,6 +246,12 @@ func newSubfetcher(db Database, state common.Hash, owner common.Hash, root commo // 
schedule adds a batch of trie keys to the queue to prefetch. func (sf *subfetcher) schedule(keys [][]byte) error { + // Ensure the subfetcher is still alive + select { + case <-sf.term: + return errTerminated + default: + } // Append the tasks to the current queue sf.lock.Lock() sf.tasks = append(sf.tasks, keys...) @@ -214,27 +260,31 @@ func (sf *subfetcher) schedule(keys [][]byte) error { // Notify the background thread to execute scheduled tasks select { case sf.wake <- struct{}{}: - return nil - case <-sf.term: - return errTerminated + // Wake signal sent + default: + // Wake signal not sent as a previous is already queued } + return nil } -// peek tries to retrieve a deep copy of the fetcher's trie. Nil is returned -// if the fetcher is already terminated, or the associated trie is failing -// for opening. -func (sf *subfetcher) peek() Trie { - ch := make(chan Trie) +// peek retrieves the fetcher's trie, populated with any pre-fetched data. The +// returned trie will be a shallow copy, so modifying it will break subsequent +// peeks for the original data. +// +// This method can only be called after closing the subfetcher. +func (sf *subfetcher) peek() (Trie, error) { + // Ensure the subfetcher finished operating on its trie select { - case sf.copy <- ch: - return <-ch case <-sf.term: - return nil + default: + return nil, errNotTerminated } + return sf.trie, nil } -// close waits for the subfetcher to finish its tasks. It cannot be called multiple times -func (sf *subfetcher) close() { +// terminate waits for the subfetcher to finish its tasks, after which it tears +// down all the internal background loaders. +func (sf *subfetcher) terminate() { select { case <-sf.stop: default: @@ -249,7 +299,7 @@ func (sf *subfetcher) loop() { // No matter how the loop stops, signal anyone waiting that it's terminated defer close(sf.term) - // Start by opening the trie and stop processing if it fails. 
+ // Start by opening the trie and stop processing if it fails if sf.owner == (common.Hash{}) { trie, err := sf.db.OpenTrie(sf.root) if err != nil { @@ -287,13 +337,20 @@ func (sf *subfetcher) loop() { } sf.seen[string(task)] = struct{}{} } - case ch := <-sf.copy: - // Somebody wants a copy of the current trie, grant them. - ch <- sf.db.CopyTrie(sf.trie) case <-sf.stop: - // Termination is requested, abort - return + // Termination is requested, abort if no more tasks are pending. If + // there are some, exhaust them first. + sf.lock.Lock() + done := sf.tasks == nil + sf.lock.Unlock() + + if done { + return + } + // Some tasks are pending, loop and pick them up (that wake branch + // will be selected eventually, whilst stop remains closed to this + // branch will also run afterwards). } } } diff --git a/core/state/trie_prefetcher_test.go b/core/state/trie_prefetcher_test.go index 8a54e4beab3d..4e5414f0e368 100644 --- a/core/state/trie_prefetcher_test.go +++ b/core/state/trie_prefetcher_test.go @@ -45,18 +45,23 @@ func filledStateDB() *StateDB { return state } -func TestUseAfterClose(t *testing.T) { +func TestUseAfterTerminate(t *testing.T) { db := filledStateDB() prefetcher := newTriePrefetcher(db.db, db.originalRoot, "") skey := common.HexToHash("aaa") - prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) - a := prefetcher.trie(common.Hash{}, db.originalRoot) - prefetcher.close() - b := prefetcher.trie(common.Hash{}, db.originalRoot) - if a == nil { - t.Fatal("Prefetching before close should not return nil") + + if err := prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}); err != nil { + t.Errorf("Prefetch failed before terminate: %v", err) + } + if _, err := prefetcher.trie(common.Hash{}, db.originalRoot); err == nil { + t.Errorf("Trie retrieval succeeded before terminate") + } + prefetcher.terminate() + + if err := prefetcher.prefetch(common.Hash{}, db.originalRoot, 
common.Address{}, [][]byte{skey.Bytes()}); err == nil { + t.Errorf("Prefetch succeeded after terminate: %v", err) } - if b != nil { - t.Fatal("Trie after close should return nil") + if _, err := prefetcher.trie(common.Hash{}, db.originalRoot); err != nil { + t.Errorf("Trie retrieval failed after terminate: %v", err) } } From f5ec2e78419c467a465875f67d69feffe6c8d801 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 3 May 2024 12:33:45 +0200 Subject: [PATCH 4/9] core/state: finish prefetching async and process storage updates async --- core/state/state_object.go | 22 +++++++-- core/state/statedb.go | 77 +++++++++++++---------------- core/state/trie_prefetcher.go | 78 ++++++++++-------------------- core/state/trie_prefetcher_test.go | 5 +- 4 files changed, 79 insertions(+), 103 deletions(-) diff --git a/core/state/state_object.go b/core/state/state_object.go index e3200815f71c..4df29ba8114b 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -21,6 +21,7 @@ import ( "fmt" "io" "maps" + "sync" "time" "github.com/ethereum/go-ethereum/common" @@ -33,6 +34,14 @@ import ( "github.com/holiman/uint256" ) +// hasherPool holds a pool of hashers used by state objects during concurrent +// trie updates. +var hasherPool = sync.Pool{ + New: func() interface{} { + return crypto.NewKeccakState() + }, +} + type Storage map[common.Hash]common.Hash func (s Storage) Copy() Storage { @@ -314,6 +323,9 @@ func (s *stateObject) updateTrie() (Trie, error) { // Insert all the pending storage updates into the trie usedStorage := make([][]byte, 0, len(s.pendingStorage)) + hasher := hasherPool.Get().(crypto.KeccakState) + defer hasherPool.Put(hasher) + // Perform trie updates before deletions. 
This prevents resolution of unnecessary trie nodes // in circumstances similar to the following: // @@ -342,26 +354,30 @@ func (s *stateObject) updateTrie() (Trie, error) { s.db.setError(err) return nil, err } - s.db.StorageUpdated += 1 + s.db.StorageUpdated.Add(1) } else { deletions = append(deletions, key) } // Cache the mutated storage slots until commit if storage == nil { + s.db.storagesLock.Lock() if storage = s.db.storages[s.addrHash]; storage == nil { storage = make(map[common.Hash][]byte) s.db.storages[s.addrHash] = storage } + s.db.storagesLock.Unlock() } - khash := crypto.HashData(s.db.hasher, key[:]) + khash := crypto.HashData(hasher, key[:]) storage[khash] = encoded // encoded will be nil if it's deleted // Cache the original value of mutated storage slots if origin == nil { + s.db.storagesLock.Lock() if origin = s.db.storagesOrigin[s.address]; origin == nil { origin = make(map[common.Hash][]byte) s.db.storagesOrigin[s.address] = origin } + s.db.storagesLock.Unlock() } // Track the original value of slot only if it's mutated first time if _, ok := origin[khash]; !ok { @@ -381,7 +397,7 @@ func (s *stateObject) updateTrie() (Trie, error) { s.db.setError(err) return nil, err } - s.db.StorageDeleted += 1 + s.db.StorageDeleted.Add(1) } // If no slots were touched, issue a warning as we shouldn't have done all // the above work in the first place diff --git a/core/state/statedb.go b/core/state/statedb.go index cd91c21196c2..14e445683055 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -24,6 +24,7 @@ import ( "slices" "sort" "sync" + "sync/atomic" "time" "github.com/ethereum/go-ethereum/common" @@ -92,10 +93,12 @@ type StateDB struct { // These maps hold the state changes (including the corresponding // original value) that occurred in this **block**. 
- accounts map[common.Hash][]byte // The mutated accounts in 'slim RLP' encoding + accounts map[common.Hash][]byte // The mutated accounts in 'slim RLP' encoding + accountsOrigin map[common.Address][]byte // The original value of mutated accounts in 'slim RLP' encoding + storages map[common.Hash]map[common.Hash][]byte // The mutated slots in prefix-zero trimmed rlp format - accountsOrigin map[common.Address][]byte // The original value of mutated accounts in 'slim RLP' encoding storagesOrigin map[common.Address]map[common.Hash][]byte // The original value of mutated slots in prefix-zero trimmed rlp format + storagesLock sync.Mutex // Mutex protecting the maps during concurrent updates/commits // This map holds 'live' objects, which will get modified while // processing a state transition. @@ -161,9 +164,9 @@ type StateDB struct { TrieDBCommits time.Duration AccountUpdated int - StorageUpdated int + StorageUpdated atomic.Int64 AccountDeleted int - StorageDeleted int + StorageDeleted atomic.Int64 // Testing hooks onCommit func(states *triestate.Set) // Hook invoked when commit is performed @@ -210,7 +213,7 @@ func (s *StateDB) SetLogger(l *tracing.Hooks) { // commit phase, most of the needed data is already hot. func (s *StateDB) StartPrefetcher(namespace string) { if s.prefetcher != nil { - s.prefetcher.terminate() + s.prefetcher.terminate(false) s.prefetcher.report() s.prefetcher = nil } @@ -223,7 +226,7 @@ func (s *StateDB) StartPrefetcher(namespace string) { // from the gathered metrics. func (s *StateDB) StopPrefetcher() { if s.prefetcher != nil { - s.prefetcher.terminate() + s.prefetcher.terminate(false) s.prefetcher.report() s.prefetcher = nil } @@ -542,9 +545,6 @@ func (s *StateDB) GetTransientState(addr common.Address, key common.Hash) common // updateStateObject writes the given object to the trie. 
func (s *StateDB) updateStateObject(obj *stateObject) { - // Track the amount of time wasted on updating the account from the trie - defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now()) - // Encode the account and update the account trie addr := obj.Address() if err := s.trie.UpdateAccount(addr, &obj.data); err != nil { @@ -573,10 +573,6 @@ func (s *StateDB) updateStateObject(obj *stateObject) { // deleteStateObject removes the given object from the state trie. func (s *StateDB) deleteStateObject(addr common.Address) { - // Track the amount of time wasted on deleting the account from the trie - defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now()) - - // Delete the account from the trie if err := s.trie.DeleteAccount(addr); err != nil { s.setError(fmt.Errorf("deleteStateObject (%x) error: %v", addr[:], err)) } @@ -835,48 +831,40 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { // Finalise all the dirty storage states and write them into the tries s.Finalise(deleteEmptyObjects) - // If there was a trie prefetcher operating, terminate it (blocking until - // all tasks finish) and then proceed with the trie hashing. - var subfetchers chan *subfetcher + // If there was a trie prefetcher operating, terminate it async so that the + // individual storage tries can be updated as soon as the disk load finishes. if s.prefetcher != nil { - subfetchers = s.prefetcher.terminateAsync() + s.prefetcher.terminate(true) defer func() { s.prefetcher.report() s.prefetcher = nil // Pre-byzantium, unset any used up prefetcher }() } - // Although naively it makes sense to retrieve the account trie and then do - // the contract storage and account updates sequentially, that short circuits - // the account prefetcher. Instead, let's process all the storage updates - // first, giving the account prefetches just a few more milliseconds of time - // to pull useful data from disk. 
- start := time.Now() - - updated := make(map[common.Address]struct{}) - if subfetchers != nil { - for f := range subfetchers { - if op, ok := s.mutations[f.addr]; ok { - if !op.applied && !op.isDelete() { - s.stateObjects[f.addr].updateRoot() - } - updated[f.addr] = struct{}{} - } - } - } + // Process all storage updates concurrently. The state object update root + // method will internally call a blocking trie fetch from the prefetcher, + // so there's no need to explicitly wait for the prefetchers to finish. + var ( + start = time.Now() + workers errgroup.Group + ) for addr, op := range s.mutations { - if op.applied { - continue - } - if op.isDelete() { + if op.applied || op.isDelete() { continue } - s.stateObjects[addr].updateRoot() + obj := s.stateObjects[addr] // closure for the task runner below + workers.Go(func() error { + obj.updateRoot() + return nil + }) } + workers.Wait() s.StorageUpdates += time.Since(start) // Now we're about to start to write changes to the trie. The trie is so far // _untouched_. We can check with the prefetcher, if it can give us a trie // which has the same root, but also has some content loaded into it. 
+ start = time.Now() + if s.prefetcher != nil { if trie, err := s.prefetcher.trie(common.Hash{}, s.originalRoot); err != nil { log.Error("Failed to retrieve account pre-fetcher trie", "err", err) @@ -916,6 +904,8 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { s.deleteStateObject(deletedAddr) s.AccountDeleted += 1 } + s.AccountUpdates += time.Since(start) + if s.prefetcher != nil { s.prefetcher.used(common.Hash{}, s.originalRoot, usedAddrs) } @@ -1258,15 +1248,16 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er return common.Hash{}, err } accountUpdatedMeter.Mark(int64(s.AccountUpdated)) - storageUpdatedMeter.Mark(int64(s.StorageUpdated)) + storageUpdatedMeter.Mark(s.StorageUpdated.Load()) accountDeletedMeter.Mark(int64(s.AccountDeleted)) - storageDeletedMeter.Mark(int64(s.StorageDeleted)) + storageDeletedMeter.Mark(s.StorageDeleted.Load()) accountTrieUpdatedMeter.Mark(int64(accountTrieNodesUpdated)) accountTrieDeletedMeter.Mark(int64(accountTrieNodesDeleted)) storageTriesUpdatedMeter.Mark(int64(storageTrieNodesUpdated)) storageTriesDeletedMeter.Mark(int64(storageTrieNodesDeleted)) s.AccountUpdated, s.AccountDeleted = 0, 0 - s.StorageUpdated, s.StorageDeleted = 0, 0 + s.StorageUpdated.Store(0) + s.StorageDeleted.Store(0) // If snapshotting is enabled, update the snapshot tree with this new version if s.snap != nil { diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go index c20915cea109..63816ceeccaa 100644 --- a/core/state/trie_prefetcher.go +++ b/core/state/trie_prefetcher.go @@ -76,52 +76,23 @@ func newTriePrefetcher(db Database, root common.Hash, namespace string) *triePre } } -// terminate iterates over all the subfetchers, waiting on any that still spin. -func (p *triePrefetcher) terminate() { +// terminate iterates over all the subfetchers and issues a terminateion request +// to all of them. 
Depending on the async parameter, the method will either block
+// until all subfetchers spin down, or return immediately.
+func (p *triePrefetcher) terminate(async bool) {
 	// Short circuit if the fetcher is already closed
 	select {
 	case <-p.term:
 		return
 	default:
 	}
-	// Termiante all sub-fetchers synchronously and close the main fetcher
+	// Terminate all sub-fetchers, sync or async, depending on the request
 	for _, fetcher := range p.fetchers {
-		fetcher.terminate()
+		fetcher.terminate(async)
 	}
 	close(p.term)
 }
 
-// terminateAsync iterates over all the subfetchers and terminates them async,
-// feeding each into a result channel as they finish.
-func (p *triePrefetcher) terminateAsync() chan *subfetcher {
-	// Short circuit if the fetcher is already closed
-	select {
-	case <-p.term:
-		return nil
-	default:
-	}
-	// Terminate all the sub-fetchers asynchronously and feed them into a result
-	// channel as they finish
-	var (
-		res  = make(chan *subfetcher, len(p.fetchers))
-		pend sync.WaitGroup
-	)
-	for _, fetcher := range p.fetchers {
-		pend.Add(1)
-		go func(f *subfetcher) {
-			f.terminate()
-			res <- f
-			pend.Done()
-		}(fetcher)
-	}
-	go func() {
-		pend.Wait()
-		close(res)
-	}()
-	close(p.term)
-	return res
-}
-
 // report aggregates the pre-fetching and usage metrics and reports them.
 func (p *triePrefetcher) report() {
 	if !metrics.Enabled {
 		return
@@ -173,17 +144,19 @@ func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, addr comm
 	return fetcher.schedule(keys)
 }
 
-// trie returns the trie matching the root hash, or nil if either the fetcher
-// is terminated or the trie is not available.
+// trie returns the trie matching the root hash, blocking until the fetcher of
+// the given trie terminates. If no fetcher exists for the request, nil will be
+// returned.
func (p *triePrefetcher) trie(owner common.Hash, root common.Hash) (Trie, error) {
 	// Bail if no trie was prefetched for this root
 	fetcher := p.fetchers[p.trieID(owner, root)]
 	if fetcher == nil {
-		log.Warn("Prefetcher missed to load trie", "owner", owner, "root", root)
+		log.Error("Prefetcher missed to load trie", "owner", owner, "root", root)
 		p.deliveryMissMeter.Mark(1)
 		return nil, nil
 	}
-	return fetcher.peek()
+	// Subfetcher exists, retrieve its trie
+	return fetcher.peek(), nil
 }
 
 // used marks a batch of state items used to allow creating statistics as to
@@ -269,27 +242,26 @@ func (sf *subfetcher) schedule(keys [][]byte) error {
 
 // peek retrieves the fetcher's trie, populated with any pre-fetched data. The
 // returned trie will be a shallow copy, so modifying it will break subsequent
-// peeks for the original data.
-//
-// This method can only be called after closing the subfetcher.
-func (sf *subfetcher) peek() (Trie, error) {
-	// Ensure the subfetcher finished operating on its trie
-	select {
-	case <-sf.term:
-	default:
-		return nil, errNotTerminated
-	}
-	return sf.trie, nil
+// peeks for the original data. The method will block until all the scheduled
+// data has been loaded and the fetcher terminated.
+func (sf *subfetcher) peek() Trie {
+	// Block until the fetcher terminates, then retrieve the trie
+	<-sf.term
+	return sf.trie
}
 
-// terminate waits for the subfetcher to finish its tasks, after which it tears
-// down all the internal background loaders.
-func (sf *subfetcher) terminate() {
+// terminate requests the subfetcher to stop accepting new tasks and spin down
+// as soon as everything is loaded. Depending on the async parameter, the method
+// will either block until all disk loads finish or return immediately.
+func (sf *subfetcher) terminate(async bool) { select { case <-sf.stop: default: close(sf.stop) } + if async { + return + } <-sf.term } diff --git a/core/state/trie_prefetcher_test.go b/core/state/trie_prefetcher_test.go index 4e5414f0e368..d6788fad9936 100644 --- a/core/state/trie_prefetcher_test.go +++ b/core/state/trie_prefetcher_test.go @@ -53,10 +53,7 @@ func TestUseAfterTerminate(t *testing.T) { if err := prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}); err != nil { t.Errorf("Prefetch failed before terminate: %v", err) } - if _, err := prefetcher.trie(common.Hash{}, db.originalRoot); err == nil { - t.Errorf("Trie retrieval succeeded before terminate") - } - prefetcher.terminate() + prefetcher.terminate(false) if err := prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}); err == nil { t.Errorf("Prefetch succeeded after terminate: %v", err) From 6d3c6a19634c9b3e79752d97cd9d58c21aaa2e62 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 6 May 2024 17:24:18 +0300 Subject: [PATCH 5/9] core/state: don't use the prefetcher for missing snapshot items --- core/state/dump.go | 2 +- core/state/state_object.go | 17 +++++++++++------ core/state/statedb_test.go | 2 +- 3 files changed, 13 insertions(+), 8 deletions(-) diff --git a/core/state/dump.go b/core/state/dump.go index c9aad4f8e234..66802dcb9569 100644 --- a/core/state/dump.go +++ b/core/state/dump.go @@ -165,7 +165,7 @@ func (s *StateDB) DumpToCollector(c DumpCollector, conf *DumpConfig) (nextKey [] } if !conf.SkipStorage { account.Storage = make(map[common.Hash]string) - tr, err := obj.getTrie() + tr, err := obj.getTrie(true) if err != nil { log.Error("Failed to load storage trie", "err", err) continue diff --git a/core/state/state_object.go b/core/state/state_object.go index 4df29ba8114b..5087bed583ee 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -130,11 +130,16 @@ 
func (s *stateObject) touch() { // getTrie returns the associated storage trie. The trie will be opened // if it's not loaded previously. An error will be returned if trie can't // be loaded. -func (s *stateObject) getTrie() (Trie, error) { +// +// The skipPrefetcher parameter is used to request a direct load from disk, even +// if a prefetcher is available. This path is used if snapshots are unavailable, +// since that requires reading the trie *during* execution, when the prefetchers +// cannot yet return data. +func (s *stateObject) getTrie(skipPrefetcher bool) (Trie, error) { if s.trie == nil { - // Try fetching from prefetcher first - if s.data.Root != types.EmptyRootHash && s.db.prefetcher != nil { - // When the miner is creating the pending state, there is no prefetcher + // Try fetching from prefetcher first, unless skipping it was explicitly + // requested + if s.data.Root != types.EmptyRootHash && s.db.prefetcher != nil && !skipPrefetcher { trie, err := s.db.prefetcher.trie(s.addrHash, s.data.Root) if err != nil { log.Error("Failed to retrieve storage pre-fetcher trie", "addr", s.address, "err", err) @@ -211,7 +216,7 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash { // If the snapshot is unavailable or reading from it fails, load from the database. 
if s.db.snap == nil || err != nil { start := time.Now() - tr, err := s.getTrie() + tr, err := s.getTrie(true) if err != nil { s.db.setError(err) return common.Hash{} @@ -315,7 +320,7 @@ func (s *stateObject) updateTrie() (Trie, error) { storage map[common.Hash][]byte origin map[common.Hash][]byte ) - tr, err := s.getTrie() + tr, err := s.getTrie(false) if err != nil { s.db.setError(err) return nil, err diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go index 71d64f562898..300ddfce0607 100644 --- a/core/state/statedb_test.go +++ b/core/state/statedb_test.go @@ -551,7 +551,7 @@ func forEachStorage(s *StateDB, addr common.Address, cb func(key, value common.H if so == nil { return nil } - tr, err := so.getTrie() + tr, err := so.getTrie(true) if err != nil { return err } From e9b599c6a17107cd884cbf09593aab9915682dc8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Tue, 7 May 2024 09:53:49 +0300 Subject: [PATCH 6/9] core/state: remove update concurrency for Verkle tries --- core/state/statedb.go | 8 ++++++++ core/state/trie_prefetcher.go | 4 ---- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/core/state/statedb.go b/core/state/statedb.go index 14e445683055..8bec4f675cf0 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -847,6 +847,14 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { start = time.Now() workers errgroup.Group ) + if s.db.TrieDB().IsVerkle() { + // Whilst MPT storage tries are independent, Verkle has one single trie + // for all the accounts and all the storage slots merged together. The + // former can thus be simply parallelized, but updating the latter will + // need concurrency support within the trie itself. That's a TODO for a + // later time. 
+ workers.SetLimit(1) + } for addr, op := range s.mutations { if op.applied || op.isDelete() { continue diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go index 63816ceeccaa..12e29a355146 100644 --- a/core/state/trie_prefetcher.go +++ b/core/state/trie_prefetcher.go @@ -32,10 +32,6 @@ var ( // errTerminated is returned if a fetcher is attempted to be operated after it // has already terminated. errTerminated = errors.New("fetcher is already terminated") - - // errNotTerminated is returned if a fetchers data is attempted to be retrieved - // before it terminates. - errNotTerminated = errors.New("fetcher is not yet terminated") ) // triePrefetcher is an active prefetcher, which receives accounts or storage From 8d9f5eee3f59a71d607bd27c52c5300ea59b725e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 9 May 2024 11:02:17 +0300 Subject: [PATCH 7/9] core/state: add some termination checks to prefetcher async shutdowns --- core/state/trie_prefetcher.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go index 12e29a355146..7e08964e41bf 100644 --- a/core/state/trie_prefetcher.go +++ b/core/state/trie_prefetcher.go @@ -95,6 +95,8 @@ func (p *triePrefetcher) report() { return } for _, fetcher := range p.fetchers { + fetcher.wait() // ensure the fetcher's idle before poking in its internals + if fetcher.root == p.root { p.accountLoadMeter.Mark(int64(len(fetcher.seen))) p.accountDupMeter.Mark(int64(fetcher.dups)) @@ -159,6 +161,7 @@ func (p *triePrefetcher) trie(owner common.Hash, root common.Hash) (Trie, error) // how useful or wasteful the fetcher is. 
func (p *triePrefetcher) used(owner common.Hash, root common.Hash, used [][]byte) { if fetcher := p.fetchers[p.trieID(owner, root)]; fetcher != nil { + fetcher.wait() // ensure the fetcher's idle before poking in its internals fetcher.used = used } } @@ -236,13 +239,19 @@ func (sf *subfetcher) schedule(keys [][]byte) error { return nil } +// wait blocks until the subfetcher terminates. This method is used to block on +// an async termination before accessing internal fields from the fetcher. +func (sf *subfetcher) wait() { + <-sf.term +} + // peek retrieves the fetcher's trie, populated with any pre-fetched data. The // returned trie will be a shallow copy, so modifying it will break subsequent // peeks for the original data. The method will block until all the scheduled // data has been loaded and the fethcer terminated. func (sf *subfetcher) peek() Trie { // Block until the fertcher terminates, then retrieve the trie - <-sf.term + sf.wait() return sf.trie } From 3906158e35b02c18331e4dbf895aac441e1cd34e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 10 May 2024 11:25:11 +0300 Subject: [PATCH 8/9] core/state: differentiate db tries and prefetched tries --- core/state/dump.go | 2 +- core/state/state_object.go | 77 +++++++++++++++++++++++--------------- core/state/statedb_test.go | 2 +- 3 files changed, 49 insertions(+), 32 deletions(-) diff --git a/core/state/dump.go b/core/state/dump.go index 66802dcb9569..c9aad4f8e234 100644 --- a/core/state/dump.go +++ b/core/state/dump.go @@ -165,7 +165,7 @@ func (s *StateDB) DumpToCollector(c DumpCollector, conf *DumpConfig) (nextKey [] } if !conf.SkipStorage { account.Storage = make(map[common.Hash]string) - tr, err := obj.getTrie(true) + tr, err := obj.getTrie() if err != nil { log.Error("Failed to load storage trie", "err", err) continue diff --git a/core/state/state_object.go b/core/state/state_object.go index 5087bed583ee..a0ecb7738c27 100644 --- a/core/state/state_object.go +++ 
b/core/state/state_object.go @@ -127,37 +127,39 @@ func (s *stateObject) touch() { } } -// getTrie returns the associated storage trie. The trie will be opened -// if it's not loaded previously. An error will be returned if trie can't -// be loaded. +// getTrie returns the associated storage trie. The trie will be opened if it's +// not loaded previously. An error will be returned if trie can't be loaded. // -// The skipPrefetcher parameter is used to request a direct load from disk, even -// if a prefetcher is available. This path is used if snapshots are unavailable, -// since that requires reading the trie *during* execution, when the prefetchers -// cannot yet return data. -func (s *stateObject) getTrie(skipPrefetcher bool) (Trie, error) { +// If a new trie is opened, it will be cached within the state object to allow +// subsequent reads to expand the same trie instead of reloading from disk. +func (s *stateObject) getTrie() (Trie, error) { if s.trie == nil { - // Try fetching from prefetcher first, unless skipping it was explicitly - // requested - if s.data.Root != types.EmptyRootHash && s.db.prefetcher != nil && !skipPrefetcher { - trie, err := s.db.prefetcher.trie(s.addrHash, s.data.Root) - if err != nil { - log.Error("Failed to retrieve storage pre-fetcher trie", "addr", s.address, "err", err) - } else { - s.trie = trie - } - } - if s.trie == nil { - tr, err := s.db.db.OpenStorageTrie(s.db.originalRoot, s.address, s.data.Root, s.db.trie) - if err != nil { - return nil, err - } - s.trie = tr + tr, err := s.db.db.OpenStorageTrie(s.db.originalRoot, s.address, s.data.Root, s.db.trie) + if err != nil { + return nil, err } + s.trie = tr } return s.trie, nil } +// getPrefetchedTrie returns the associated trie, as populated by the prefetcher +// if it's available. +// +// Note, opposed to getTrie, this method will *NOT* blindly cache the resulting +// trie in the state object.
The caller might want to do that, but it's cleaner +// to break the hidden interdependency between retrieving tries from the db or +// from the prefetcher. +func (s *stateObject) getPrefetchedTrie() (Trie, error) { + // If there's nothing to meaningfully return, let teh user figure it out by + // pulling the trie from disk. + if s.data.Root == types.EmptyRootHash || s.db.prefetcher == nil { + return nil, nil + } + // Attempt to retrieve the trie from the prefetcher + return s.db.prefetcher.trie(s.addrHash, s.data.Root) +} + // GetState retrieves a value from the account storage trie. func (s *stateObject) GetState(key common.Hash) common.Hash { value, _ := s.getState(key) @@ -216,7 +218,7 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash { // If the snapshot is unavailable or reading from it fails, load from the database. if s.db.snap == nil || err != nil { start := time.Now() - tr, err := s.getTrie(true) + tr, err := s.getTrie() if err != nil { s.db.setError(err) return common.Hash{} @@ -315,16 +317,31 @@ func (s *stateObject) updateTrie() (Trie, error) { if len(s.pendingStorage) == 0 { return s.trie, nil } + // Retrieve a prefetcher populated trie, or fall back to the database + tr, err := s.getPrefetchedTrie() + switch { + case err != nil: + // Fetcher retrieval failed, something's very wrong, abort + s.db.setError(err) + return nil, err + + case tr == nil: + // Fetcher not running or empty trie, fallback to the database trie + tr, err = s.getTrie() + if err != nil { + s.db.setError(err) + return nil, err + } + + default: + // Prefetcher returned a live trie, swap it out for the current one + s.trie = tr + } // The snapshot storage map for the object var ( storage map[common.Hash][]byte origin map[common.Hash][]byte ) - tr, err := s.getTrie(false) - if err != nil { - s.db.setError(err) - return nil, err - } // Insert all the pending storage updates into the trie usedStorage := make([][]byte, 0, len(s.pendingStorage)) diff --git
a/core/state/statedb_test.go b/core/state/statedb_test.go index 300ddfce0607..71d64f562898 100644 --- a/core/state/statedb_test.go +++ b/core/state/statedb_test.go @@ -551,7 +551,7 @@ func forEachStorage(s *StateDB, addr common.Address, cb func(key, value common.H if so == nil { return nil } - tr, err := so.getTrie(true) + tr, err := so.getTrie() if err != nil { return err } From bfcd4c4c55e00ccbd0fd1393c1f3e542e21f1a4b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 10 May 2024 11:28:21 +0300 Subject: [PATCH 9/9] core/state: teh teh teh --- core/state/state_object.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/state/state_object.go b/core/state/state_object.go index a0ecb7738c27..686003a26223 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -151,7 +151,7 @@ func (s *stateObject) getTrie() (Trie, error) { // to break the hidden interdependency between retrieving tries from the db or // from the prefetcher. func (s *stateObject) getPrefetchedTrie() (Trie, error) { - // If there's nothing to meaningfully return, let teh user figure it out by + // If there's nothing to meaningfully return, let the user figure it out by // pulling the trie from disk. if s.data.Root == types.EmptyRootHash || s.db.prefetcher == nil { return nil, nil