diff --git a/accounts/keystore/keystore_test.go b/accounts/keystore/keystore_test.go
index f90d809b55f4..54f8bfa36070 100644
--- a/accounts/keystore/keystore_test.go
+++ b/accounts/keystore/keystore_test.go
@@ -397,19 +397,19 @@ func TestImportRace(t *testing.T) {
 		t.Fatalf("failed to export account: %v", acc)
 	}
 	_, ks2 := tmpKeyStore(t, true)
-	var atom uint32
+	var atom atomic.Uint32
 	var wg sync.WaitGroup
 	wg.Add(2)
 	for i := 0; i < 2; i++ {
 		go func() {
 			defer wg.Done()
 			if _, err := ks2.Import(json, "new", "new"); err != nil {
-				atomic.AddUint32(&atom, 1)
+				atom.Add(1)
 			}
 		}()
 	}
 	wg.Wait()
-	if atom != 1 {
+	if atom.Load() != 1 {
 		t.Errorf("Import is racy")
 	}
 }
diff --git a/accounts/usbwallet/hub.go b/accounts/usbwallet/hub.go
index 2139967228f5..e67942dbc107 100644
--- a/accounts/usbwallet/hub.go
+++ b/accounts/usbwallet/hub.go
@@ -63,9 +63,9 @@ type Hub struct {
 	stateLock sync.RWMutex // Protects the internals of the hub from racey access
 
 	// TODO(karalabe): remove if hotplug lands on Windows
-	commsPend int        // Number of operations blocking enumeration
-	commsLock sync.Mutex // Lock protecting the pending counter and enumeration
-	enumFails uint32     // Number of times enumeration has failed
+	commsPend int           // Number of operations blocking enumeration
+	commsLock sync.Mutex    // Lock protecting the pending counter and enumeration
+	enumFails atomic.Uint32 // Number of times enumeration has failed
 }
 
 // NewLedgerHub creates a new hardware wallet manager for Ledger devices.
@@ -151,7 +151,7 @@ func (hub *Hub) refreshWallets() {
 		return
 	}
 	// If USB enumeration is continually failing, don't keep trying indefinitely
-	if atomic.LoadUint32(&hub.enumFails) > 2 {
+	if hub.enumFails.Load() > 2 {
 		return
 	}
 	// Retrieve the current list of USB wallet devices
@@ -172,7 +172,7 @@ func (hub *Hub) refreshWallets() {
 	}
 	infos, err := usb.Enumerate(hub.vendorID, 0)
 	if err != nil {
-		failcount := atomic.AddUint32(&hub.enumFails, 1)
+		failcount := hub.enumFails.Add(1)
 		if runtime.GOOS == "linux" {
 			// See rationale before the enumeration why this is needed and only on Linux.
 			hub.commsLock.Unlock()
@@ -181,7 +181,7 @@ func (hub *Hub) refreshWallets() {
 			"vendor", hub.vendorID, "failcount", failcount, "err", err)
 		return
 	}
-	atomic.StoreUint32(&hub.enumFails, 0)
+	hub.enumFails.Store(0)
 
 	for _, info := range infos {
 		for _, id := range hub.productIDs {
diff --git a/cmd/geth/attach_test.go b/cmd/geth/attach_test.go
index 7c5f951750fb..e717a2210139 100644
--- a/cmd/geth/attach_test.go
+++ b/cmd/geth/attach_test.go
@@ -61,7 +61,7 @@ func TestRemoteDbWithHeaders(t *testing.T) {
 }
 
 func testReceiveHeaders(t *testing.T, ln net.Listener, gethArgs ...string) {
-	var ok uint32
+	var ok atomic.Bool
 	server := &http.Server{
 		Addr: "localhost:0",
 		Handler: &testHandler{func(w http.ResponseWriter, r *http.Request) {
@@ -72,12 +72,12 @@ func testReceiveHeaders(t *testing.T, ln net.Listener, gethArgs ...string) {
 			if have, want := r.Header.Get("second"), "two"; have != want {
 				t.Fatalf("missing header, have %v want %v", have, want)
 			}
-			atomic.StoreUint32(&ok, 1)
+			ok.Store(true)
 		}}}
 	go server.Serve(ln)
 	defer server.Close()
 	runGeth(t, gethArgs...).WaitExit()
-	if atomic.LoadUint32(&ok) != 1 {
+	if !ok.Load() {
 		t.Fatal("Test fail, expected invocation to succeed")
 	}
 }
diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go
index 96999075a316..feb92cc99250 100644
--- a/cmd/geth/chaincmd.go
+++ b/cmd/geth/chaincmd.go
@@ -261,16 +261,16 @@ func importChain(ctx *cli.Context) error {
 	defer db.Close()
 
 	// Start periodically gathering memory profiles
-	var peakMemAlloc, peakMemSys uint64
+	var peakMemAlloc, peakMemSys atomic.Uint64
 	go func() {
 		stats := new(runtime.MemStats)
 		for {
 			runtime.ReadMemStats(stats)
-			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
-				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
+			if peakMemAlloc.Load() < stats.Alloc {
+				peakMemAlloc.Store(stats.Alloc)
 			}
-			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
-				atomic.StoreUint64(&peakMemSys, stats.Sys)
+			if peakMemSys.Load() < stats.Sys {
+				peakMemSys.Store(stats.Sys)
 			}
 			time.Sleep(5 * time.Second)
 		}
@@ -303,8 +303,8 @@ func importChain(ctx *cli.Context) error {
 
 	mem := new(runtime.MemStats)
 	runtime.ReadMemStats(mem)
-	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
-	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
+	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(peakMemAlloc.Load())/1024/1024)
+	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(peakMemSys.Load())/1024/1024)
 	fmt.Printf("Allocations: %.3f million\n", float64(mem.Mallocs)/1000000)
 	fmt.Printf("GC pause: %v\n\n", time.Duration(mem.PauseTotalNs))
diff --git a/cmd/geth/les_test.go b/cmd/geth/les_test.go
index 607b454eadfb..2f6db81df026 100644
--- a/cmd/geth/les_test.go
+++ b/cmd/geth/les_test.go
@@ -107,10 +107,10 @@ func ipcEndpoint(ipcPath, datadir string) string {
 // but windows require pipes to sit in "\\.\pipe\". Therefore, to run several
 // nodes simultaneously, we need to distinguish between them, which we do by
 // the pipe filename instead of folder.
-var nextIPC = uint32(0)
+var nextIPC atomic.Uint32
 
 func startGethWithIpc(t *testing.T, name string, args ...string) *gethrpc {
-	ipcName := fmt.Sprintf("geth-%d.ipc", atomic.AddUint32(&nextIPC, 1))
+	ipcName := fmt.Sprintf("geth-%d.ipc", nextIPC.Add(1))
 	args = append([]string{"--networkid=42", "--port=0", "--authrpc.port", "0", "--ipcpath", ipcName}, args...)
 	t.Logf("Starting %v with rpc: %v", name, args)
diff --git a/consensus/ethash/algorithm.go b/consensus/ethash/algorithm.go
index d53918382283..a2a6081f55cf 100644
--- a/consensus/ethash/algorithm.go
+++ b/consensus/ethash/algorithm.go
@@ -163,7 +163,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
 	rows := int(size) / hashBytes
 
 	// Start a monitoring goroutine to report progress on low end devices
-	var progress uint32
+	var progress atomic.Uint32
 
 	done := make(chan struct{})
 	defer close(done)
@@ -174,7 +174,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
 			case <-done:
 				return
 			case <-time.After(3 * time.Second):
-				logger.Info("Generating ethash verification cache", "percentage", atomic.LoadUint32(&progress)*100/uint32(rows)/(cacheRounds+1), "elapsed", common.PrettyDuration(time.Since(start)))
+				logger.Info("Generating ethash verification cache", "percentage", progress.Load()*100/uint32(rows)/(cacheRounds+1), "elapsed", common.PrettyDuration(time.Since(start)))
 			}
 		}
 	}()
@@ -185,7 +185,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
 	keccak512(cache, seed)
 	for offset := uint64(hashBytes); offset < size; offset += hashBytes {
 		keccak512(cache[offset:], cache[offset-hashBytes:offset])
-		atomic.AddUint32(&progress, 1)
+		progress.Add(1)
 	}
 	// Use a low-round version of randmemohash
 	temp := make([]byte, hashBytes)
@@ -200,7 +200,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
 			bitutil.XORBytes(temp, cache[srcOff:srcOff+hashBytes], cache[xorOff:xorOff+hashBytes])
 			keccak512(cache[dstOff:], temp)
 
-			atomic.AddUint32(&progress, 1)
+			progress.Add(1)
 		}
 	}
 	// Swap the byte order on big endian systems and return
@@ -299,7 +299,7 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
 		var pend sync.WaitGroup
 		pend.Add(threads)
 
-		var progress uint64
+		var progress atomic.Uint64
 		for i := 0; i < threads; i++ {
 			go func(id int) {
 				defer pend.Done()
@@ -323,7 +323,7 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
 					}
 					copy(dataset[index*hashBytes:], item)
 
-					if status := atomic.AddUint64(&progress, 1); status%percent == 0 {
+					if status := progress.Add(1); status%percent == 0 {
 						logger.Info("Generating DAG in progress", "percentage", (status*100)/(size/hashBytes), "elapsed", common.PrettyDuration(time.Since(start)))
 					}
 				}
diff --git a/consensus/ethash/ethash.go b/consensus/ethash/ethash.go
index 6cb312482795..462f10956432 100644
--- a/consensus/ethash/ethash.go
+++ b/consensus/ethash/ethash.go
@@ -308,12 +308,12 @@ func (c *cache) finalizer() {
 
 // dataset wraps an ethash dataset with some metadata to allow easier concurrent use.
 type dataset struct {
-	epoch   uint64    // Epoch for which this cache is relevant
-	dump    *os.File  // File descriptor of the memory mapped cache
-	mmap    mmap.MMap // Memory map itself to unmap before releasing
-	dataset []uint32  // The actual cache data content
-	once    sync.Once // Ensures the cache is generated only once
-	done    uint32    // Atomic flag to determine generation status
+	epoch   uint64      // Epoch for which this dataset is relevant
+	dump    *os.File    // File descriptor of the memory mapped dataset
+	mmap    mmap.MMap   // Memory map itself to unmap before releasing
+	dataset []uint32    // The actual dataset content
+	once    sync.Once   // Ensures the dataset is generated only once
+	done    atomic.Bool // Atomic flag to determine generation status
 }
 
 // newDataset creates a new ethash mining dataset and returns it as a plain Go
@@ -326,7 +326,7 @@ func newDataset(epoch uint64) *dataset {
 func (d *dataset) generate(dir string, limit int, lock bool, test bool) {
 	d.once.Do(func() {
 		// Mark the dataset generated after we're done. This is needed for remote
-		defer atomic.StoreUint32(&d.done, 1)
+		defer d.done.Store(true)
 
 		csize := cacheSize(d.epoch*epochLength + 1)
 		dsize := datasetSize(d.epoch*epochLength + 1)
@@ -390,7 +390,7 @@ func (d *dataset) generate(dir string, limit int, lock bool, test bool) {
 // or not (it may not have been started at all). This is useful for remote miners
 // to default to verification caches instead of blocking on DAG generations.
 func (d *dataset) generated() bool {
-	return atomic.LoadUint32(&d.done) == 1
+	return d.done.Load()
 }
 
 // finalizer closes any file handlers and memory maps open.
diff --git a/core/blockchain.go b/core/blockchain.go
index f22562ccfa94..7d3fc1e18863 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -174,7 +174,7 @@ type BlockChain struct {
 	triegc        *prque.Prque[int64, common.Hash] // Priority queue mapping block numbers to tries to gc
 	gcproc        time.Duration                    // Accumulates canonical block processing for trie dumping
 	lastWrite     uint64                           // Last block when the state was flushed
-	flushInterval int64                            // Time interval (processing time) after which to flush a state
+	flushInterval atomic.Int64                     // Time interval (processing time) after which to flush a state
 	triedb        *trie.Database                   // The database handler for maintaining trie nodes.
 	stateCache    state.Database                   // State database to reuse between imports (contains state cache)
@@ -215,8 +215,8 @@ type BlockChain struct {
 	wg   sync.WaitGroup //
 	quit chan struct{}  // shutdown signal, closed in Stop.
-	running       int32 // 0 if chain is running, 1 when stopped
-	procInterrupt int32 // interrupt signaler for block processing
+	running       atomic.Bool // false if chain is running, true when stopped
+	procInterrupt atomic.Bool // interrupt signaler for block processing
 
 	engine     consensus.Engine
 	validator  Validator // Block and state validator interface
@@ -260,7 +260,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
 		cacheConfig:   cacheConfig,
 		db:            db,
 		triedb:        triedb,
-		flushInterval: int64(cacheConfig.TrieTimeLimit),
+		flushInterval: atomic.Int64{},
 		triegc:        prque.New[int64, common.Hash](nil),
 		quit:          make(chan struct{}),
 		chainmu:       syncx.NewClosableMutex(),
@@ -273,6 +273,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
 		engine:        engine,
 		vmConfig:      vmConfig,
 	}
+	bc.flushInterval.Store(int64(cacheConfig.TrieTimeLimit))
 	bc.forker = NewForkChoice(bc, shouldPreserve)
 	bc.stateCache = state.NewDatabaseWithNodeDB(bc.db, bc.triedb)
 	bc.validator = NewBlockValidator(chainConfig, bc, engine)
@@ -916,7 +917,7 @@ func (bc *BlockChain) writeHeadBlock(block *types.Block) {
 // This method has been exposed to allow tests to stop the blockchain while simulating
 // a crash.
 func (bc *BlockChain) stopWithoutSaving() {
-	if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
+	if !bc.running.CompareAndSwap(false, true) {
 		return
 	}
@@ -998,12 +999,12 @@ func (bc *BlockChain) Stop() {
 // errInsertionInterrupted as soon as possible. Insertion is permanently disabled after
 // calling this method.
 func (bc *BlockChain) StopInsert() {
-	atomic.StoreInt32(&bc.procInterrupt, 1)
+	bc.procInterrupt.Store(true)
 }
 
 // insertStopped returns true after StopInsert has been called.
 func (bc *BlockChain) insertStopped() bool {
-	return atomic.LoadInt32(&bc.procInterrupt) == 1
+	return bc.procInterrupt.Load()
 }
 
 func (bc *BlockChain) procFutureBlocks() {
@@ -1382,7 +1383,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
 	}
 	// Find the next state trie we need to commit
 	chosen := current - TriesInMemory
-	flushInterval := time.Duration(atomic.LoadInt64(&bc.flushInterval))
+	flushInterval := time.Duration(bc.flushInterval.Load())
 	// If we exceeded time allowance, flush an entire trie to disk
 	if bc.gcproc > flushInterval {
 		// If the header is missing (canonical chain behind), we're reorging a low
@@ -1735,16 +1736,16 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool)
 		// If we have a followup block, run that against the current state to pre-cache
 		// transactions and probabilistically some of the account/storage trie nodes.
-		var followupInterrupt uint32
+		var followupInterrupt atomic.Bool
 		if !bc.cacheConfig.TrieCleanNoPrefetch {
 			if followup, err := it.peek(); followup != nil && err == nil {
 				throwaway, _ := state.New(parent.Root, bc.stateCache, bc.snaps)
 
-				go func(start time.Time, followup *types.Block, throwaway *state.StateDB, interrupt *uint32) {
+				go func(start time.Time, followup *types.Block, throwaway *state.StateDB, interrupt *atomic.Bool) {
 					bc.prefetcher.Prefetch(followup, throwaway, bc.vmConfig, &followupInterrupt)
 
 					blockPrefetchExecuteTimer.Update(time.Since(start))
-					if atomic.LoadUint32(interrupt) == 1 {
+					if interrupt.Load() {
 						blockPrefetchInterruptMeter.Mark(1)
 					}
 				}(time.Now(), followup, throwaway, &followupInterrupt)
@@ -1756,7 +1757,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool)
 		receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig)
 		if err != nil {
 			bc.reportBlock(block, receipts, err)
-			atomic.StoreUint32(&followupInterrupt, 1)
+			followupInterrupt.Store(true)
 			return it.index, err
 		}
@@ -1777,7 +1778,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool)
 		substart = time.Now()
 		if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil {
 			bc.reportBlock(block, receipts, err)
-			atomic.StoreUint32(&followupInterrupt, 1)
+			followupInterrupt.Store(true)
 			return it.index, err
 		}
 		proctime := time.Since(start)
@@ -1796,7 +1797,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool)
 		} else {
 			status, err = bc.writeBlockAndSetHead(block, receipts, logs, statedb, false)
 		}
-		atomic.StoreUint32(&followupInterrupt, 1)
+		followupInterrupt.Store(true)
 		if err != nil {
 			return it.index, err
 		}
@@ -2496,5 +2497,5 @@ func (bc *BlockChain) SetBlockValidatorAndProcessorForTesting(v Validator, p Pro
 // The interval is in terms of block processing time, not wall clock.
 // It is thread-safe and can be called repeatedly without side effects.
 func (bc *BlockChain) SetTrieFlushInterval(interval time.Duration) {
-	atomic.StoreInt64(&bc.flushInterval, int64(interval))
+	bc.flushInterval.Store(int64(interval))
 }
diff --git a/core/bloombits/matcher.go b/core/bloombits/matcher.go
index 0d2f6f950d86..d8f932041bc0 100644
--- a/core/bloombits/matcher.go
+++ b/core/bloombits/matcher.go
@@ -83,7 +83,7 @@ type Matcher struct {
 	retrievals chan chan *Retrieval // Retriever processes waiting for task allocations
 	deliveries chan *Retrieval      // Retriever processes waiting for task response deliveries
 
-	running uint32      // Atomic flag whether a session is live or not
+	running atomic.Bool // Atomic flag whether a session is live or not
 }
 
 // NewMatcher creates a new pipeline for retrieving bloom bit streams and doing
@@ -146,10 +146,10 @@ func (m *Matcher) addScheduler(idx uint) {
 // channel is closed.
 func (m *Matcher) Start(ctx context.Context, begin, end uint64, results chan uint64) (*MatcherSession, error) {
 	// Make sure we're not creating concurrent sessions
-	if atomic.SwapUint32(&m.running, 1) == 1 {
+	if m.running.Swap(true) {
 		return nil, errors.New("matcher already running")
 	}
-	defer atomic.StoreUint32(&m.running, 0)
+	defer m.running.Store(false)
 
 	// Initiate a new matching round
 	session := &MatcherSession{
diff --git a/core/bloombits/matcher_test.go b/core/bloombits/matcher_test.go
index 93d4632b8587..36764c3f174b 100644
--- a/core/bloombits/matcher_test.go
+++ b/core/bloombits/matcher_test.go
@@ -160,7 +160,7 @@ func testMatcher(t *testing.T, filter [][]bloomIndexes, start, blocks uint64, in
 		}
 	}
 	// Track the number of retrieval requests made
-	var requested uint32
+	var requested atomic.Uint32
 
 	// Start the matching session for the filter and the retriever goroutines
 	quit := make(chan struct{})
@@ -208,15 +208,15 @@ func testMatcher(t *testing.T, filter [][]bloomIndexes, start, blocks uint64, in
 	session.Close()
 	close(quit)
 
-	if retrievals != 0 && requested != retrievals {
-		t.Errorf("filter = %v blocks = %v intermittent = %v: request count mismatch, have #%v, want #%v", filter, blocks, intermittent, requested, retrievals)
+	if retrievals != 0 && requested.Load() != retrievals {
+		t.Errorf("filter = %v blocks = %v intermittent = %v: request count mismatch, have #%v, want #%v", filter, blocks, intermittent, requested.Load(), retrievals)
 	}
-	return requested
+	return requested.Load()
 }
 
 // startRetrievers starts a batch of goroutines listening for section requests
 // and serving them.
-func startRetrievers(session *MatcherSession, quit chan struct{}, retrievals *uint32, batch int) {
+func startRetrievers(session *MatcherSession, quit chan struct{}, retrievals *atomic.Uint32, batch int) {
 	requests := make(chan chan *Retrieval)
 
 	for i := 0; i < 10; i++ {
@@ -238,7 +238,7 @@ func startRetrievers(session *MatcherSession, quit chan struct{}, retrievals *ui
 				for i, section := range task.Sections {
 					if rand.Int()%4 != 0 { // Handle occasional missing deliveries
 						task.Bitsets[i] = generateBitset(task.Bit, section)
-						atomic.AddUint32(retrievals, 1)
+						retrievals.Add(1)
 					}
 				}
 				request <- task
diff --git a/core/bloombits/scheduler_test.go b/core/bloombits/scheduler_test.go
index 49e113c117ba..dcaaa915258a 100644
--- a/core/bloombits/scheduler_test.go
+++ b/core/bloombits/scheduler_test.go
@@ -45,13 +45,13 @@ func testScheduler(t *testing.T, clients int, fetchers int, requests int) {
 	fetch := make(chan *request, 16)
 	defer close(fetch)
 
-	var delivered uint32
+	var delivered atomic.Uint32
 	for i := 0; i < fetchers; i++ {
 		go func() {
 			defer fetchPend.Done()
 
 			for req := range fetch {
-				atomic.AddUint32(&delivered, 1)
+				delivered.Add(1)
 
 				f.deliver([]uint64{
 					req.section + uint64(requests), // Non-requested data (ensure it doesn't go out of bounds)
@@ -97,7 +97,7 @@ func testScheduler(t *testing.T, clients int, fetchers int, requests int) {
 	}
 	pend.Wait()
 
-	if have := atomic.LoadUint32(&delivered); int(have) != requests {
+	if have := delivered.Load(); int(have) != requests {
 		t.Errorf("request count mismatch: have %v, want %v", have, requests)
 	}
 }
diff --git a/core/chain_indexer.go b/core/chain_indexer.go
index 95901a0eaa71..23ab23ef0fb9 100644
--- a/core/chain_indexer.go
+++ b/core/chain_indexer.go
@@ -75,7 +75,7 @@ type ChainIndexer struct {
 	backend  ChainIndexerBackend // Background processor generating the index data content
 	children []*ChainIndexer     // Child indexers to cascade chain updates to
 
-	active uint32          // Flag whether the event loop was started
+	active atomic.Bool     // Flag whether the event loop was started
 	update chan struct{}   // Notification channel that headers should be processed
 	quit   chan chan error // Quit channel to tear down running goroutines
 	ctx    context.Context
@@ -166,7 +166,7 @@ func (c *ChainIndexer) Close() error {
 		errs = append(errs, err)
 	}
 	// If needed, tear down the secondary event loop
-	if atomic.LoadUint32(&c.active) != 0 {
+	if c.active.Load() {
 		c.quit <- errc
 		if err := <-errc; err != nil {
 			errs = append(errs, err)
@@ -196,7 +196,7 @@ func (c *ChainIndexer) Close() error {
 // queue.
 func (c *ChainIndexer) eventLoop(currentHeader *types.Header, events chan ChainHeadEvent, sub event.Subscription) {
 	// Mark the chain indexer as active, requiring an additional teardown
-	atomic.StoreUint32(&c.active, 1)
+	c.active.Store(true)
 
 	defer sub.Unsubscribe()
diff --git a/core/rawdb/chain_freezer.go b/core/rawdb/chain_freezer.go
index 167afc38894c..e88a72113477 100644
--- a/core/rawdb/chain_freezer.go
+++ b/core/rawdb/chain_freezer.go
@@ -43,10 +43,7 @@ const (
 // The background thread will keep moving ancient chain segments from key-value
 // database to flat files for saving space on live database.
 type chainFreezer struct {
-	// WARNING: The `threshold` field is accessed atomically. On 32 bit platforms, only
-	// 64-bit aligned fields can be atomic. The struct is guaranteed to be so aligned,
-	// so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG).
-	threshold uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests)
+	threshold atomic.Uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests)
 
 	*Freezer
 	quit    chan struct{}
@@ -60,12 +57,14 @@ func newChainFreezer(datadir string, namespace string, readonly bool) (*chainFre
 	if err != nil {
 		return nil, err
 	}
-	return &chainFreezer{
+	f := &chainFreezer{
 		Freezer:   freezer,
-		threshold: params.FullImmutabilityThreshold,
+		threshold: atomic.Uint64{},
 		quit:      make(chan struct{}),
 		trigger:   make(chan chan struct{}),
-	}, nil
+	}
+	f.threshold.Store(params.FullImmutabilityThreshold)
+	return f, nil
 }
 
 // Close closes the chain freezer instance and terminates the background thread.
@@ -124,8 +123,8 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore) {
 			continue
 		}
 		number := ReadHeaderNumber(nfdb, hash)
-		threshold := atomic.LoadUint64(&f.threshold)
-		frozen := atomic.LoadUint64(&f.frozen)
+		threshold := f.threshold.Load()
+		frozen := f.frozen.Load()
 		switch {
 		case number == nil:
 			log.Error("Current full block number unavailable", "hash", hash)
@@ -186,7 +185,7 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore) {
 
 		// Wipe out side chains also and track dangling side chains
 		var dangling []common.Hash
-		frozen = atomic.LoadUint64(&f.frozen) // Needs reload after during freezeRange
+		frozen = f.frozen.Load() // Needs reloading after freezeRange
 		for number := first; number < frozen; number++ {
 			// Always keep the genesis block in active database
 			if number != 0 {
diff --git a/core/rawdb/chain_iterator.go b/core/rawdb/chain_iterator.go
index 102943516eff..6a94f1782449 100644
--- a/core/rawdb/chain_iterator.go
+++ b/core/rawdb/chain_iterator.go
@@ -132,11 +132,12 @@ func iterateTransactions(db ethdb.Database, from uint64, to uint64, reverse bool
 		}
 	}
 	// process runs in parallel
-	nThreadsAlive := int32(threads)
+	nThreadsAlive := atomic.Int64{}
+	nThreadsAlive.Store(int64(threads))
 	process := func() {
 		defer func() {
 			// Last processor closes the result channel
-			if atomic.AddInt32(&nThreadsAlive, -1) == 0 {
+			if nThreadsAlive.Add(-1) == 0 {
 				close(hashesCh)
 			}
 		}()
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
index 5b7299f38f86..0f930fc7e3e5 100644
--- a/core/rawdb/database.go
+++ b/core/rawdb/database.go
@@ -24,7 +24,6 @@ import (
 	"path"
 	"path/filepath"
 	"strings"
-	"sync/atomic"
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
@@ -72,9 +71,9 @@ func (frdb *freezerdb) Freeze(threshold uint64) error {
 	}
 	// Set the freezer threshold to a temporary value
 	defer func(old uint64) {
-		atomic.StoreUint64(&frdb.AncientStore.(*chainFreezer).threshold, old)
-	}(atomic.LoadUint64(&frdb.AncientStore.(*chainFreezer).threshold))
-	atomic.StoreUint64(&frdb.AncientStore.(*chainFreezer).threshold, threshold)
+		frdb.AncientStore.(*chainFreezer).threshold.Store(old)
+	}(frdb.AncientStore.(*chainFreezer).threshold.Load())
+	frdb.AncientStore.(*chainFreezer).threshold.Store(threshold)
 
 	// Trigger a freeze cycle and block until it's done
 	trigger := make(chan struct{}, 1)
diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go
index 60e2c56e0ff7..323dc1ddb326 100644
--- a/core/rawdb/freezer.go
+++ b/core/rawdb/freezer.go
@@ -62,11 +62,8 @@ const freezerTableSize = 2 * 1000 * 1000 * 1000
 // reserving it for go-ethereum. This would also reduce the memory requirements
 // of Geth, and thus also GC overhead.
 type Freezer struct {
-	// WARNING: The `frozen` and `tail` fields are accessed atomically. On 32 bit platforms, only
-	// 64-bit aligned fields can be atomic. The struct is guaranteed to be so aligned,
-	// so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG).
-	frozen uint64 // Number of blocks already frozen
-	tail   uint64 // Number of the first stored item in the freezer
+	frozen atomic.Uint64 // Number of blocks already frozen
+	tail   atomic.Uint64 // Number of the first stored item in the freezer
 
 	// This lock synchronizes writers and the truncate operation, as well as
 	// the "atomic" (batched) read operations.
@@ -212,12 +209,12 @@ func (f *Freezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]
 // Ancients returns the length of the frozen items.
 func (f *Freezer) Ancients() (uint64, error) {
-	return atomic.LoadUint64(&f.frozen), nil
+	return f.frozen.Load(), nil
 }
 
 // Tail returns the number of first stored item in the freezer.
 func (f *Freezer) Tail() (uint64, error) {
-	return atomic.LoadUint64(&f.tail), nil
+	return f.tail.Load(), nil
 }
 
 // AncientSize returns the ancient size of the specified category.
@@ -251,7 +248,7 @@ func (f *Freezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize
 	defer f.writeLock.Unlock()
 
 	// Roll back all tables to the starting position in case of error.
-	prevItem := atomic.LoadUint64(&f.frozen)
+	prevItem := f.frozen.Load()
 	defer func() {
 		if err != nil {
 			// The write operation has failed. Go back to the previous item position.
@@ -272,7 +269,7 @@ func (f *Freezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize
 	if err != nil {
 		return 0, err
 	}
-	atomic.StoreUint64(&f.frozen, item)
+	f.frozen.Store(item)
 	return writeSize, nil
 }
@@ -284,7 +281,7 @@ func (f *Freezer) TruncateHead(items uint64) error {
 	f.writeLock.Lock()
 	defer f.writeLock.Unlock()
 
-	if atomic.LoadUint64(&f.frozen) <= items {
+	if f.frozen.Load() <= items {
 		return nil
 	}
 	for _, table := range f.tables {
@@ -292,7 +289,7 @@ func (f *Freezer) TruncateHead(items uint64) error {
 			return err
 		}
 	}
-	atomic.StoreUint64(&f.frozen, items)
+	f.frozen.Store(items)
 	return nil
 }
@@ -304,7 +301,7 @@ func (f *Freezer) TruncateTail(tail uint64) error {
 	f.writeLock.Lock()
 	defer f.writeLock.Unlock()
 
-	if atomic.LoadUint64(&f.tail) >= tail {
+	if f.tail.Load() >= tail {
 		return nil
 	}
 	for _, table := range f.tables {
@@ -312,7 +309,7 @@ func (f *Freezer) TruncateTail(tail uint64) error {
 			return err
 		}
 	}
-	atomic.StoreUint64(&f.tail, tail)
+	f.tail.Store(tail)
 	return nil
 }
@@ -343,22 +340,22 @@ func (f *Freezer) validate() error {
 	)
 	// Hack to get boundary of any table
 	for kind, table := range f.tables {
-		head = atomic.LoadUint64(&table.items)
-		tail = atomic.LoadUint64(&table.itemHidden)
+		head = table.items.Load()
+		tail = table.itemHidden.Load()
 		name = kind
 		break
 	}
 	// Now check every table against those boundaries.
 	for kind, table := range f.tables {
-		if head != atomic.LoadUint64(&table.items) {
-			return fmt.Errorf("freezer tables %s and %s have differing head: %d != %d", kind, name, atomic.LoadUint64(&table.items), head)
+		if head != table.items.Load() {
+			return fmt.Errorf("freezer tables %s and %s have differing head: %d != %d", kind, name, table.items.Load(), head)
 		}
-		if tail != atomic.LoadUint64(&table.itemHidden) {
-			return fmt.Errorf("freezer tables %s and %s have differing tail: %d != %d", kind, name, atomic.LoadUint64(&table.itemHidden), tail)
+		if tail != table.itemHidden.Load() {
+			return fmt.Errorf("freezer tables %s and %s have differing tail: %d != %d", kind, name, table.itemHidden.Load(), tail)
 		}
 	}
-	atomic.StoreUint64(&f.frozen, head)
-	atomic.StoreUint64(&f.tail, tail)
+	f.frozen.Store(head)
+	f.tail.Store(tail)
 	return nil
 }
@@ -369,11 +366,11 @@ func (f *Freezer) repair() error {
 		tail = uint64(0)
 	)
 	for _, table := range f.tables {
-		items := atomic.LoadUint64(&table.items)
+		items := table.items.Load()
 		if head > items {
 			head = items
 		}
-		hidden := atomic.LoadUint64(&table.itemHidden)
+		hidden := table.itemHidden.Load()
 		if hidden > tail {
 			tail = hidden
 		}
@@ -386,8 +383,8 @@ func (f *Freezer) repair() error {
 			return err
 		}
 	}
-	atomic.StoreUint64(&f.frozen, head)
-	atomic.StoreUint64(&f.tail, tail)
+	f.frozen.Store(head)
+	f.tail.Store(tail)
 	return nil
 }
@@ -413,7 +410,7 @@ func (f *Freezer) MigrateTable(kind string, convert convertLegacyFn) error {
 	// and that error will be returned.
 	forEach := func(t *freezerTable, offset uint64, fn func(uint64, []byte) error) error {
 		var (
-			items     = atomic.LoadUint64(&t.items)
+			items     = t.items.Load()
 			batchSize = uint64(1024)
 			maxBytes  = uint64(1024 * 1024)
 		)
@@ -436,7 +433,7 @@ func (f *Freezer) MigrateTable(kind string, convert convertLegacyFn) error {
 	}
 	// TODO(s1na): This is a sanity-check since as of now no process does tail-deletion. But the migration
 	// process assumes no deletion at tail and needs to be modified to account for that.
-	if table.itemOffset > 0 || table.itemHidden > 0 {
+	if table.itemOffset.Load() > 0 || table.itemHidden.Load() > 0 {
 		return fmt.Errorf("migration not supported for tail-deleted freezers")
 	}
 	ancientsPath := filepath.Dir(table.index.Name())
@@ -452,7 +449,7 @@ func (f *Freezer) MigrateTable(kind string, convert convertLegacyFn) error {
 		out    []byte
 		start  = time.Now()
 		logged = time.Now()
-		offset = newTable.items
+		offset = newTable.items.Load()
 	)
 	if offset > 0 {
 		log.Info("found previous migration attempt", "migrated", offset)
diff --git a/core/rawdb/freezer_batch.go b/core/rawdb/freezer_batch.go
index 54c98cee0807..3cc7d84f4ef4 100644
--- a/core/rawdb/freezer_batch.go
+++ b/core/rawdb/freezer_batch.go
@@ -18,7 +18,6 @@ package rawdb
 
 import (
 	"fmt"
-	"sync/atomic"
 
 	"github.com/ethereum/go-ethereum/common/math"
 	"github.com/ethereum/go-ethereum/rlp"
@@ -107,7 +106,7 @@ func (t *freezerTable) newBatch() *freezerTableBatch {
 func (batch *freezerTableBatch) reset() {
 	batch.dataBuffer = batch.dataBuffer[:0]
 	batch.indexBuffer = batch.indexBuffer[:0]
-	batch.curItem = atomic.LoadUint64(&batch.t.items)
+	batch.curItem = batch.t.items.Load()
 	batch.totalBytes = 0
 }
 
@@ -201,7 +200,7 @@ func (batch *freezerTableBatch) commit() error {
 
 	// Update headBytes of table.
 	batch.t.headBytes += dataSize
-	atomic.StoreUint64(&batch.t.items, batch.curItem)
+	batch.t.items.Store(batch.curItem)
 
 	// Update metrics.
 	batch.t.sizeGauge.Inc(dataSize + indexSize)
diff --git a/core/rawdb/freezer_table.go b/core/rawdb/freezer_table.go
index b111797d5297..10dfb90ea77d 100644
--- a/core/rawdb/freezer_table.go
+++ b/core/rawdb/freezer_table.go
@@ -88,18 +88,15 @@ func (i *indexEntry) bounds(end *indexEntry) (startOffset, endOffset, fileId uin
 // It consists of a data file (snappy encoded arbitrary data blobs) and an indexEntry
 // file (uncompressed 64 bit indices into the data file).
 type freezerTable struct {
-	// WARNING: The `items` field is accessed atomically. On 32 bit platforms, only
-	// 64-bit aligned fields can be atomic. The struct is guaranteed to be so aligned,
-	// so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG).
-	items      uint64 // Number of items stored in the table (including items removed from tail)
-	itemOffset uint64 // Number of items removed from the table
+	items      atomic.Uint64 // Number of items stored in the table (including items removed from tail)
+	itemOffset atomic.Uint64 // Number of items removed from the table
 
 	// itemHidden is the number of items marked as deleted. Tail deletion is
 	// only supported at file level which means the actual deletion will be
 	// delayed until the entire data file is marked as deleted. Before that
 	// these items will be hidden to prevent being visited again. The value
 	// should never be lower than itemOffset.
-	itemHidden uint64
+	itemHidden atomic.Uint64
 
 	noCompression bool // if true, disables snappy compression. Note: does not work retroactively
 	readonly      bool
@@ -241,14 +238,14 @@ func (t *freezerTable) repair() error {
 		// which is not enough in theory but enough in practice.
 		// TODO: use uint64 to represent total removed items.
 		t.tailId = firstIndex.filenum
-		t.itemOffset = uint64(firstIndex.offset)
+		t.itemOffset.Store(uint64(firstIndex.offset))
 
 		// Load metadata from the file
-		meta, err := loadMetadata(t.meta, t.itemOffset)
+		meta, err := loadMetadata(t.meta, t.itemOffset.Load())
 		if err != nil {
 			return err
 		}
-		t.itemHidden = meta.VirtualTail
+		t.itemHidden.Store(meta.VirtualTail)
 
 		// Read the last index, use the default value in case the freezer is empty
 		if offsetsSize == indexEntrySize {
@@ -331,7 +328,7 @@ func (t *freezerTable) repair() error {
 		}
 	}
 	// Update the item and byte counters and return
-	t.items = t.itemOffset + uint64(offsetsSize/indexEntrySize-1) // last indexEntry points to the end of the data file
+	t.items.Store(t.itemOffset.Load() + uint64(offsetsSize/indexEntrySize-1)) // last indexEntry points to the end of the data file
 	t.headBytes = contentSize
 	t.headId = lastIndex.filenum
 
@@ -346,9 +343,9 @@ func (t *freezerTable) repair() error {
 		return err
 	}
 	if verbose {
-		t.logger.Info("Chain freezer table opened", "items", t.items, "size", t.headBytes)
+		t.logger.Info("Chain freezer table opened", "items", t.items.Load(), "size", t.headBytes)
 	} else {
-		t.logger.Debug("Chain freezer table opened", "items", t.items, "size", common.StorageSize(t.headBytes))
+		t.logger.Debug("Chain freezer table opened", "items", t.items.Load(), "size", common.StorageSize(t.headBytes))
 	}
 	return nil
 }
@@ -382,11 +379,11 @@ func (t *freezerTable) truncateHead(items uint64) error {
 	defer t.lock.Unlock()
 
 	// Ensure the given truncate target falls in the correct range
-	existing := atomic.LoadUint64(&t.items)
+	existing := t.items.Load()
 	if existing <= items {
 		return nil
 	}
-	if items < atomic.LoadUint64(&t.itemHidden) {
+	if items < t.itemHidden.Load() {
 		return errors.New("truncation below tail")
 	}
 	// We need to truncate, save the old size for metrics tracking
@@ -403,7 +400,7 @@ func (t *freezerTable) truncateHead(items uint64) error {
 
 	// Truncate the index file first, the tail position is also considered
 	// when calculating the new freezer table length.
-	length := items - atomic.LoadUint64(&t.itemOffset)
+	length := items - t.itemOffset.Load()
 	if err := truncateFreezerFile(t.index, int64(length+1)*indexEntrySize); err != nil {
 		return err
 	}
@@ -438,7 +435,7 @@ func (t *freezerTable) truncateHead(items uint64) error {
 	}
 	// All data files truncated, set internal counters and return
 	t.headBytes = int64(expected.offset)
-	atomic.StoreUint64(&t.items, items)
+	t.items.Store(items)
 
 	// Retrieve the new size and update the total size counter
 	newSize, err := t.sizeNolock()
@@ -455,10 +452,10 @@ func (t *freezerTable) truncateTail(items uint64) error {
 	defer t.lock.Unlock()
 
 	// Ensure the given truncate target falls in the correct range
-	if atomic.LoadUint64(&t.itemHidden) >= items {
+	if t.itemHidden.Load() >= items {
 		return nil
 	}
-	if atomic.LoadUint64(&t.items) < items {
+	if t.items.Load() < items {
 		return errors.New("truncation above head")
 	}
 	// Load the new tail index by the given new tail position
@@ -466,10 +463,10 @@ func (t *freezerTable) truncateTail(items uint64) error {
 		newTailId uint32
 		buffer    = make([]byte, indexEntrySize)
 	)
-	if atomic.LoadUint64(&t.items) == items {
+	if t.items.Load() == items {
 		newTailId = t.headId
 	} else {
-		offset := items - atomic.LoadUint64(&t.itemOffset)
+		offset := items - t.itemOffset.Load()
 		if _, err := t.index.ReadAt(buffer, int64((offset+1)*indexEntrySize)); err != nil {
 			return err
 		}
@@ -478,7 +475,7 @@ func (t *freezerTable) truncateTail(items uint64) error {
 		newTailId = newTail.filenum
 	}
 	// Update the virtual tail marker and hidden these entries in table.
-	atomic.StoreUint64(&t.itemHidden, items)
+	t.itemHidden.Store(items)
 	if err := writeMetadata(t.meta, newMetadata(items)); err != nil {
 		return err
 	}
@@ -501,7 +498,7 @@ func (t *freezerTable) truncateTail(items uint64) error {
 	// Count how many items can be deleted from the file.
 	var (
 		newDeleted = items
-		deleted    = atomic.LoadUint64(&t.itemOffset)
+		deleted    = t.itemOffset.Load()
 	)
 	for current := items - 1; current >= deleted; current -= 1 {
 		if _, err := t.index.ReadAt(buffer, int64((current-deleted+1)*indexEntrySize)); err != nil {
@@ -541,7 +538,7 @@ func (t *freezerTable) truncateTail(items uint64) error {
 	}
 	// Release any files before the current tail
 	t.tailId = newTailId
-	atomic.StoreUint64(&t.itemOffset, newDeleted)
+	t.itemOffset.Store(newDeleted)
 	t.releaseFilesBefore(t.tailId, true)
 
 	// Retrieve the new size and update the total size counter
@@ -654,7 +651,7 @@ func (t *freezerTable) releaseFilesBefore(num uint32, remove bool) {
 // it will return error.
 func (t *freezerTable) getIndices(from, count uint64) ([]*indexEntry, error) {
 	// Apply the table-offset
-	from = from - t.itemOffset
+	from = from - t.itemOffset.Load()
 	// For reading N items, we need N+1 indices.
 	buffer := make([]byte, (count+1)*indexEntrySize)
 	if _, err := t.index.ReadAt(buffer, int64(from*indexEntrySize)); err != nil {
@@ -744,8 +741,8 @@ func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []i
 		return nil, nil, errClosed
 	}
 	var (
-		items  = atomic.LoadUint64(&t.items)      // the total items(head + 1)
-		hidden = atomic.LoadUint64(&t.itemHidden) // the number of hidden items
+		items  = t.items.Load()      // the total items(head + 1)
+		hidden = t.itemHidden.Load() // the number of hidden items
 	)
 	// Ensure the start is written, not deleted from the tail, and that the
 	// caller actually wants something
@@ -832,7 +829,7 @@ func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []i
 // has returns an indicator whether the specified number data is still accessible
 // in the freezer table.
 func (t *freezerTable) has(number uint64) bool {
-	return atomic.LoadUint64(&t.items) > number && atomic.LoadUint64(&t.itemHidden) <= number
+	return t.items.Load() > number && t.itemHidden.Load() <= number
 }
 
 // size returns the total data size in the freezer table.
@@ -922,7 +919,7 @@ func (t *freezerTable) dumpIndex(w io.Writer, start, stop int64) {
 		return
 	}
 	fmt.Fprintf(w, "Version %d count %d, deleted %d, hidden %d\n", meta.Version,
-		atomic.LoadUint64(&t.items), atomic.LoadUint64(&t.itemOffset), atomic.LoadUint64(&t.itemHidden))
+		t.items.Load(), t.itemOffset.Load(), t.itemHidden.Load())
 
 	buf := make([]byte, indexEntrySize)
diff --git a/core/rawdb/freezer_table_test.go b/core/rawdb/freezer_table_test.go
index 6181d4d72cac..97cdaed77355 100644
--- a/core/rawdb/freezer_table_test.go
+++ b/core/rawdb/freezer_table_test.go
@@ -24,7 +24,6 @@ import (
 	"os"
 	"path/filepath"
 	"reflect"
-	"sync/atomic"
 	"testing"
 	"testing/quick"
 
@@ -191,7 +190,7 @@ func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
 		writeChunks(t, f, 255, 15)
 
 		// The last item should be there
-		if _, err = f.Retrieve(f.items - 1); err != nil {
+		if _, err = f.Retrieve(f.items.Load() - 1); err != nil {
 			t.Fatal(err)
 		}
 		f.Close()
@@ -317,7 +316,7 @@ func TestFreezerRepairDanglingIndex(t *testing.T) {
 		writeChunks(t, f, 9, 15)
 
 		// The last item should be there
-		if _, err = f.Retrieve(f.items - 1); err != nil {
+		if _, err = f.Retrieve(f.items.Load() - 1); err != nil {
 			f.Close()
 			t.Fatal(err)
 		}
@@ -350,8 +349,8 @@ func TestFreezerRepairDanglingIndex(t *testing.T) {
 		t.Fatal(err)
 	}
 	defer f.Close()
-	if f.items != 7 {
-		t.Fatalf("expected %d items, got %d", 7, f.items)
+	if f.items.Load() != 7 {
+		t.Fatalf("expected %d items, got %d", 7, f.items.Load())
 	}
 	if err := assertFileSize(fileToCrop, 15); err != nil {
 		t.Fatal(err)
@@ -374,7 +373,7 @@ func TestFreezerTruncate(t *testing.T) {
 		writeChunks(t, f, 30, 15)
 
 		// The last item should be there
-		if _, err = f.Retrieve(f.items - 1); err != nil {
+		if _, err = f.Retrieve(f.items.Load() - 1); err != nil {
 			t.Fatal(err)
 		}
 		f.Close()
@@ -388,8 +387,8 @@ func TestFreezerTruncate(t *testing.T) {
 		}
 		defer f.Close()
 		f.truncateHead(10) // 150 bytes
-		if f.items != 10 {
-			t.Fatalf("expected %d items, got %d", 10, f.items)
+		if f.items.Load() != 10 {
+			t.Fatalf("expected %d items, got %d", 10, f.items.Load())
 		}
 		// 45, 45, 45, 15 -- bytes should be 15
 		if f.headBytes != 15 {
@@ -444,9 +443,9 @@ func TestFreezerRepairFirstFile(t *testing.T) {
 		if err != nil {
 			t.Fatal(err)
 		}
-		if f.items != 1 {
+		if f.items.Load() != 1 {
 			f.Close()
-			t.Fatalf("expected %d items, got %d", 0, f.items)
+			t.Fatalf("expected %d items, got %d", 1, f.items.Load())
 		}
 
 		// Write 40 bytes
@@ -483,7 +482,7 @@ func TestFreezerReadAndTruncate(t *testing.T) {
 		writeChunks(t, f, 30, 15)
 
 		// The last item should be there
-		if _, err = f.Retrieve(f.items - 1); err != nil {
+		if _, err = f.Retrieve(f.items.Load() - 1); err != nil {
 			t.Fatal(err)
 		}
 		f.Close()
@@ -495,9 +494,9 @@ func TestFreezerReadAndTruncate(t *testing.T) {
 		if err != nil {
 			t.Fatal(err)
 		}
-		if f.items != 30 {
+		if f.items.Load() != 30 {
 			f.Close()
-			t.Fatalf("expected %d items, got %d", 0, f.items)
+			t.Fatalf("expected %d items, got %d", 30, f.items.Load())
 		}
 		for y := byte(0); y < 30; y++ {
 			f.Retrieve(uint64(y))
@@ -1210,13 +1209,13 @@ func runRandTest(rt randTest) bool {
 				rt[i].err = fmt.Errorf("failed to reload table %v", err)
 			}
 		case opCheckAll:
-			tail := atomic.LoadUint64(&f.itemHidden)
-			head := atomic.LoadUint64(&f.items)
+			tail := f.itemHidden.Load()
+			head := f.items.Load()
 
 			if tail == head {
 				continue
 			}
-			got, err := f.RetrieveItems(atomic.LoadUint64(&f.itemHidden), head-tail, 100000)
+			got, err := f.RetrieveItems(f.itemHidden.Load(), head-tail, 100000)
 			if err != nil {
 				rt[i].err = err
 			} else {
@@ -1238,7 +1237,7 @@ func runRandTest(rt randTest) bool {
 			if len(step.items) == 0 {
 				continue
 			}
-			tail := atomic.LoadUint64(&f.itemHidden)
+			tail := f.itemHidden.Load()
 			for i := 0; i < len(step.items); i++ {
 				blobs = append(blobs, values[step.items[i]-tail])
 			}
@@ -1254,7 +1253,7 @@ func runRandTest(rt randTest) bool {
 
 		case opTruncateHead:
 			f.truncateHead(step.target)
 
-			length := atomic.LoadUint64(&f.items) - atomic.LoadUint64(&f.itemHidden)
+			length := f.items.Load() - f.itemHidden.Load()
 			values = values[:length]
 
 		case opTruncateHeadAll:
@@ -1262,10 +1261,10 @@ func runRandTest(rt randTest) bool {
 			values = nil
 
 		case opTruncateTail:
-			prev := atomic.LoadUint64(&f.itemHidden)
+			prev := f.itemHidden.Load()
 			f.truncateTail(step.target)
 
-			truncated := atomic.LoadUint64(&f.itemHidden) - prev
+			truncated := f.itemHidden.Load() - prev
 			values = values[truncated:]
 
 		case opTruncateTailAll:
diff --git a/core/rawdb/freezer_test.go b/core/rawdb/freezer_test.go
index 5896e43ce232..630c9029b0f5 100644
--- a/core/rawdb/freezer_test.go
+++ b/core/rawdb/freezer_test.go
@@ -267,10 +267,10 @@ func TestFreezerReadonlyValidate(t *testing.T) {
 	bBatch := f.tables["b"].newBatch()
 	require.NoError(t, bBatch.AppendRaw(0, item))
 	require.NoError(t, bBatch.commit())
-	if f.tables["a"].items != 3 {
+	if f.tables["a"].items.Load() != 3 {
 		t.Fatalf("unexpected number of items in table")
 	}
-	if f.tables["b"].items != 1 {
+	if f.tables["b"].items.Load() != 1 {
 		t.Fatalf("unexpected number of items in table")
 	}
 	require.NoError(t, f.Close())
diff --git a/core/state/snapshot/difflayer.go b/core/state/snapshot/difflayer.go
index f916a020e7bc..4701acccd399 100644
--- a/core/state/snapshot/difflayer.go
+++ b/core/state/snapshot/difflayer.go
@@ -103,7 +103,7 @@ type diffLayer struct {
 	memory uint64 // Approximate guess as to how much memory we use
 
 	root  common.Hash // Root hash to which this snapshot diff belongs to
-	stale uint32      // Signals that the layer became stale (state progressed)
+	stale atomic.Bool // Signals that the layer became stale (state progressed)
 
 	// destructSet is a very special helper marker. If an account is marked as
 	// deleted, then it's recorded in this set. However it's allowed that an account
@@ -267,7 +267,7 @@ func (dl *diffLayer) Parent() snapshot {
 // Stale return whether this layer has become stale (was flattened across) or if
 // it's still live.
 func (dl *diffLayer) Stale() bool {
-	return atomic.LoadUint32(&dl.stale) != 0
+	return dl.stale.Load()
 }
 
 // Account directly retrieves the account associated with a particular hash in
@@ -449,7 +449,7 @@ func (dl *diffLayer) flatten() snapshot {
 	// Before actually writing all our data to the parent, first ensure that the
 	// parent hasn't been 'corrupted' by someone else already flattening into it
-	if atomic.SwapUint32(&parent.stale, 1) != 0 {
+	if parent.stale.Swap(true) {
 		panic("parent diff layer is stale") // we've flattened into the same parent from two children, boo
 	}
 	// Overwrite all the updated accounts blindly, merge the sorted list
diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go
index 0f3fa2c7a4f0..2e57a059dd7c 100644
--- a/core/state/snapshot/snapshot.go
+++ b/core/state/snapshot/snapshot.go
@@ -22,7 +22,6 @@ import (
 	"errors"
 	"fmt"
 	"sync"
-	"sync/atomic"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/rawdb"
@@ -272,7 +271,7 @@ func (t *Tree) Disable() {
 		case *diffLayer:
 			// If the layer is a simple diff, simply mark as stale
 			layer.lock.Lock()
-			atomic.StoreUint32(&layer.stale, 1)
+			layer.stale.Store(true)
 			layer.lock.Unlock()
 
 		default:
@@ -726,7 +725,7 @@ func (t *Tree) Rebuild(root common.Hash) {
 		case *diffLayer:
 			// If the layer is a simple diff, simply mark as stale
 			layer.lock.Lock()
-			atomic.StoreUint32(&layer.stale, 1)
+			layer.stale.Store(true)
 			layer.lock.Unlock()
 
 		default:
diff --git a/core/state_prefetcher.go b/core/state_prefetcher.go
index 867b47db5319..a63c0c680e70 100644
--- a/core/state_prefetcher.go
+++ b/core/state_prefetcher.go
@@ -47,7 +47,7 @@ func newStatePrefetcher(config *params.ChainConfig, bc *BlockChain, engine conse
 // Prefetch processes the state changes according to the Ethereum rules by running
 // the transaction messages using the statedb, but any changes are discarded. The
 // only goal is to pre-cache transaction signatures and state trie nodes.
-func (p *statePrefetcher) Prefetch(block *types.Block, statedb *state.StateDB, cfg vm.Config, interrupt *uint32) {
+func (p *statePrefetcher) Prefetch(block *types.Block, statedb *state.StateDB, cfg vm.Config, interrupt *atomic.Bool) {
 	var (
 		header  = block.Header()
 		gaspool = new(GasPool).AddGas(block.GasLimit())
@@ -59,7 +59,7 @@ func (p *statePrefetcher) Prefetch(block *types.Block, statedb *state.StateDB, c
 	byzantium := p.config.IsByzantium(block.Number())
 	for i, tx := range block.Transactions() {
 		// If block precaching was interrupted, abort
-		if interrupt != nil && atomic.LoadUint32(interrupt) == 1 {
+		if interrupt != nil && interrupt.Load() {
 			return
 		}
 		// Convert the transaction into an executable message and pre-cache its sender
diff --git a/core/txpool/list.go b/core/txpool/list.go
index 062cbbf63e6a..9c14686181e1 100644
--- a/core/txpool/list.go
+++ b/core/txpool/list.go
@@ -481,11 +481,7 @@ func (h *priceHeap) Pop() interface{} {
 // better candidates for inclusion while in other cases (at the top of the baseFee peak)
 // the floating heap is better. When baseFee is decreasing they behave similarly.
 type pricedList struct {
-	// Number of stale price points to (re-heap trigger).
-	// This field is accessed atomically, and must be the first field
-	// to ensure it has correct alignment for atomic.AddInt64.
-	// See https://golang.org/pkg/sync/atomic/#pkg-note-BUG.
- stales int64 + stales atomic.Int64 all *lookup // Pointer to the map of all transactions urgent, floating priceHeap // Heaps of prices of all the stored **remote** transactions @@ -519,7 +515,7 @@ func (l *pricedList) Put(tx *types.Transaction, local bool) { // the heap if a large enough ratio of transactions go stale. func (l *pricedList) Removed(count int) { // Bump the stale counter, but exit if still too low (< 25%) - stales := atomic.AddInt64(&l.stales, int64(count)) + stales := l.stales.Add(int64(count)) if int(stales) <= (len(l.urgent.list)+len(l.floating.list))/4 { return } @@ -544,7 +540,7 @@ func (l *pricedList) underpricedFor(h *priceHeap, tx *types.Transaction) bool { for len(h.list) > 0 { head := h.list[0] if l.all.GetRemote(head.Hash()) == nil { // Removed or migrated - atomic.AddInt64(&l.stales, -1) + l.stales.Add(-1) heap.Pop(h) continue } @@ -570,7 +566,7 @@ func (l *pricedList) Discard(slots int, force bool) (types.Transactions, bool) { // Discard stale transactions if found during cleanup tx := heap.Pop(&l.urgent).(*types.Transaction) if l.all.GetRemote(tx.Hash()) == nil { // Removed or migrated - atomic.AddInt64(&l.stales, -1) + l.stales.Add(-1) continue } // Non stale transaction found, move to floating heap @@ -583,7 +579,7 @@ func (l *pricedList) Discard(slots int, force bool) (types.Transactions, bool) { // Discard stale transactions if found during cleanup tx := heap.Pop(&l.floating).(*types.Transaction) if l.all.GetRemote(tx.Hash()) == nil { // Removed or migrated - atomic.AddInt64(&l.stales, -1) + l.stales.Add(-1) continue } // Non stale transaction found, discard it @@ -606,7 +602,7 @@ func (l *pricedList) Reheap() { l.reheapMu.Lock() defer l.reheapMu.Unlock() start := time.Now() - atomic.StoreInt64(&l.stales, 0) + l.stales.Store(0) l.urgent.list = make([]*types.Transaction, 0, l.all.RemoteCount()) l.all.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool { l.urgent.list = append(l.urgent.list, tx) diff --git a/core/txpool/txpool.go b/core/txpool/txpool.go index 762210b7b74a..fdd1468d4f64 100644 --- a/core/txpool/txpool.go +++ b/core/txpool/txpool.go @@ -23,7 +23,6 @@ import ( "math/big" "sort" "sync" - "sync/atomic" "time" "github.com/ethereum/go-ethereum/common" @@ -375,7 +374,7 @@ func (pool *TxPool) loop() { pool.mu.RLock() pending, queued := pool.stats() pool.mu.RUnlock() - stales := int(atomic.LoadInt64(&pool.priced.stales)) + stales := int(pool.priced.stales.Load()) if pending != prevPending || queued != prevQueued || stales != prevStales { log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales) diff --git a/core/txpool/txpool_test.go b/core/txpool/txpool_test.go index 237f97afe434..1102150189a0 100644 --- a/core/txpool/txpool_test.go +++ b/core/txpool/txpool_test.go @@ -59,7 +59,7 @@ func init() { } type testBlockChain struct { - gasLimit uint64 // must be first field for 64 bit alignment (atomic access) + gasLimit atomic.Uint64 statedb *state.StateDB chainHeadFeed *event.Feed } @@ -67,7 +67,7 @@ type testBlockChain struct { func (bc *testBlockChain) CurrentBlock() *types.Header { return &types.Header{ Number: new(big.Int), - GasLimit: atomic.LoadUint64(&bc.gasLimit), + GasLimit: bc.gasLimit.Load(), } } @@ -121,7 +121,8 @@ func setupPool() (*TxPool, *ecdsa.PrivateKey) { func setupPoolWithConfig(config *params.ChainConfig) (*TxPool, *ecdsa.PrivateKey) { statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := &testBlockChain{10000000, 
statedb, new(event.Feed)} + blockchain := &testBlockChain{atomic.Uint64{}, statedb, new(event.Feed)} + blockchain.gasLimit.Store(10000000) key, _ := crypto.GenerateKey() pool := NewTxPool(testTxPoolConfig, config, blockchain) @@ -233,7 +234,8 @@ func TestStateChangeDuringReset(t *testing.T) { // setup pool with 2 transaction in it statedb.SetBalance(address, new(big.Int).SetUint64(params.Ether)) - blockchain := &testChain{&testBlockChain{1000000000, statedb, new(event.Feed)}, address, &trigger} + blockchain := &testChain{&testBlockChain{atomic.Uint64{}, statedb, new(event.Feed)}, address, &trigger} + blockchain.gasLimit.Store(1000000000) tx0 := transaction(0, 100000, key) tx1 := transaction(1, 100000, key) @@ -427,7 +429,9 @@ func TestChainFork(t *testing.T) { statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) statedb.AddBalance(addr, big.NewInt(100000000000000)) - pool.chain = &testBlockChain{1000000, statedb, new(event.Feed)} + chain := &testBlockChain{atomic.Uint64{}, statedb, new(event.Feed)} + chain.gasLimit.Store(1000000) + pool.chain = chain <-pool.requestReset(nil, nil) } resetState() @@ -456,7 +460,9 @@ func TestDoubleNonce(t *testing.T) { statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) statedb.AddBalance(addr, big.NewInt(100000000000000)) - pool.chain = &testBlockChain{1000000, statedb, new(event.Feed)} + chain := &testBlockChain{atomic.Uint64{}, statedb, new(event.Feed)} + chain.gasLimit.Store(1000000) + pool.chain = chain <-pool.requestReset(nil, nil) } resetState() @@ -626,7 +632,7 @@ func TestDropping(t *testing.T) { t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 4) } // Reduce the block gas limit, check that invalidated transactions are dropped - atomic.StoreUint64(&pool.chain.(*testBlockChain).gasLimit, 100) + pool.chain.(*testBlockChain).gasLimit.Store(100) <-pool.requestReset(nil, nil) if _, ok := pool.pending[account].txs.items[tx0.Nonce()]; !ok { @@ -654,7 +660,8 @@ func TestPostponing(t *testing.T) { // Create the pool to test the postponing with statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := &testBlockChain{1000000, statedb, new(event.Feed)} + blockchain := &testBlockChain{atomic.Uint64{}, statedb, new(event.Feed)} + blockchain.gasLimit.Store(1000000) pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) defer pool.Stop() @@ -866,7 +873,8 @@ func testQueueGlobalLimiting(t *testing.T, nolocals bool) { // Create the pool to test the limit enforcement with statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := &testBlockChain{1000000, statedb, new(event.Feed)} + blockchain := &testBlockChain{atomic.Uint64{}, statedb, new(event.Feed)} + blockchain.gasLimit.Store(1000000) config := testTxPoolConfig config.NoLocals = nolocals @@ -958,7 +966,8 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) { // Create the pool to test the non-expiration enforcement statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := &testBlockChain{1000000, statedb, new(event.Feed)} + blockchain := &testBlockChain{atomic.Uint64{}, statedb, new(event.Feed)} + blockchain.gasLimit.Store(1000000) config := testTxPoolConfig config.Lifetime = time.Second @@ -1143,7 +1152,8 @@ func TestPendingGlobalLimiting(t *testing.T) { // Create the pool to test the limit enforcement with statedb, _ := state.New(common.Hash{}, 
state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := &testBlockChain{1000000, statedb, new(event.Feed)} + blockchain := &testBlockChain{atomic.Uint64{}, statedb, new(event.Feed)} + blockchain.gasLimit.Store(1000000) config := testTxPoolConfig config.GlobalSlots = config.AccountSlots * 10 @@ -1245,7 +1255,8 @@ func TestCapClearsFromAll(t *testing.T) { // Create the pool to test the limit enforcement with statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := &testBlockChain{1000000, statedb, new(event.Feed)} + blockchain := &testBlockChain{atomic.Uint64{}, statedb, new(event.Feed)} + blockchain.gasLimit.Store(1000000) config := testTxPoolConfig config.AccountSlots = 2 @@ -1279,7 +1290,8 @@ func TestPendingMinimumAllowance(t *testing.T) { // Create the pool to test the limit enforcement with statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := &testBlockChain{1000000, statedb, new(event.Feed)} + blockchain := &testBlockChain{atomic.Uint64{}, statedb, new(event.Feed)} + blockchain.gasLimit.Store(1000000) config := testTxPoolConfig config.GlobalSlots = 1 @@ -1327,7 +1339,8 @@ func TestRepricing(t *testing.T) { // Create the pool to test the pricing enforcement with statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := &testBlockChain{1000000, statedb, new(event.Feed)} + blockchain := &testBlockChain{atomic.Uint64{}, statedb, new(event.Feed)} + blockchain.gasLimit.Store(1000000) pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) defer pool.Stop() @@ -1575,7 +1588,8 @@ func TestRepricingKeepsLocals(t *testing.T) { // Create the pool to test the pricing enforcement with statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := &testBlockChain{1000000, statedb, new(event.Feed)} + blockchain := &testBlockChain{atomic.Uint64{}, statedb, new(event.Feed)} + blockchain.gasLimit.Store(1000000) pool := NewTxPool(testTxPoolConfig, eip1559Config, blockchain) defer pool.Stop() @@ -1648,7 +1662,8 @@ func TestUnderpricing(t *testing.T) { // Create the pool to test the pricing enforcement with statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := &testBlockChain{1000000, statedb, new(event.Feed)} + blockchain := &testBlockChain{atomic.Uint64{}, statedb, new(event.Feed)} + blockchain.gasLimit.Store(1000000) config := testTxPoolConfig config.GlobalSlots = 2 @@ -1754,7 +1769,8 @@ func TestStableUnderpricing(t *testing.T) { // Create the pool to test the pricing enforcement with statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := &testBlockChain{1000000, statedb, new(event.Feed)} + blockchain := &testBlockChain{atomic.Uint64{}, statedb, new(event.Feed)} + blockchain.gasLimit.Store(1000000) config := testTxPoolConfig config.GlobalSlots = 128 @@ -1986,7 +2002,8 @@ func TestDeduplication(t *testing.T) { // Create the pool to test the pricing enforcement with statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := &testBlockChain{1000000, statedb, new(event.Feed)} + blockchain := &testBlockChain{atomic.Uint64{}, statedb, new(event.Feed)} + blockchain.gasLimit.Store(1000000) pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) defer pool.Stop() @@ -2052,7 +2069,8 @@ func TestReplacement(t *testing.T) { // Create 
the pool to test the pricing enforcement with statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := &testBlockChain{1000000, statedb, new(event.Feed)} + blockchain := &testBlockChain{atomic.Uint64{}, statedb, new(event.Feed)} + blockchain.gasLimit.Store(1000000) pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) defer pool.Stop() @@ -2257,7 +2275,8 @@ func testJournaling(t *testing.T, nolocals bool) { // Create the original pool to inject transaction into the journal statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := &testBlockChain{1000000, statedb, new(event.Feed)} + blockchain := &testBlockChain{atomic.Uint64{}, statedb, new(event.Feed)} + blockchain.gasLimit.Store(1000000) config := testTxPoolConfig config.NoLocals = nolocals @@ -2299,7 +2318,8 @@ func testJournaling(t *testing.T, nolocals bool) { // Terminate the old pool, bump the local nonce, create a new pool and ensure relevant transaction survive pool.Stop() statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1) - blockchain = &testBlockChain{1000000, statedb, new(event.Feed)} + blockchain = &testBlockChain{atomic.Uint64{}, statedb, new(event.Feed)} + blockchain.gasLimit.Store(1000000) pool = NewTxPool(config, params.TestChainConfig, blockchain) @@ -2326,7 +2346,8 @@ func testJournaling(t *testing.T, nolocals bool) { pool.Stop() statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1) - blockchain = &testBlockChain{1000000, statedb, new(event.Feed)} + blockchain = &testBlockChain{atomic.Uint64{}, statedb, new(event.Feed)} + blockchain.gasLimit.Store(1000000) pool = NewTxPool(config, params.TestChainConfig, blockchain) pending, queued = pool.Stats() @@ -2355,7 +2376,8 @@ func TestStatusCheck(t *testing.T) { // Create the pool to test the status retrievals with statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := &testBlockChain{1000000, statedb, new(event.Feed)} + blockchain := &testBlockChain{atomic.Uint64{}, statedb, new(event.Feed)} + blockchain.gasLimit.Store(1000000) pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) defer pool.Stop() diff --git a/core/types.go b/core/types.go index 4c5b74a49865..36eb0d1dedbe 100644 --- a/core/types.go +++ b/core/types.go @@ -17,6 +17,8 @@ package core import ( + "sync/atomic" + "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" @@ -39,7 +41,7 @@ type Prefetcher interface { // Prefetch processes the state changes according to the Ethereum rules by running // the transaction messages using the statedb, but any changes are discarded. The // only goal is to pre-cache transaction signatures and state trie nodes. - Prefetch(block *types.Block, statedb *state.StateDB, cfg vm.Config, interrupt *uint32) + Prefetch(block *types.Block, statedb *state.StateDB, cfg vm.Config, interrupt *atomic.Bool) } // Processor is an interface for processing blocks using a given initial state. diff --git a/core/vm/evm.go b/core/vm/evm.go index d78ea0792664..0084e4ca51fc 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -114,8 +114,7 @@ type EVM struct { // used throughout the execution of the tx. interpreter *EVMInterpreter // abort is used to abort the EVM calling operations - // NOTE: must be set atomically - abort int32 + abort atomic.Bool // callGasTemp holds the gas available for the current call. 
This is needed because the // available gas is calculated in gasCall* according to the 63/64 rule and later // applied in opCall*. @@ -147,12 +146,12 @@ func (evm *EVM) Reset(txCtx TxContext, statedb StateDB) { // Cancel cancels any running EVM operation. This may be called concurrently and // it's safe to be called multiple times. func (evm *EVM) Cancel() { - atomic.StoreInt32(&evm.abort, 1) + evm.abort.Store(true) } // Cancelled returns true if Cancel has been called func (evm *EVM) Cancelled() bool { - return atomic.LoadInt32(&evm.abort) == 1 + return evm.abort.Load() } // Interpreter returns the current interpreter diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 77b6e02bfcc7..21d8cc215ad9 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -17,8 +17,6 @@ package vm import ( - "sync/atomic" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" @@ -531,7 +529,7 @@ func opSstore(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b } func opJump(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { - if atomic.LoadInt32(&interpreter.evm.abort) != 0 { + if interpreter.evm.abort.Load() { return nil, errStopToken } pos := scope.Stack.pop() @@ -543,7 +541,7 @@ func opJump(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byt } func opJumpi(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { - if atomic.LoadInt32(&interpreter.evm.abort) != 0 { + if interpreter.evm.abort.Load() { return nil, errStopToken } pos, cond := scope.Stack.pop(), scope.Stack.pop() diff --git a/eth/backend.go b/eth/backend.go index 6368c0e03c56..56b3da6a1f9e 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -23,7 +23,6 @@ import ( "math/big" "runtime" "sync" - "sync/atomic" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/common" @@ -442,7 +441,7 @@ func (s *Ethereum) StartMining(threads int) error { } // If mining is started, we can disable the transaction rejection mechanism // introduced to speed sync times. 
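// A minimal, runnable sketch of the abort-flag pattern above, assuming nothing
// beyond the standard library: an atomic.Bool replaces the old int32 flag and
// its StoreInt32/LoadInt32 calls, and cancellation stays safe to invoke
// concurrently and more than once. The interpreter type here is hypothetical,
// not the real EVM.
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

type interpreter struct {
	abort atomic.Bool // raised to stop the run loop at the next check
}

// cancel may be called from any goroutine, any number of times.
func (it *interpreter) cancel() { it.abort.Store(true) }

// run polls the flag on every iteration, the way opJump/opJumpi poll
// interpreter.evm.abort above.
func (it *interpreter) run() int {
	steps := 0
	for !it.abort.Load() {
		steps++
		time.Sleep(time.Millisecond)
	}
	return steps
}

func main() {
	it := new(interpreter)
	go func() {
		time.Sleep(50 * time.Millisecond)
		it.cancel()
		it.cancel() // idempotent second call
	}()
	fmt.Println("steps before abort:", it.run())
}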
- atomic.StoreUint32(&s.handler.acceptTxs, 1) + s.handler.acceptTxs.Store(true) go s.miner.Start() } @@ -474,8 +473,8 @@ func (s *Ethereum) Engine() consensus.Engine { return s.engine } func (s *Ethereum) ChainDb() ethdb.Database { return s.chainDb } func (s *Ethereum) IsListening() bool { return true } // Always listening func (s *Ethereum) Downloader() *downloader.Downloader { return s.handler.downloader } -func (s *Ethereum) Synced() bool { return atomic.LoadUint32(&s.handler.acceptTxs) == 1 } -func (s *Ethereum) SetSynced() { atomic.StoreUint32(&s.handler.acceptTxs, 1) } +func (s *Ethereum) Synced() bool { return s.handler.acceptTxs.Load() } +func (s *Ethereum) SetSynced() { s.handler.acceptTxs.Store(true) } func (s *Ethereum) ArchiveMode() bool { return s.config.NoPruning } func (s *Ethereum) BloomIndexer() *core.ChainIndexer { return s.bloomIndexer } func (s *Ethereum) Merger() *consensus.Merger { return s.merger } diff --git a/eth/downloader/beaconsync.go b/eth/downloader/beaconsync.go index ff985e6b035f..df8af68bc798 100644 --- a/eth/downloader/beaconsync.go +++ b/eth/downloader/beaconsync.go @@ -19,7 +19,6 @@ package downloader import ( "fmt" "sync" - "sync/atomic" "time" "github.com/ethereum/go-ethereum/common" @@ -371,7 +370,7 @@ func (d *Downloader) fetchBeaconHeaders(from uint64) error { continue } // If the pivot block is committed, signal header sync termination - if atomic.LoadInt32(&d.committed) == 1 { + if d.committed.Load() { select { case d.headerProcCh <- nil: return nil diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index fb9de79912e2..aa61102c0bb1 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -98,7 +98,7 @@ type headerTask struct { } type Downloader struct { - mode uint32 // Synchronisation mode defining the strategy used (per sync cycle), use d.getMode() to get the SyncMode + mode atomic.Uint32 // Synchronisation mode defining the strategy used (per sync cycle), use d.getMode() to get the SyncMode mux *event.TypeMux // Event multiplexer to announce sync operation events checkpoint uint64 // Checkpoint block number to enforce head against (e.g. snap sync) @@ -122,9 +122,9 @@ type Downloader struct { // Status synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing - synchronising int32 + synchronising atomic.Bool notified int32 - committed int32 + committed atomic.Bool ancientLimit uint64 // The maximum block number which can be regarded as ancient data. // Channels @@ -292,7 +292,7 @@ func (d *Downloader) Progress() ethereum.SyncProgress { // Synchronising returns whether the downloader is currently retrieving blocks. 
func (d *Downloader) Synchronising() bool { - return atomic.LoadInt32(&d.synchronising) > 0 + return d.synchronising.Load() } // RegisterPeer injects a new download peer into the set of block source to be @@ -392,10 +392,10 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td, ttd *big.Int, return d.synchroniseMock(id, hash) } // Make sure only one goroutine is ever allowed past this point at once - if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) { + if !d.synchronising.CompareAndSwap(false, true) { return errBusy } - defer atomic.StoreInt32(&d.synchronising, 0) + defer d.synchronising.Store(false) // Post a user notification of the sync (only once per session) if atomic.CompareAndSwapInt32(&d.notified, 0, 1) { @@ -435,7 +435,7 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td, ttd *big.Int, defer d.Cancel() // No matter what, we can't leave the cancel channel open // Atomically set the requested sync mode - atomic.StoreUint32(&d.mode, uint32(mode)) + d.mode.Store(uint32(mode)) // Retrieve the origin peer and initiate the downloading process var p *peerConnection @@ -452,7 +452,7 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td, ttd *big.Int, } func (d *Downloader) getMode() SyncMode { - return SyncMode(atomic.LoadUint32(&d.mode)) + return SyncMode(d.mode.Load()) } // syncWithPeer starts a block synchronization based on the hash chain from the @@ -562,9 +562,9 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd * rawdb.WriteLastPivotNumber(d.stateDB, pivotNumber) } } - d.committed = 1 + d.committed.Store(true) if mode == SnapSync && pivot.Number.Uint64() != 0 { - d.committed = 0 + d.committed.Store(false) } if mode == SnapSync { // Set the ancient data limitation. If we are running snap sync, all block @@ -1128,7 +1128,7 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, head uint64) e // If no more headers are inbound, notify the content fetchers and return if len(headers) == 0 { // Don't abort header fetches while the pivot is downloading - if atomic.LoadInt32(&d.committed) == 0 && pivot <= from { + if !d.committed.Load() && pivot <= from { p.log.Debug("No headers, waiting for pivot commit") select { case <-time.After(fsHeaderContCheck): @@ -1669,7 +1669,7 @@ func (d *Downloader) processSnapSyncContent() error { results = append(append([]*fetchResult{oldPivot}, oldTail...), results...) 
} // Split around the pivot block and process the two sides via snap/full sync - if atomic.LoadInt32(&d.committed) == 0 { + if !d.committed.Load() { latest := results[len(results)-1].Header // If the height is above the pivot block by 2 sets, it means the pivot // become stale in the network and it was garbage collected, move to a @@ -1794,7 +1794,7 @@ func (d *Downloader) commitPivotBlock(result *fetchResult) error { if err := d.blockchain.SnapSyncCommitHead(block.Hash()); err != nil { return err } - atomic.StoreInt32(&d.committed, 1) + d.committed.Store(true) return nil } diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index a884c1e950b0..d71e6dd6b7f8 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -476,9 +476,9 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) { tester.newPeer("peer", protocol, testChainBase.blocks[1:]) // Wrap the importer to allow stepping - blocked, proceed := uint32(0), make(chan struct{}) + blocked, proceed := atomic.Uint32{}, make(chan struct{}) tester.downloader.chainInsertHook = func(results []*fetchResult) { - atomic.StoreUint32(&blocked, uint32(len(results))) + blocked.Store(uint32(len(results))) <-proceed } // Start a synchronisation concurrently @@ -505,7 +505,7 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) { tester.downloader.queue.resultCache.lock.Lock() { cached = tester.downloader.queue.resultCache.countCompleted() - frozen = int(atomic.LoadUint32(&blocked)) + frozen = int(blocked.Load()) retrieved = int(tester.chain.CurrentSnapBlock().Number.Uint64()) + 1 } tester.downloader.queue.resultCache.lock.Unlock() @@ -528,8 +528,8 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) { t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheMaxItems, retrieved, frozen, targetBlocks+1) } // Permit the blocked blocks to import - if atomic.LoadUint32(&blocked) > 0 { - atomic.StoreUint32(&blocked, uint32(0)) + if blocked.Load() > 0 { + blocked.Store(0) proceed <- struct{}{} } } @@ -786,12 +786,12 @@ func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) { tester.newPeer("peer", protocol, chain.blocks[1:]) // Instrument the downloader to signal body requests - bodiesHave, receiptsHave := int32(0), int32(0) + bodiesHave, receiptsHave := atomic.Int32{}, atomic.Int32{} tester.downloader.bodyFetchHook = func(headers []*types.Header) { - atomic.AddInt32(&bodiesHave, int32(len(headers))) + bodiesHave.Add(int32(len(headers))) } tester.downloader.receiptFetchHook = func(headers []*types.Header) { - atomic.AddInt32(&receiptsHave, int32(len(headers))) + receiptsHave.Add(int32(len(headers))) } // Synchronise with the peer and make sure all blocks were retrieved if err := tester.sync("peer", nil, mode); err != nil { @@ -811,11 +811,11 @@ func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) { receiptsNeeded++ } } - if int(bodiesHave) != bodiesNeeded { - t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded) + if int(bodiesHave.Load()) != bodiesNeeded { + t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave.Load(), bodiesNeeded) } - if int(receiptsHave) != receiptsNeeded { - t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded) + if int(receiptsHave.Load()) != receiptsNeeded { + t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave.Load(), receiptsNeeded) } } diff 
--git a/eth/downloader/queue.go b/eth/downloader/queue.go index 5af5068c98cf..e9907297a0b9 100644 --- a/eth/downloader/queue.go +++ b/eth/downloader/queue.go @@ -61,7 +61,7 @@ type fetchRequest struct { // fetchResult is a struct collecting partial results from data fetchers until // all outstanding pieces complete and the result as a whole can be processed. type fetchResult struct { - pending int32 // Flag telling what deliveries are outstanding + pending atomic.Int32 // Flag telling what deliveries are outstanding Header *types.Header Uncles []*types.Header @@ -75,38 +75,38 @@ func newFetchResult(header *types.Header, fastSync bool) *fetchResult { Header: header, } if !header.EmptyBody() { - item.pending |= (1 << bodyType) + item.pending.Store(item.pending.Load() | (1 << bodyType)) } else if header.WithdrawalsHash != nil { item.Withdrawals = make(types.Withdrawals, 0) } if fastSync && !header.EmptyReceipts() { - item.pending |= (1 << receiptType) + item.pending.Store(item.pending.Load() | (1 << receiptType)) } return item } // SetBodyDone flags the body as finished. func (f *fetchResult) SetBodyDone() { - if v := atomic.LoadInt32(&f.pending); (v & (1 << bodyType)) != 0 { - atomic.AddInt32(&f.pending, -1) + if v := f.pending.Load(); (v & (1 << bodyType)) != 0 { + f.pending.Add(-1) } } // AllDone checks if item is done. func (f *fetchResult) AllDone() bool { - return atomic.LoadInt32(&f.pending) == 0 + return f.pending.Load() == 0 } // SetReceiptsDone flags the receipts as finished. func (f *fetchResult) SetReceiptsDone() { - if v := atomic.LoadInt32(&f.pending); (v & (1 << receiptType)) != 0 { - atomic.AddInt32(&f.pending, -2) + if v := f.pending.Load(); (v & (1 << receiptType)) != 0 { + f.pending.Add(-2) } } // Done checks if the given type is done already func (f *fetchResult) Done(kind uint) bool { - v := atomic.LoadInt32(&f.pending) + v := f.pending.Load() return v&(1<<kind) == 0 } diff --git a/eth/downloader/resultstore.go b/eth/downloader/resultstore.go [...] - indexIncomplete int32 + indexIncomplete atomic.Int32 [...] if index >= int32(len(r.items)) { break @@ -156,7 +156,7 @@ func (r *resultStore) countCompleted() int { break } } - atomic.StoreInt32(&r.indexIncomplete, index) + r.indexIncomplete.Store(index) return int(index) } @@ -179,7 +179,7 @@ func (r *resultStore) GetCompleted(limit int) []*fetchResult { } // Advance the expected block number of the first cache entry r.resultOffset += uint64(limit) - atomic.AddInt32(&r.indexIncomplete, int32(-limit)) + r.indexIncomplete.Add(int32(-limit)) return results } diff --git a/eth/downloader/skeleton_test.go b/eth/downloader/skeleton_test.go index b19494a7b069..6a76d78ac817 100644 --- a/eth/downloader/skeleton_test.go +++ b/eth/downloader/skeleton_test.go @@ -82,8 +82,8 @@ type skeletonTestPeer struct { serve func(origin uint64) []*types.Header // Hook to allow custom responses - served uint64 // Number of headers served by this peer - dropped uint64 // Flag whether the peer was dropped (stop responding) + served atomic.Uint64 // Number of headers served by this peer + dropped atomic.Uint64 // Flag whether the peer was dropped (stop responding) } // newSkeletonTestPeer creates a new mock peer to test the skeleton sync with. @@ -113,7 +113,7 @@ func (p *skeletonTestPeer) RequestHeadersByNumber(origin uint64, amount int, ski // Since skeleton test peer are in-memory mocks, dropping the does not make // them inaccessible. As such, check a local `dropped` field to see if the // peer has been dropped and should not respond any more.
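// The fetchResult above packs one bit per outstanding delivery into an
// atomic.Int32. A self-contained sketch of that bookkeeping under the same
// bit layout; the names mirror the queue code but this is an illustration,
// not the real type (each bit is assumed to be cleared exactly once, by the
// goroutine that completed that delivery).
package main

import (
	"fmt"
	"sync/atomic"
)

const (
	bodyType    = uint(0) // bit 0: block body outstanding
	receiptType = uint(1) // bit 1: receipts outstanding
)

type fetchResult struct {
	pending atomic.Int32
}

func newFetchResult(wantBody, wantReceipts bool) *fetchResult {
	r := new(fetchResult)
	var mask int32
	if wantBody {
		mask |= 1 << bodyType
	}
	if wantReceipts {
		mask |= 1 << receiptType
	}
	r.pending.Store(mask) // construction is single-threaded, a plain Store is fine
	return r
}

// setDone clears one bit; Add of a negative 1<<kind generalises the
// Add(-1)/Add(-2) calls in SetBodyDone/SetReceiptsDone above.
func (r *fetchResult) setDone(kind uint) {
	if r.pending.Load()&(1<<kind) != 0 {
		r.pending.Add(-(int32(1) << kind))
	}
}

func (r *fetchResult) allDone() bool { return r.pending.Load() == 0 }

func main() {
	r := newFetchResult(true, true)
	r.setDone(bodyType)
	r.setDone(receiptType)
	fmt.Println("all done:", r.allDone()) // true
}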
- if atomic.LoadUint64(&p.dropped) != 0 { + if p.dropped.Load() != 0 { return nil, errors.New("peer already dropped") } // Skeleton sync retrieves batches of headers going backward without gaps. @@ -161,7 +161,7 @@ func (p *skeletonTestPeer) RequestHeadersByNumber(origin uint64, amount int, ski } } } - atomic.AddUint64(&p.served, uint64(len(headers))) + p.served.Add(uint64(len(headers))) hashes := make([]common.Hash, len(headers)) for i, header := range headers { @@ -182,7 +182,7 @@ func (p *skeletonTestPeer) RequestHeadersByNumber(origin uint64, amount int, ski sink <- res if err := <-res.Done; err != nil { log.Warn("Skeleton test peer response rejected", "err", err) - atomic.AddUint64(&p.dropped, 1) + p.dropped.Add(1) } }() return req, nil @@ -817,7 +817,7 @@ func TestSkeletonSyncRetrievals(t *testing.T) { dropped := make(map[string]int) drop := func(peer string) { if p := peerset.Peer(peer); p != nil { - atomic.AddUint64(&p.peer.(*skeletonTestPeer).dropped, 1) + p.peer.(*skeletonTestPeer).dropped.Add(1) } peerset.Unregister(peer) dropped[peer]++ @@ -895,14 +895,14 @@ func TestSkeletonSyncRetrievals(t *testing.T) { if !tt.unpredictable { var served uint64 for _, peer := range tt.peers { - served += atomic.LoadUint64(&peer.served) + served += peer.served.Load() } if served != tt.midserve { t.Errorf("test %d, mid state: served headers mismatch: have %d, want %d", i, served, tt.midserve) } var drops uint64 for _, peer := range tt.peers { - drops += atomic.LoadUint64(&peer.dropped) + drops += peer.dropped.Load() } if drops != tt.middrop { t.Errorf("test %d, mid state: dropped peers mismatch: have %d, want %d", i, drops, tt.middrop) @@ -950,20 +950,20 @@ func TestSkeletonSyncRetrievals(t *testing.T) { if !tt.unpredictable { served := uint64(0) for _, peer := range tt.peers { - served += atomic.LoadUint64(&peer.served) + served += peer.served.Load() } if tt.newPeer != nil { - served += atomic.LoadUint64(&tt.newPeer.served) + served += tt.newPeer.served.Load() } if served != tt.endserve { t.Errorf("test %d, end state: served headers mismatch: have %d, want %d", i, served, tt.endserve) } drops := uint64(0) for _, peer := range tt.peers { - drops += atomic.LoadUint64(&peer.dropped) + drops += peer.dropped.Load() } if tt.newPeer != nil { - drops += atomic.LoadUint64(&tt.newPeer.dropped) + drops += tt.newPeer.dropped.Load() } if drops != tt.enddrop { t.Errorf("test %d, end state: dropped peers mismatch: have %d, want %d", i, drops, tt.middrop) diff --git a/eth/fetcher/block_fetcher_test.go b/eth/fetcher/block_fetcher_test.go index 9e5693c02e5a..aa6da66008ea 100644 --- a/eth/fetcher/block_fetcher_test.go +++ b/eth/fetcher/block_fetcher_test.go @@ -413,13 +413,13 @@ func testConcurrentAnnouncements(t *testing.T, light bool) { secondHeaderFetcher := tester.makeHeaderFetcher("second", blocks, -gatherSlack) secondBodyFetcher := tester.makeBodyFetcher("second", blocks, 0) - counter := uint32(0) + counter := atomic.Uint32{} firstHeaderWrapper := func(hash common.Hash, sink chan *eth.Response) (*eth.Request, error) { - atomic.AddUint32(&counter, 1) + counter.Add(1) return firstHeaderFetcher(hash, sink) } secondHeaderWrapper := func(hash common.Hash, sink chan *eth.Response) (*eth.Request, error) { - atomic.AddUint32(&counter, 1) + counter.Add(1) return secondHeaderFetcher(hash, sink) } // Iteratively announce blocks until all are imported @@ -446,8 +446,8 @@ func testConcurrentAnnouncements(t *testing.T, light bool) { verifyImportDone(t, imported) // Make sure no blocks were retrieved twice - if 
int(counter) != targetBlocks { - t.Fatalf("retrieval count mismatch: have %v, want %v", counter, targetBlocks) + if int(counter.Load()) != targetBlocks { + t.Fatalf("retrieval count mismatch: have %v, want %v", counter.Load(), targetBlocks) } verifyChainHeight(t, tester, uint64(len(hashes)-1)) } @@ -513,9 +513,9 @@ func testPendingDeduplication(t *testing.T, light bool) { bodyFetcher := tester.makeBodyFetcher("repeater", blocks, 0) delay := 50 * time.Millisecond - counter := uint32(0) + counter := atomic.Uint32{} headerWrapper := func(hash common.Hash, sink chan *eth.Response) (*eth.Request, error) { - atomic.AddUint32(&counter, 1) + counter.Add(1) // Simulate a long running fetch resink := make(chan *eth.Response) @@ -545,8 +545,8 @@ func testPendingDeduplication(t *testing.T, light bool) { time.Sleep(delay) // Check that all blocks were imported and none fetched twice - if int(counter) != 1 { - t.Fatalf("retrieval count mismatch: have %v, want %v", counter, 1) + if int(counter.Load()) != 1 { + t.Fatalf("retrieval count mismatch: have %v, want %v", counter.Load(), 1) } verifyChainHeight(t, tester, 1) } @@ -632,9 +632,9 @@ func TestImportDeduplication(t *testing.T) { headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack) bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0) - counter := uint32(0) + counter := atomic.Uint32{} tester.fetcher.insertChain = func(blocks types.Blocks) (int, error) { - atomic.AddUint32(&counter, uint32(len(blocks))) + counter.Add(uint32(len(blocks))) return tester.insertChain(blocks) } // Instrument the fetching and imported events @@ -655,8 +655,8 @@ func TestImportDeduplication(t *testing.T) { tester.fetcher.Enqueue("valid", blocks[hashes[1]]) verifyImportCount(t, imported, 2) - if counter != 2 { - t.Fatalf("import invocation count mismatch: have %v, want %v", counter, 2) + if counter.Load() != 2 { + t.Fatalf("import invocation count mismatch: have %v, want %v", counter.Load(), 2) } } @@ -853,13 +853,13 @@ func TestHashMemoryExhaustionAttack(t *testing.T) { // Create a tester with instrumented import hooks tester := newTester(false) - imported, announces := make(chan interface{}), int32(0) + imported, announces := make(chan interface{}), atomic.Int32{} tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { imported <- block } tester.fetcher.announceChangeHook = func(hash common.Hash, added bool) { if added { - atomic.AddInt32(&announces, 1) + announces.Add(1) } else { - atomic.AddInt32(&announces, -1) + announces.Add(-1) } } // Create a valid chain and an infinite junk chain @@ -879,7 +879,7 @@ func TestHashMemoryExhaustionAttack(t *testing.T) { } tester.fetcher.Notify("attacker", attack[i], 1 /* don't distance drop */, time.Now(), attackerHeaderFetcher, attackerBodyFetcher) } - if count := atomic.LoadInt32(&announces); count != hashLimit+maxQueueDist { + if count := announces.Load(); count != hashLimit+maxQueueDist { t.Fatalf("queued announce count mismatch: have %d, want %d", count, hashLimit+maxQueueDist) } // Wait for fetches to complete @@ -900,13 +900,13 @@ func TestBlockMemoryExhaustionAttack(t *testing.T) { // Create a tester with instrumented import hooks tester := newTester(false) - imported, enqueued := make(chan interface{}), int32(0) + imported, enqueued := make(chan interface{}), atomic.Int32{} tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { imported <- block } tester.fetcher.queueChangeHook = func(hash common.Hash, added bool) { if added { - atomic.AddInt32(&enqueued, 1) 
+ enqueued.Add(1) } else { - atomic.AddInt32(&enqueued, -1) + enqueued.Add(-1) } } // Create a valid chain and a batch of dangling (but in range) blocks @@ -924,7 +924,7 @@ func TestBlockMemoryExhaustionAttack(t *testing.T) { tester.fetcher.Enqueue("attacker", block) } time.Sleep(200 * time.Millisecond) - if queued := atomic.LoadInt32(&enqueued); queued != blockLimit { + if queued := enqueued.Load(); queued != blockLimit { t.Fatalf("queued block count mismatch: have %d, want %d", queued, blockLimit) } // Queue up a batch of valid blocks, and check that a new peer is allowed to do so @@ -932,7 +932,7 @@ func TestBlockMemoryExhaustionAttack(t *testing.T) { tester.fetcher.Enqueue("valid", blocks[hashes[len(hashes)-3-i]]) } time.Sleep(100 * time.Millisecond) - if queued := atomic.LoadInt32(&enqueued); queued != blockLimit+maxQueueDist-1 { + if queued := enqueued.Load(); queued != blockLimit+maxQueueDist-1 { t.Fatalf("queued block count mismatch: have %d, want %d", queued, blockLimit+maxQueueDist-1) } // Insert the missing piece (and sanity check the import) diff --git a/eth/gasprice/feehistory.go b/eth/gasprice/feehistory.go index 47cc31999e01..80bf613bdaee 100644 --- a/eth/gasprice/feehistory.go +++ b/eth/gasprice/feehistory.go @@ -252,9 +252,10 @@ func (oracle *Oracle) FeeHistory(ctx context.Context, blocks int, unresolvedLast oldestBlock := lastBlock + 1 - uint64(blocks) var ( - next = oldestBlock + next = atomic.Uint64{} results = make(chan *blockFees, blocks) ) + next.Store(oldestBlock) percentileKey := make([]byte, 8*len(rewardPercentiles)) for i, p := range rewardPercentiles { binary.LittleEndian.PutUint64(percentileKey[i*8:(i+1)*8], math.Float64bits(p)) @@ -263,7 +264,7 @@ func (oracle *Oracle) FeeHistory(ctx context.Context, blocks int, unresolvedLast go func() { for { // Retrieve the next block number to fetch with this goroutine - blockNumber := atomic.AddUint64(&next, 1) - 1 + blockNumber := next.Add(1) - 1 if blockNumber > lastBlock { return } diff --git a/eth/handler.go b/eth/handler.go index 83df6ff2eb60..6ccfedbdc05b 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -93,8 +93,8 @@ type handler struct { networkID uint64 forkFilter forkid.Filter // Fork ID filter, constant across the lifetime of the node - snapSync uint32 // Flag whether snap sync is enabled (gets disabled if we already have blocks) - acceptTxs uint32 // Flag whether we're considered synchronised (enables transaction processing) + snapSync atomic.Bool // Flag whether snap sync is enabled (gets disabled if we already have blocks) + acceptTxs atomic.Bool // Flag whether we're considered synchronised (enables transaction processing) checkpointNumber uint64 // Block number for the sync progress validator to cross reference checkpointHash common.Hash // Block hash for the sync progress validator to cross reference @@ -154,7 +154,7 @@ func newHandler(config *handlerConfig) (*handler, error) { // In these cases however it's safe to reenable snap sync. 
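// FeeHistory above fans work out to goroutines that claim block numbers by
// bumping a shared atomic.Uint64: next.Add(1)-1 hands every number out exactly
// once, with no lock and no channel. A runnable sketch of that work-claiming
// idiom (the bounds and worker count are made up for illustration):
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	const oldest, last = uint64(100), uint64(110)

	var next atomic.Uint64
	next.Store(oldest)

	var (
		wg      sync.WaitGroup
		claimed atomic.Uint64 // how many numbers were handed out in total
	)
	for w := 0; w < 4; w++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				n := next.Add(1) - 1 // claim the next block number
				if n > last {
					return // past the range, this worker is done
				}
				claimed.Add(1) // stand-in for the real per-block work
			}
		}()
	}
	wg.Wait()
	fmt.Println("blocks processed:", claimed.Load()) // 11, each exactly once
}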
fullBlock, snapBlock := h.chain.CurrentBlock(), h.chain.CurrentSnapBlock() if fullBlock.Number.Uint64() == 0 && snapBlock.Number.Uint64() > 0 { - h.snapSync = uint32(1) + h.snapSync.Store(true) log.Warn("Switch sync mode from full sync to snap sync") } } else { @@ -163,7 +163,7 @@ func newHandler(config *handlerConfig) (*handler, error) { log.Warn("Switch sync mode from snap sync to full sync") } else { // If snap sync was requested and our database is empty, grant it - h.snapSync = uint32(1) + h.snapSync.Store(true) } } // If we have trusted checkpoints, enforce them on the chain @@ -176,9 +176,9 @@ func newHandler(config *handlerConfig) (*handler, error) { success := func() { // If we were running snap sync and it finished, disable doing another // round on next sync cycle - if atomic.LoadUint32(&h.snapSync) == 1 { + if h.snapSync.Load() { log.Info("Snap sync complete, auto disabling") - atomic.StoreUint32(&h.snapSync, 0) + h.snapSync.Store(false) } // If we've successfully finished a sync cycle and passed any required // checkpoint, enable accepting transactions from the network @@ -187,7 +187,7 @@ func newHandler(config *handlerConfig) (*handler, error) { // Checkpoint passed, sanity check the timestamp to have a fallback mechanism // for non-checkpointed (number = 0) private networks. if head.Time >= uint64(time.Now().AddDate(0, -1, 0).Unix()) { - atomic.StoreUint32(&h.acceptTxs, 1) + h.acceptTxs.Store(true) } } } @@ -259,7 +259,7 @@ func newHandler(config *handlerConfig) (*handler, error) { // accept each others' blocks until a restart. Unfortunately we haven't figured // out a way yet where nodes can decide unilaterally whether the network is new // or not. This should be fixed if we figure out a solution. - if atomic.LoadUint32(&h.snapSync) == 1 { + if h.snapSync.Load() { log.Warn("Snap syncing, discarded propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash()) return 0, nil } @@ -288,7 +288,7 @@ func newHandler(config *handlerConfig) (*handler, error) { } n, err := h.chain.InsertChain(blocks) if err == nil { - atomic.StoreUint32(&h.acceptTxs, 1) // Mark initial sync done on any fetcher import + h.acceptTxs.Store(true) // Mark initial sync done on any fetcher import } return n, err } @@ -337,7 +337,7 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error { return err } reject := false // reserved peer slots - if atomic.LoadUint32(&h.snapSync) == 1 { + if h.snapSync.Load() { if snap == nil { // If we are running snap-sync, we want to reserve roughly half the peer // slots for peers supporting the snap protocol. @@ -411,7 +411,7 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error { // If we're doing a snap sync, we must enforce the checkpoint // block to avoid eclipse attacks. Unsynced nodes are welcome // to connect after we're done joining the network. 
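// The success callback above flips two one-way latches: snap sync is switched
// off after its first completed cycle, and transaction acceptance is switched
// on. A compact sketch of such latch flags (a hypothetical handler, not the
// eth one; both flags only ever transition in one direction):
package main

import (
	"fmt"
	"sync/atomic"
)

type handler struct {
	snapSync  atomic.Bool // true while a snap sync cycle is still wanted
	acceptTxs atomic.Bool // true once inbound transactions may be processed
}

// finishSync mirrors the shape of the callback: disable further snap cycles,
// then start accepting transactions.
func (h *handler) finishSync() {
	if h.snapSync.Load() {
		h.snapSync.Store(false)
	}
	h.acceptTxs.Store(true)
}

func main() {
	h := new(handler)
	h.snapSync.Store(true)
	h.finishSync()
	fmt.Println("snap sync:", h.snapSync.Load(), "accept txs:", h.acceptTxs.Load())
}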
- if atomic.LoadUint32(&h.snapSync) == 1 { + if h.snapSync.Load() { peer.Log().Warn("Dropping unsynced node during sync", "addr", peer.RemoteAddr(), "type", peer.Name()) res.Done <- errors.New("unsynced node cannot serve sync") return diff --git a/eth/handler_eth.go b/eth/handler_eth.go index 4ed6335769cf..00be022d9e9c 100644 --- a/eth/handler_eth.go +++ b/eth/handler_eth.go @@ -19,7 +19,6 @@ package eth import ( "fmt" "math/big" - "sync/atomic" "time" "github.com/ethereum/go-ethereum/common" @@ -52,7 +51,7 @@ func (h *ethHandler) PeerInfo(id enode.ID) interface{} { // AcceptTxs retrieves whether transaction processing is enabled on the node // or if inbound transactions should simply be dropped. func (h *ethHandler) AcceptTxs() bool { - return atomic.LoadUint32(&h.acceptTxs) == 1 + return h.acceptTxs.Load() } // Handle is invoked from a peer's message handler when it receives a new remote diff --git a/eth/handler_eth_test.go b/eth/handler_eth_test.go index 9f0dd8ec5de4..cf75c17d7202 100644 --- a/eth/handler_eth_test.go +++ b/eth/handler_eth_test.go @@ -20,7 +20,6 @@ import ( "fmt" "math/big" "math/rand" - "sync/atomic" "testing" "time" @@ -251,7 +250,7 @@ func testRecvTransactions(t *testing.T, protocol uint) { handler := newTestHandler() defer handler.close() - handler.handler.acceptTxs = 1 // mark synced to accept transactions + handler.handler.acceptTxs.Store(true) // mark synced to accept transactions txs := make(chan core.NewTxsEvent) sub := handler.txpool.SubscribeNewTxsEvent(txs) @@ -397,7 +396,7 @@ func testTransactionPropagation(t *testing.T, protocol uint) { // to receive them. We need multiple sinks since a one-to-one peering would // broadcast all transactions without announcement. source := newTestHandler() - source.handler.snapSync = 0 // Avoid requiring snap, otherwise some will be dropped below + source.handler.snapSync.Store(false) // Avoid requiring snap, otherwise some will be dropped below defer source.close() sinks := make([]*testHandler, 10) @@ -405,7 +404,7 @@ func testTransactionPropagation(t *testing.T, protocol uint) { sinks[i] = newTestHandler() defer sinks[i].close() - sinks[i].handler.acceptTxs = 1 // mark synced to accept transactions + sinks[i].handler.acceptTxs.Store(true) // mark synced to accept transactions } // Interconnect all the sink handlers with the source handler for i, sink := range sinks { @@ -510,9 +509,9 @@ func testCheckpointChallenge(t *testing.T, syncmode downloader.SyncMode, checkpo defer handler.close() if syncmode == downloader.SnapSync { - atomic.StoreUint32(&handler.handler.snapSync, 1) + handler.handler.snapSync.Store(true) } else { - atomic.StoreUint32(&handler.handler.snapSync, 0) + handler.handler.snapSync.Store(false) } var response *types.Header if checkpoint { diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go index 13279fd96c43..e99eb8843663 100644 --- a/eth/protocols/snap/sync.go +++ b/eth/protocols/snap/sync.go @@ -449,10 +449,10 @@ type Syncer struct { trienodeHealReqs map[uint64]*trienodeHealRequest // Trie node requests currently running bytecodeHealReqs map[uint64]*bytecodeHealRequest // Bytecode requests currently running - trienodeHealRate float64 // Average heal rate for processing trie node data - trienodeHealPend uint64 // Number of trie nodes currently pending for processing - trienodeHealThrottle float64 // Divisor for throttling the amount of trienode heal data requested - trienodeHealThrottled time.Time // Timestamp the last time the throttle was updated + trienodeHealRate float64 // 
Average heal rate for processing trie node data + trienodeHealPend atomic.Uint64 // Number of trie nodes currently pending for processing + trienodeHealThrottle float64 // Divisor for throttling the amount of trienode heal data requested + trienodeHealThrottled time.Time // Timestamp the last time the throttle was updated trienodeHealSynced uint64 // Number of state trie nodes downloaded trienodeHealBytes common.StorageSize // Number of state trie bytes persisted to disk @@ -2189,7 +2189,7 @@ func (s *Syncer) processTrienodeHealResponse(res *trienodeHealResponse) { // HR(N) = (1-MI)^N*(OR-NR) + NR s.trienodeHealRate = gomath.Pow(1-trienodeHealRateMeasurementImpact, float64(fills))*(s.trienodeHealRate-rate) + rate - pending := atomic.LoadUint64(&s.trienodeHealPend) + pending := s.trienodeHealPend.Load() if time.Since(s.trienodeHealThrottled) > time.Second { // Periodically adjust the trie node throttler if float64(pending) > 2*s.trienodeHealRate { @@ -2776,9 +2776,9 @@ func (s *Syncer) OnTrieNodes(peer SyncPeer, id uint64, trienodes [][]byte) error return errors.New("unexpected healing trienode") } // Response validated, send it to the scheduler for filling - atomic.AddUint64(&s.trienodeHealPend, fills) + s.trienodeHealPend.Add(fills) defer func() { - atomic.AddUint64(&s.trienodeHealPend, ^(fills - 1)) + s.trienodeHealPend.Add(^(fills - 1)) }() response := &trienodeHealResponse{ paths: req.paths, diff --git a/eth/sync.go b/eth/sync.go index 6d764ef4822b..f59a1d965784 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -19,7 +19,6 @@ package eth import ( "errors" "math/big" - "sync/atomic" "time" "github.com/ethereum/go-ethereum/common" @@ -205,7 +204,7 @@ func peerToSyncOp(mode downloader.SyncMode, p *eth.Peer) *chainSyncOp { func (cs *chainSyncer) modeAndLocalHead() (downloader.SyncMode, *big.Int) { // If we're in snap sync mode, return that directly - if atomic.LoadUint32(&cs.handler.snapSync) == 1 { + if cs.handler.snapSync.Load() { block := cs.handler.chain.CurrentSnapBlock() td := cs.handler.chain.GetTd(block.Hash(), block.Number.Uint64()) return downloader.SnapSync, td @@ -256,9 +255,9 @@ func (h *handler) doSync(op *chainSyncOp) error { if err != nil { return err } - if atomic.LoadUint32(&h.snapSync) == 1 { + if h.snapSync.Load() { log.Info("Snap sync complete, auto disabling") - atomic.StoreUint32(&h.snapSync, 0) + h.snapSync.Store(false) } // If we've successfully finished a sync cycle and passed any required checkpoint, // enable accepting transactions from the network. @@ -267,7 +266,7 @@ func (h *handler) doSync(op *chainSyncOp) error { // Checkpoint passed, sanity check the timestamp to have a fallback mechanism // for non-checkpointed (number = 0) private networks. 
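// OnTrieNodes above decrements trienodeHealPend by fills with Add(^(fills - 1)).
// atomic.Uint64 has no Sub, so the code adds the two's complement instead: for
// unsigned x, ^(x-1) == -x modulo 2^64. A sketch verifying the identity:
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var pend atomic.Uint64
	pend.Add(10) // ten trie nodes pending

	fills := uint64(4)
	pend.Add(^(fills - 1)) // equivalent to subtracting fills

	fmt.Println("pending:", pend.Load()) // 6
}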
if head.Time >= uint64(time.Now().AddDate(0, -1, 0).Unix()) { - atomic.StoreUint32(&h.acceptTxs, 1) + h.acceptTxs.Store(true) } } if head.Number.Uint64() > 0 { diff --git a/eth/sync_test.go b/eth/sync_test.go index 0b9f9e1bbaaf..b5e00298b9e9 100644 --- a/eth/sync_test.go +++ b/eth/sync_test.go @@ -17,7 +17,6 @@ package eth import ( - "sync/atomic" "testing" "time" @@ -39,14 +38,14 @@ func testSnapSyncDisabling(t *testing.T, ethVer uint, snapVer uint) { // Create an empty handler and ensure it's in snap sync mode empty := newTestHandler() - if atomic.LoadUint32(&empty.handler.snapSync) == 0 { + if !empty.handler.snapSync.Load() { t.Fatalf("snap sync disabled on pristine blockchain") } defer empty.close() // Create a full handler and ensure snap sync ends up disabled full := newTestHandlerWithBlocks(1024) - if atomic.LoadUint32(&full.handler.snapSync) == 1 { + if full.handler.snapSync.Load() { t.Fatalf("snap sync not disabled on non-empty blockchain") } defer full.close() @@ -91,7 +90,7 @@ func testSnapSyncDisabling(t *testing.T, ethVer uint, snapVer uint) { if err := empty.handler.doSync(op); err != nil { t.Fatal("sync failed:", err) } - if atomic.LoadUint32(&empty.handler.snapSync) == 1 { + if empty.handler.snapSync.Load() { t.Fatalf("snap sync not disabled after successful synchronisation") } } diff --git a/eth/tracers/api_test.go b/eth/tracers/api_test.go index b0dae4ca3e0d..d57f0386c323 100644 --- a/eth/tracers/api_test.go +++ b/eth/tracers/api_test.go @@ -835,8 +835,8 @@ func TestTraceChain(t *testing.T) { signer := types.HomesteadSigner{} var ( - ref uint32 // total refs has made - rel uint32 // total rels has made + ref atomic.Uint32 // total refs has made + rel atomic.Uint32 // total rels has made nonce uint64 ) backend := newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) { @@ -849,8 +849,8 @@ nonce += 1 } }) - backend.refHook = func() { atomic.AddUint32(&ref, 1) } - backend.relHook = func() { atomic.AddUint32(&rel, 1) } + backend.refHook = func() { ref.Add(1) } + backend.relHook = func() { rel.Add(1) } api := NewAPI(backend) single := `{"result":{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}}` @@ -863,7 +863,9 @@ {10, 20, nil}, // the middle chain range, blocks [11, 20] } for _, c := range cases { - ref, rel = 0, 0 // clean up the counters + // clean up the counters + ref.Store(0) + rel.Store(0) from, _ := api.blockByNumber(context.Background(), rpc.BlockNumber(c.start)) to, _ := api.blockByNumber(context.Background(), rpc.BlockNumber(c.end)) @@ -889,7 +891,7 @@ t.Error("Missing tracing block") } - if ref != rel { - t.Errorf("Ref and deref actions are not equal, ref %d rel %d", ref, rel) + if ref.Load() != rel.Load() { + t.Errorf("Ref and deref actions are not equal, ref %d rel %d", ref.Load(), rel.Load()) } } } diff --git a/eth/tracers/logger/logger.go b/eth/tracers/logger/logger.go index 5e75318b9a92..c7f171c5bdf9 100644 --- a/eth/tracers/logger/logger.go +++ b/eth/tracers/logger/logger.go @@ -116,8 +116,8 @@ type StructLogger struct { gasLimit uint64 usedGas uint64 - interrupt uint32 // Atomic flag to signal execution interruption - reason error // Textual reason for the interruption + interrupt atomic.Bool // Atomic flag to signal execution interruption + reason error // Textual reason for the interruption } // NewStructLogger returns a new logger @@ -149,7 +149,7 @@ func (l *StructLogger) CaptureStart(env *vm.EVM, from common.Address, to common.
// CaptureState also tracks SLOAD/SSTORE ops to track storage change. func (l *StructLogger) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) { // If tracing was interrupted, set the error and stop - if atomic.LoadUint32(&l.interrupt) > 0 { + if l.interrupt.Load() { return } // check if already accumulated the specified number of logs @@ -258,7 +258,7 @@ func (l *StructLogger) GetResult() (json.RawMessage, error) { // Stop terminates execution of the tracer at the first opportune moment. func (l *StructLogger) Stop(err error) { l.reason = err - atomic.StoreUint32(&l.interrupt, 1) + l.interrupt.Store(true) } func (l *StructLogger) CaptureTxStart(gasLimit uint64) { diff --git a/eth/tracers/native/4byte.go b/eth/tracers/native/4byte.go index 1b4649baa33e..5a2c4f91115f 100644 --- a/eth/tracers/native/4byte.go +++ b/eth/tracers/native/4byte.go @@ -48,7 +48,7 @@ func init() { type fourByteTracer struct { noopTracer ids map[string]int // ids aggregates the 4byte ids found - interrupt uint32 // Atomic flag to signal execution interruption + interrupt atomic.Bool // Atomic flag to signal execution interruption reason error // Textual reason for the interruption activePrecompiles []common.Address // Updated on CaptureStart based on given rules } @@ -93,7 +93,7 @@ func (t *fourByteTracer) CaptureStart(env *vm.EVM, from common.Address, to commo // CaptureEnter is called when EVM enters a new scope (via call, create or selfdestruct). func (t *fourByteTracer) CaptureEnter(op vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) { // Skip if tracing was interrupted - if atomic.LoadUint32(&t.interrupt) > 0 { + if t.interrupt.Load() { return } if len(input) < 4 { @@ -124,7 +124,7 @@ func (t *fourByteTracer) GetResult() (json.RawMessage, error) { // Stop terminates execution of the tracer at the first opportune moment. func (t *fourByteTracer) Stop(err error) { t.reason = err - atomic.StoreUint32(&t.interrupt, 1) + t.interrupt.Store(true) } func bytesToHex(s []byte) string { diff --git a/eth/tracers/native/call.go b/eth/tracers/native/call.go index 7b631a88f620..5cf7efbabfe3 100644 --- a/eth/tracers/native/call.go +++ b/eth/tracers/native/call.go @@ -102,8 +102,8 @@ type callTracer struct { callstack []callFrame config callTracerConfig gasLimit uint64 - interrupt uint32 // Atomic flag to signal execution interruption - reason error // Textual reason for the interruption + interrupt atomic.Bool // Atomic flag to signal execution interruption + reason error // Textual reason for the interruption } type callTracerConfig struct { @@ -156,7 +156,7 @@ func (t *callTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, sco return } // Skip if tracing was interrupted - if atomic.LoadUint32(&t.interrupt) > 0 { + if t.interrupt.Load() { return } switch op { @@ -187,7 +187,7 @@ func (t *callTracer) CaptureEnter(typ vm.OpCode, from common.Address, to common. return } // Skip if tracing was interrupted - if atomic.LoadUint32(&t.interrupt) > 0 { + if t.interrupt.Load() { return } @@ -251,7 +251,7 @@ func (t *callTracer) GetResult() (json.RawMessage, error) { // Stop terminates execution of the tracer at the first opportune moment. 
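// All the tracers above stop the same way: record the reason, then raise an
// atomic interrupt flag that the hot path polls. Because the flag is stored
// after the reason, a reader that observes the flag also sees the error. A
// self-contained sketch (an illustrative tracer, not the real vm tracer
// interface):
package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

type tracer struct {
	interrupt atomic.Bool // polled on every captured step
	reason    error       // written before interrupt is raised
}

// stop mirrors Stop above: reason first, flag second.
func (t *tracer) stop(err error) {
	t.reason = err
	t.interrupt.Store(true)
}

// capture bails out early once the flag is seen, like CaptureState above.
func (t *tracer) capture(steps int) (int, error) {
	for i := 0; i < steps; i++ {
		if t.interrupt.Load() {
			return i, t.reason
		}
	}
	return steps, nil
}

func main() {
	t := new(tracer)
	t.stop(errors.New("execution timeout"))
	done, err := t.capture(1000)
	fmt.Println(done, err)
}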
func (t *callTracer) Stop(err error) { t.reason = err - atomic.StoreUint32(&t.interrupt, 1) + t.interrupt.Store(true) } // clearFailedLogs clears the logs of a callframe and all its children diff --git a/eth/tracers/native/prestate.go b/eth/tracers/native/prestate.go index 948d09ef767c..04b77b8f982c 100644 --- a/eth/tracers/native/prestate.go +++ b/eth/tracers/native/prestate.go @@ -62,8 +62,8 @@ type prestateTracer struct { to common.Address gasLimit uint64 // Amount of gas bought for the whole tx config prestateTracerConfig - interrupt uint32 // Atomic flag to signal execution interruption - reason error // Textual reason for the interruption + interrupt atomic.Bool // Atomic flag to signal execution interruption + reason error // Textual reason for the interruption created map[common.Address]bool deleted map[common.Address]bool } @@ -256,7 +256,7 @@ func (t *prestateTracer) GetResult() (json.RawMessage, error) { // Stop terminates execution of the tracer at the first opportune moment. func (t *prestateTracer) Stop(err error) { t.reason = err - atomic.StoreUint32(&t.interrupt, 1) + t.interrupt.Store(true) } // lookupAccount fetches details of an account and adds it to the prestate diff --git a/ethdb/pebble/pebble.go b/ethdb/pebble/pebble.go index 4e374c9e2832..7c01b879b8e7 100644 --- a/ethdb/pebble/pebble.go +++ b/ethdb/pebble/pebble.go @@ -75,14 +75,14 @@ type Database struct { log log.Logger // Contextual logger tracking the database path - activeComp int // Current number of active compactions - compStartTime time.Time // The start time of the earliest currently-active compaction - compTime int64 // Total time spent in compaction in ns - level0Comp uint32 // Total number of level-zero compactions - nonLevel0Comp uint32 // Total number of non level-zero compactions - writeDelayStartTime time.Time // The start time of the latest write stall - writeDelayCount int64 // Total number of write stall counts - writeDelayTime int64 // Total time spent in write stalls + activeComp int // Current number of active compactions + compStartTime time.Time // The start time of the earliest currently-active compaction + compTime atomic.Int64 // Total time spent in compaction in ns + level0Comp atomic.Uint32 // Total number of level-zero compactions + nonLevel0Comp atomic.Uint32 // Total number of non level-zero compactions + writeDelayStartTime time.Time // The start time of the latest write stall + writeDelayCount atomic.Int64 // Total number of write stall counts + writeDelayTime atomic.Int64 // Total time spent in write stalls } func (d *Database) onCompactionBegin(info pebble.CompactionInfo) { @@ -91,16 +91,16 @@ func (d *Database) onCompactionBegin(info pebble.CompactionInfo) { } l0 := info.Input[0] if l0.Level == 0 { - atomic.AddUint32(&d.level0Comp, 1) + d.level0Comp.Add(1) } else { - atomic.AddUint32(&d.nonLevel0Comp, 1) + d.nonLevel0Comp.Add(1) } d.activeComp++ } func (d *Database) onCompactionEnd(info pebble.CompactionInfo) { if d.activeComp == 1 { - atomic.AddInt64(&d.compTime, int64(time.Since(d.compStartTime))) + d.compTime.Add(int64(time.Since(d.compStartTime))) } else if d.activeComp == 0 { panic("should not happen") } @@ -112,7 +112,7 @@ func (d *Database) onWriteStallBegin(b pebble.WriteStallBeginInfo) { } func (d *Database) onWriteStallEnd() { - atomic.AddInt64(&d.writeDelayTime, int64(time.Since(d.writeDelayStartTime))) + d.writeDelayTime.Add(int64(time.Since(d.writeDelayStartTime))) } // New returns a wrapped pebble DB object. 
The namespace is the prefix that the @@ -407,11 +407,11 @@ func (d *Database) meter(refresh time.Duration) { nWrite int64 metrics = d.db.Metrics() - compTime = atomic.LoadInt64(&d.compTime) - writeDelayCount = atomic.LoadInt64(&d.writeDelayCount) - writeDelayTime = atomic.LoadInt64(&d.writeDelayTime) - nonLevel0CompCount = int64(atomic.LoadUint32(&d.nonLevel0Comp)) - level0CompCount = int64(atomic.LoadUint32(&d.level0Comp)) + compTime = d.compTime.Load() + writeDelayCount = d.writeDelayCount.Load() + writeDelayTime = d.writeDelayTime.Load() + nonLevel0CompCount = int64(d.nonLevel0Comp.Load()) + level0CompCount = int64(d.level0Comp.Load()) ) writeDelayTimes[i%2] = writeDelayTime writeDelayCounts[i%2] = writeDelayCount diff --git a/internal/cmdtest/test_cmd.go b/internal/cmdtest/test_cmd.go index fd7a4a8b7f4c..77826225462c 100644 --- a/internal/cmdtest/test_cmd.go +++ b/internal/cmdtest/test_cmd.go @@ -55,12 +55,12 @@ type TestCmd struct { Err error } -var id int32 +var id atomic.Int32 // Run exec's the current binary using name as argv[0] which will trigger the // reexec init function for that name (e.g. "geth-test" in cmd/geth/run_test.go) func (tt *TestCmd) Run(name string, args ...string) { - id := atomic.AddInt32(&id, 1) + id := id.Add(1) tt.stderr = &testlogger{t: tt.T, name: fmt.Sprintf("%d", id)} tt.cmd = &exec.Cmd{ Path: reexec.Self(), diff --git a/les/api_test.go b/les/api_test.go index db680da0bff7..e76539e0b555 100644 --- a/les/api_test.go +++ b/les/api_test.go @@ -149,7 +149,7 @@ func testCapacityAPI(t *testing.T, clientCount int) { var wg sync.WaitGroup stop := make(chan struct{}) - reqCount := make([]uint64, len(clientRpcClients)) + reqCount := make([]atomic.Uint64, len(clientRpcClients)) // Send light request like crazy. for i, c := range clientRpcClients { @@ -159,7 +159,7 @@ func testCapacityAPI(t *testing.T, clientCount int) { defer wg.Done() queue := make(chan struct{}, 100) - reqCount[i] = 0 + reqCount[i].Store(0) for { select { case queue <- struct{}{}: @@ -175,7 +175,7 @@ func testCapacityAPI(t *testing.T, clientCount int) { wg.Done() <-queue if ok { - count := atomic.AddUint64(&reqCount[i], 1) + count := reqCount[i].Add(1) if count%10000 == 0 { freezeClient(ctx, t, serverRpcClient, clients[i].ID()) } @@ -194,7 +194,7 @@ func testCapacityAPI(t *testing.T, clientCount int) { processedSince := func(start []uint64) []uint64 { res := make([]uint64, len(reqCount)) for i := range reqCount { - res[i] = atomic.LoadUint64(&reqCount[i]) + res[i] = reqCount[i].Load() if start != nil { res[i] -= start[i] } @@ -294,8 +294,8 @@ func testCapacityAPI(t *testing.T, clientCount int) { close(stop) wg.Wait() - for i, count := range reqCount { - t.Log("client", i, "processed", count) + for i := range reqCount { + t.Log("client", i, "processed", reqCount[i].Load()) } return true }) { diff --git a/les/checkpointoracle/oracle.go b/les/checkpointoracle/oracle.go index 6ad1ea293861..6741ff6301fd 100644 --- a/les/checkpointoracle/oracle.go +++ b/les/checkpointoracle/oracle.go @@ -40,7 +40,7 @@ type CheckpointOracle struct { config *params.CheckpointOracleConfig contract *checkpointoracle.CheckpointOracle - running int32 // Flag whether the contract backend is set or not + running atomic.Bool // Flag whether the contract backend is set or not getLocal func(uint64) params.TrustedCheckpoint // Function used to retrieve local checkpoint checkMu sync.Mutex // Mutex to sync access to the fields below @@ -65,7 +65,7 @@ func (oracle *CheckpointOracle) Start(backend bind.ContractBackend) { 
log.Error("Oracle contract binding failed", "err", err) return } - if !atomic.CompareAndSwapInt32(&oracle.running, 0, 1) { + if !oracle.running.CompareAndSwap(false, true) { log.Error("Already bound and listening to registrar") return } @@ -74,7 +74,7 @@ func (oracle *CheckpointOracle) Start(backend bind.ContractBackend) { // IsRunning returns an indicator whether the oracle is running. func (oracle *CheckpointOracle) IsRunning() bool { - return atomic.LoadInt32(&oracle.running) == 1 + return oracle.running.Load() } // Contract returns the underlying raw checkpoint oracle contract. diff --git a/les/client_handler.go b/les/client_handler.go index cce99d41dc14..446a6a72545f 100644 --- a/les/client_handler.go +++ b/les/client_handler.go @@ -21,7 +21,6 @@ import ( "math/big" "math/rand" "sync" - "sync/atomic" "time" "github.com/ethereum/go-ethereum/common" @@ -151,8 +150,8 @@ func (h *clientHandler) handle(p *serverPeer, noInitAnnounce bool) error { } // Mark the peer starts to be served. - atomic.StoreUint32(&p.serving, 1) - defer atomic.StoreUint32(&p.serving, 0) + p.serving.Store(true) + defer p.serving.Store(false) // Spawn a main loop to handle all incoming messages. for { diff --git a/les/costtracker.go b/les/costtracker.go index 43e32a5b2d00..695d54e14147 100644 --- a/les/costtracker.go +++ b/les/costtracker.go @@ -128,7 +128,7 @@ type costTracker struct { reqInfoCh chan reqInfo totalRechargeCh chan uint64 - stats map[uint64][]uint64 // Used for testing purpose. + stats map[uint64][]atomic.Uint64 // Used for testing purpose. // TestHooks testing bool // Disable real cost evaluation for testing purpose. @@ -152,9 +152,9 @@ func newCostTracker(db ethdb.Database, config *ethconfig.Config) (*costTracker, ct.outSizeFactor = utilTarget / float64(config.LightEgress) } if makeCostStats { - ct.stats = make(map[uint64][]uint64) + ct.stats = make(map[uint64][]atomic.Uint64) for code := range reqAvgTimeCost { - ct.stats[code] = make([]uint64, 10) + ct.stats[code] = make([]atomic.Uint64, 10) } } ct.gfLoop() @@ -423,7 +423,7 @@ func (ct *costTracker) updateStats(code, amount, servingTime, realCost uint64) { l++ realCost >>= 1 } - atomic.AddUint64(&ct.stats[code][l], 1) + ct.stats[code][l].Add(1) } } @@ -454,7 +454,7 @@ func (ct *costTracker) printStats() { return } for code, arr := range ct.stats { - log.Info("Request cost statistics", "code", code, "1/16", arr[0], "1/8", arr[1], "1/4", arr[2], "1/2", arr[3], "1", arr[4], "2", arr[5], "4", arr[6], "8", arr[7], "16", arr[8], ">16", arr[9]) + log.Info("Request cost statistics", "code", code, "1/16", arr[0].Load(), "1/8", arr[1].Load(), "1/4", arr[2].Load(), "1/2", arr[3].Load(), "1", arr[4].Load(), "2", arr[5].Load(), "4", arr[6].Load(), "8", arr[7].Load(), "16", arr[8].Load(), ">16", arr[9].Load()) } } diff --git a/les/downloader/downloader.go b/les/downloader/downloader.go index a6ebf1d2a5cc..06d507c0a0c2 100644 --- a/les/downloader/downloader.go +++ b/les/downloader/downloader.go @@ -88,7 +88,7 @@ var ( ) type Downloader struct { - mode uint32 // Synchronisation mode defining the strategy used (per sync cycle), use d.getMode() to get the SyncMode + mode atomic.Uint32 // Synchronisation mode defining the strategy used (per sync cycle), use d.getMode() to get the SyncMode mux *event.TypeMux // Event multiplexer to announce sync operation events checkpoint uint64 // Checkpoint block number to enforce head against (e.g. 
fast sync) @@ -112,9 +112,9 @@ type Downloader struct { // Status synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing - synchronising int32 + synchronising atomic.Bool notified int32 - committed int32 + committed atomic.Bool ancientLimit uint64 // The maximum block number which can be regarded as ancient data. // Channels @@ -272,7 +272,7 @@ func (d *Downloader) Progress() ethereum.SyncProgress { // Synchronising returns whether the downloader is currently retrieving blocks. func (d *Downloader) Synchronising() bool { - return atomic.LoadInt32(&d.synchronising) > 0 + return d.synchronising.Load() } // RegisterPeer injects a new download peer into the set of block source to be @@ -355,10 +355,10 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode return d.synchroniseMock(id, hash) } // Make sure only one goroutine is ever allowed past this point at once - if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) { + if !d.synchronising.CompareAndSwap(false, true) { return errBusy } - defer atomic.StoreInt32(&d.synchronising, 0) + defer d.synchronising.Store(false) // Post a user notification of the sync (only once per session) if atomic.CompareAndSwapInt32(&d.notified, 0, 1) { @@ -415,7 +415,7 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode defer d.Cancel() // No matter what, we can't leave the cancel channel open // Atomically set the requested sync mode - atomic.StoreUint32(&d.mode, uint32(mode)) + d.mode.Store(uint32(mode)) // Retrieve the origin peer and initiate the downloading process p := d.peers.Peer(id) @@ -426,7 +426,7 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode } func (d *Downloader) getMode() SyncMode { - return SyncMode(atomic.LoadUint32(&d.mode)) + return SyncMode(d.mode.Load()) } // syncWithPeer starts a block synchronization based on the hash chain from the @@ -491,9 +491,9 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I rawdb.WriteLastPivotNumber(d.stateDB, pivotNumber) } } - d.committed = 1 + d.committed.Store(true) if mode == FastSync && pivot.Number.Uint64() != 0 { - d.committed = 0 + d.committed.Store(false) } if mode == FastSync { // Set the ancient data limitation. @@ -1097,7 +1097,7 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64) error { // If no more headers are inbound, notify the content fetchers and return if packet.Items() == 0 { // Don't abort header fetches while the pivot is downloading - if atomic.LoadInt32(&d.committed) == 0 && pivot <= from { + if !d.committed.Load() && pivot <= from { p.log.Debug("No headers, waiting for pivot commit") select { case <-time.After(fsHeaderContCheck): @@ -1793,7 +1793,7 @@ func (d *Downloader) processFastSyncContent() error { results = append(append([]*fetchResult{oldPivot}, oldTail...), results...) 
} // Split around the pivot block and process the two sides via fast/full sync - if atomic.LoadInt32(&d.committed) == 0 { + if !d.committed.Load() { latest := results[len(results)-1].Header // If the height is above the pivot block by 2 sets, it means the pivot // become stale in the network and it was garbage collected, move to a @@ -1918,7 +1918,7 @@ func (d *Downloader) commitPivotBlock(result *fetchResult) error { if err := d.blockchain.FastSyncCommitHead(block.Hash()); err != nil { return err } - atomic.StoreInt32(&d.committed, 1) + d.committed.Store(true) return nil } diff --git a/les/downloader/downloader_test.go b/les/downloader/downloader_test.go index 1704d3e7433a..0aae0782ac6a 100644 --- a/les/downloader/downloader_test.go +++ b/les/downloader/downloader_test.go @@ -557,9 +557,9 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) { tester.newPeer("peer", protocol, testChainBase) // Wrap the importer to allow stepping - blocked, proceed := uint32(0), make(chan struct{}) + blocked, proceed := atomic.Uint32{}, make(chan struct{}) tester.downloader.chainInsertHook = func(results []*fetchResult) { - atomic.StoreUint32(&blocked, uint32(len(results))) + blocked.Store(uint32(len(results))) <-proceed } // Start a synchronisation concurrently @@ -586,7 +586,7 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) { tester.downloader.queue.resultCache.lock.Lock() { cached = tester.downloader.queue.resultCache.countCompleted() - frozen = int(atomic.LoadUint32(&blocked)) + frozen = int(blocked.Load()) retrieved = len(tester.ownBlocks) } tester.downloader.queue.resultCache.lock.Unlock() @@ -610,8 +610,8 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) { } // Permit the blocked blocks to import - if atomic.LoadUint32(&blocked) > 0 { - atomic.StoreUint32(&blocked, uint32(0)) + if blocked.Load() > 0 { + blocked.Store(uint32(0)) proceed <- struct{}{} } } @@ -874,12 +874,12 @@ func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) { tester.newPeer("peer", protocol, chain) // Instrument the downloader to signal body requests - bodiesHave, receiptsHave := int32(0), int32(0) + bodiesHave, receiptsHave := atomic.Int32{}, atomic.Int32{} tester.downloader.bodyFetchHook = func(headers []*types.Header) { - atomic.AddInt32(&bodiesHave, int32(len(headers))) + bodiesHave.Add(int32(len(headers))) } tester.downloader.receiptFetchHook = func(headers []*types.Header) { - atomic.AddInt32(&receiptsHave, int32(len(headers))) + receiptsHave.Add(int32(len(headers))) } // Synchronise with the peer and make sure all blocks were retrieved if err := tester.sync("peer", nil, mode); err != nil { @@ -899,11 +899,11 @@ func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) { receiptsNeeded++ } } - if int(bodiesHave) != bodiesNeeded { - t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded) + if int(bodiesHave.Load()) != bodiesNeeded { + t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave.Load(), bodiesNeeded) } - if int(receiptsHave) != receiptsNeeded { - t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded) + if int(receiptsHave.Load()) != receiptsNeeded { + t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave.Load(), receiptsNeeded) } } diff --git a/les/downloader/peer.go b/les/downloader/peer.go index c2161e2dae42..40fa29fdad74 100644 --- a/les/downloader/peer.go +++ b/les/downloader/peer.go @@ -48,10 +48,10 @@ var ( type peerConnection 
struct { id string // Unique identifier of the peer - headerIdle int32 // Current header activity state of the peer (idle = 0, active = 1) - blockIdle int32 // Current block activity state of the peer (idle = 0, active = 1) - receiptIdle int32 // Current receipt activity state of the peer (idle = 0, active = 1) - stateIdle int32 // Current node data activity state of the peer (idle = 0, active = 1) + headerIdle atomic.Bool // Current header activity state of the peer + blockIdle atomic.Bool // Current block activity state of the peer + receiptIdle atomic.Bool // Current receipt activity state of the peer + stateIdle atomic.Bool // Current node data activity state of the peer headerStarted time.Time // Time instance when the last header fetch was started blockStarted time.Time // Time instance when the last block (body) fetch was started @@ -121,10 +121,10 @@ func (p *peerConnection) Reset() { p.lock.Lock() defer p.lock.Unlock() - atomic.StoreInt32(&p.headerIdle, 0) - atomic.StoreInt32(&p.blockIdle, 0) - atomic.StoreInt32(&p.receiptIdle, 0) - atomic.StoreInt32(&p.stateIdle, 0) + p.headerIdle.Store(false) + p.blockIdle.Store(false) + p.receiptIdle.Store(false) + p.stateIdle.Store(false) p.lacking = make(map[common.Hash]struct{}) } @@ -132,7 +132,7 @@ func (p *peerConnection) Reset() { // FetchHeaders sends a header retrieval request to the remote peer. func (p *peerConnection) FetchHeaders(from uint64, count int) error { // Short circuit if the peer is already fetching - if !atomic.CompareAndSwapInt32(&p.headerIdle, 0, 1) { + if !p.headerIdle.CompareAndSwap(false, true) { return errAlreadyFetching } p.headerStarted = time.Now() @@ -146,7 +146,7 @@ func (p *peerConnection) FetchHeaders(from uint64, count int) error { // FetchBodies sends a block body retrieval request to the remote peer. func (p *peerConnection) FetchBodies(request *fetchRequest) error { // Short circuit if the peer is already fetching - if !atomic.CompareAndSwapInt32(&p.blockIdle, 0, 1) { + if !p.blockIdle.CompareAndSwap(false, true) { return errAlreadyFetching } p.blockStarted = time.Now() @@ -166,7 +166,7 @@ func (p *peerConnection) FetchBodies(request *fetchRequest) error { // FetchReceipts sends a receipt retrieval request to the remote peer. func (p *peerConnection) FetchReceipts(request *fetchRequest) error { // Short circuit if the peer is already fetching - if !atomic.CompareAndSwapInt32(&p.receiptIdle, 0, 1) { + if !p.receiptIdle.CompareAndSwap(false, true) { return errAlreadyFetching } p.receiptStarted = time.Now() @@ -186,7 +186,7 @@ func (p *peerConnection) FetchReceipts(request *fetchRequest) error { // FetchNodeData sends a node state data retrieval request to the remote peer. func (p *peerConnection) FetchNodeData(hashes []common.Hash) error { // Short circuit if the peer is already fetching - if !atomic.CompareAndSwapInt32(&p.stateIdle, 0, 1) { + if !p.stateIdle.CompareAndSwap(false, true) { return errAlreadyFetching } p.stateStarted = time.Now() @@ -201,7 +201,7 @@ func (p *peerConnection) FetchNodeData(hashes []common.Hash) error { // just now. func (p *peerConnection) SetHeadersIdle(delivered int, deliveryTime time.Time) { p.rates.Update(eth.BlockHeadersMsg, deliveryTime.Sub(p.headerStarted), delivered) - atomic.StoreInt32(&p.headerIdle, 0) + p.headerIdle.Store(false) } // SetBodiesIdle sets the peer to idle, allowing it to execute block body retrieval @@ -209,7 +209,7 @@ func (p *peerConnection) SetHeadersIdle(delivered int, deliveryTime time.Time) { // just now. 
func (p *peerConnection) SetBodiesIdle(delivered int, deliveryTime time.Time) { p.rates.Update(eth.BlockBodiesMsg, deliveryTime.Sub(p.blockStarted), delivered) - atomic.StoreInt32(&p.blockIdle, 0) + p.blockIdle.Store(false) } // SetReceiptsIdle sets the peer to idle, allowing it to execute new receipt @@ -217,7 +217,7 @@ func (p *peerConnection) SetBodiesIdle(delivered int, deliveryTime time.Time) { // with that measured just now. func (p *peerConnection) SetReceiptsIdle(delivered int, deliveryTime time.Time) { p.rates.Update(eth.ReceiptsMsg, deliveryTime.Sub(p.receiptStarted), delivered) - atomic.StoreInt32(&p.receiptIdle, 0) + p.receiptIdle.Store(false) } // SetNodeDataIdle sets the peer to idle, allowing it to execute new state trie @@ -225,7 +225,7 @@ func (p *peerConnection) SetReceiptsIdle(delivered int, deliveryTime time.Time) // with that measured just now. func (p *peerConnection) SetNodeDataIdle(delivered int, deliveryTime time.Time) { p.rates.Update(eth.NodeDataMsg, deliveryTime.Sub(p.stateStarted), delivered) - atomic.StoreInt32(&p.stateIdle, 0) + p.stateIdle.Store(false) } // HeaderCapacity retrieves the peers header download allowance based on its @@ -409,7 +409,7 @@ func (ps *peerSet) AllPeers() []*peerConnection { // within the active peer set, ordered by their reputation. func (ps *peerSet) HeaderIdlePeers() ([]*peerConnection, int) { idle := func(p *peerConnection) bool { - return atomic.LoadInt32(&p.headerIdle) == 0 + return !p.headerIdle.Load() } throughput := func(p *peerConnection) int { return p.rates.Capacity(eth.BlockHeadersMsg, time.Second) @@ -421,7 +421,7 @@ func (ps *peerSet) HeaderIdlePeers() ([]*peerConnection, int) { // the active peer set, ordered by their reputation. func (ps *peerSet) BodyIdlePeers() ([]*peerConnection, int) { idle := func(p *peerConnection) bool { - return atomic.LoadInt32(&p.blockIdle) == 0 + return !p.blockIdle.Load() } throughput := func(p *peerConnection) int { return p.rates.Capacity(eth.BlockBodiesMsg, time.Second) @@ -433,7 +433,7 @@ func (ps *peerSet) BodyIdlePeers() ([]*peerConnection, int) { // within the active peer set, ordered by their reputation. func (ps *peerSet) ReceiptIdlePeers() ([]*peerConnection, int) { idle := func(p *peerConnection) bool { - return atomic.LoadInt32(&p.receiptIdle) == 0 + return !p.receiptIdle.Load() } throughput := func(p *peerConnection) int { return p.rates.Capacity(eth.ReceiptsMsg, time.Second) @@ -445,7 +445,7 @@ func (ps *peerSet) ReceiptIdlePeers() ([]*peerConnection, int) { // peers within the active peer set, ordered by their reputation. func (ps *peerSet) NodeDataIdlePeers() ([]*peerConnection, int) { idle := func(p *peerConnection) bool { - return atomic.LoadInt32(&p.stateIdle) == 0 + return !p.stateIdle.Load() } throughput := func(p *peerConnection) int { return p.rates.Capacity(eth.NodeDataMsg, time.Second) diff --git a/les/downloader/queue.go b/les/downloader/queue.go index 6896b09b388a..cffc19f2f74b 100644 --- a/les/downloader/queue.go +++ b/les/downloader/queue.go @@ -62,7 +62,7 @@ type fetchRequest struct { // fetchResult is a struct collecting partial results from data fetchers until // all outstanding pieces complete and the result as a whole can be processed. 
type fetchResult struct { - pending int32 // Flag telling what deliveries are outstanding + pending atomic.Int32 // Flag telling what deliveries are outstanding Header *types.Header Uncles []*types.Header @@ -75,36 +75,36 @@ func newFetchResult(header *types.Header, fastSync bool) *fetchResult { Header: header, } if !header.EmptyBody() { - item.pending |= (1 << bodyType) + item.pending.Store(item.pending.Load() | (1 << bodyType)) } if fastSync && !header.EmptyReceipts() { - item.pending |= (1 << receiptType) + item.pending.Store(item.pending.Load() | (1 << receiptType)) } return item } // SetBodyDone flags the body as finished. func (f *fetchResult) SetBodyDone() { - if v := atomic.LoadInt32(&f.pending); (v & (1 << bodyType)) != 0 { - atomic.AddInt32(&f.pending, -1) + if v := f.pending.Load(); (v & (1 << bodyType)) != 0 { + f.pending.Add(-1) } } // AllDone checks if item is done. func (f *fetchResult) AllDone() bool { - return atomic.LoadInt32(&f.pending) == 0 + return f.pending.Load() == 0 } // SetReceiptsDone flags the receipts as finished. func (f *fetchResult) SetReceiptsDone() { - if v := atomic.LoadInt32(&f.pending); (v & (1 << receiptType)) != 0 { - atomic.AddInt32(&f.pending, -2) + if v := f.pending.Load(); (v & (1 << receiptType)) != 0 { + f.pending.Add(-2) } } // Done checks if the given type is done already func (f *fetchResult) Done(kind uint) bool { - v := atomic.LoadInt32(&f.pending) + v := f.pending.Load() return v&(1<<kind) == 0 } diff --git a/les/downloader/resultstore.go b/les/downloader/resultstore.go --- a/les/downloader/resultstore.go +++ b/les/downloader/resultstore.go @@ -71,7 +71,7 @@ type resultStore struct { - indexIncomplete int32 + indexIncomplete atomic.Int32 @@ -146,7 +146,7 @@ func (r *resultStore) countCompleted() int { - index := atomic.LoadInt32(&r.indexIncomplete) + index := r.indexIncomplete.Load() for ; ; index++ { if index >= int32(len(r.items)) { break @@ -156,7 +156,7 @@ func (r *resultStore) countCompleted() int { break } } - atomic.StoreInt32(&r.indexIncomplete, index) + r.indexIncomplete.Store(index) return int(index) } @@ -179,7 +179,7 @@ func (r *resultStore) GetCompleted(limit int) []*fetchResult { } // Advance the expected block number of the first cache entry r.resultOffset += uint64(limit) - atomic.AddInt32(&r.indexIncomplete, int32(-limit)) + r.indexIncomplete.Add(int32(-limit)) return results } diff --git a/les/fetcher/block_fetcher_test.go b/les/fetcher/block_fetcher_test.go index caff7a3b3559..5f29e8fc42e7 100644 --- a/les/fetcher/block_fetcher_test.go +++ b/les/fetcher/block_fetcher_test.go @@ -372,13 +372,13 @@ func testConcurrentAnnouncements(t *testing.T, light bool) { secondHeaderFetcher := tester.makeHeaderFetcher("second", blocks, -gatherSlack) secondBodyFetcher := tester.makeBodyFetcher("second", blocks, 0) - counter := uint32(0) + counter := atomic.Uint32{} firstHeaderWrapper := func(hash common.Hash) error { - atomic.AddUint32(&counter, 1) + counter.Add(1) return firstHeaderFetcher(hash) } secondHeaderWrapper := func(hash common.Hash) error { - atomic.AddUint32(&counter, 1) + counter.Add(1) return secondHeaderFetcher(hash) } // Iteratively announce blocks until all are imported @@ -405,8 +405,8 @@ func testConcurrentAnnouncements(t *testing.T, light bool) { verifyImportDone(t, imported) // Make sure no blocks were retrieved twice - if int(counter) != targetBlocks { - t.Fatalf("retrieval count mismatch: have %v, want %v", counter, targetBlocks) + if int(counter.Load()) != targetBlocks { + t.Fatalf("retrieval count mismatch: have %v, want %v", counter.Load(), targetBlocks) } verifyChainHeight(t, tester, uint64(len(hashes)-1)) } @@ -472,9 +472,9 @@ func testPendingDeduplication(t *testing.T, light bool) { bodyFetcher := tester.makeBodyFetcher("repeater", blocks, 0) delay := 50 * time.Millisecond - counter := uint32(0) + counter := atomic.Uint32{} headerWrapper := func(hash common.Hash) error { - atomic.AddUint32(&counter, 1) + counter.Add(1) // Simulate a
long running fetch go func() { @@ -499,8 +499,8 @@ func testPendingDeduplication(t *testing.T, light bool) { time.Sleep(delay) // Check that all blocks were imported and none fetched twice - if int(counter) != 1 { - t.Fatalf("retrieval count mismatch: have %v, want %v", counter, 1) + if int(counter.Load()) != 1 { + t.Fatalf("retrieval count mismatch: have %v, want %v", counter.Load(), 1) } verifyChainHeight(t, tester, 1) } @@ -586,9 +586,9 @@ func TestImportDeduplication(t *testing.T) { headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack) bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0) - counter := uint32(0) + counter := atomic.Uint32{} tester.fetcher.insertChain = func(blocks types.Blocks) (int, error) { - atomic.AddUint32(&counter, uint32(len(blocks))) + counter.Add(uint32(len(blocks))) return tester.insertChain(blocks) } // Instrument the fetching and imported events @@ -609,8 +609,8 @@ func TestImportDeduplication(t *testing.T) { tester.fetcher.Enqueue("valid", blocks[hashes[1]]) verifyImportCount(t, imported, 2) - if counter != 2 { - t.Fatalf("import invocation count mismatch: have %v, want %v", counter, 2) + if counter.Load() != 2 { + t.Fatalf("import invocation count mismatch: have %v, want %v", counter.Load(), 2) } } @@ -806,13 +806,13 @@ func TestHashMemoryExhaustionAttack(t *testing.T) { // Create a tester with instrumented import hooks tester := newTester(false) - imported, announces := make(chan interface{}), int32(0) + imported, announces := make(chan interface{}), atomic.Int32{} tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { imported <- block } tester.fetcher.announceChangeHook = func(hash common.Hash, added bool) { if added { - atomic.AddInt32(&announces, 1) + announces.Add(1) } else { - atomic.AddInt32(&announces, -1) + announces.Add(-1) } } // Create a valid chain and an infinite junk chain @@ -832,7 +832,7 @@ func TestHashMemoryExhaustionAttack(t *testing.T) { } tester.fetcher.Notify("attacker", attack[i], 1 /* don't distance drop */, time.Now(), attackerHeaderFetcher, attackerBodyFetcher) } - if count := atomic.LoadInt32(&announces); count != hashLimit+maxQueueDist { + if count := announces.Load(); count != hashLimit+maxQueueDist { t.Fatalf("queued announce count mismatch: have %d, want %d", count, hashLimit+maxQueueDist) } // Wait for fetches to complete @@ -853,13 +853,13 @@ func TestBlockMemoryExhaustionAttack(t *testing.T) { // Create a tester with instrumented import hooks tester := newTester(false) - imported, enqueued := make(chan interface{}), int32(0) + imported, enqueued := make(chan interface{}), atomic.Int32{} tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { imported <- block } tester.fetcher.queueChangeHook = func(hash common.Hash, added bool) { if added { - atomic.AddInt32(&enqueued, 1) + enqueued.Add(1) } else { - atomic.AddInt32(&enqueued, -1) + enqueued.Add(-1) } } // Create a valid chain and a batch of dangling (but in range) blocks @@ -877,7 +877,7 @@ func TestBlockMemoryExhaustionAttack(t *testing.T) { tester.fetcher.Enqueue("attacker", block) } time.Sleep(200 * time.Millisecond) - if queued := atomic.LoadInt32(&enqueued); queued != blockLimit { + if queued := enqueued.Load(); queued != blockLimit { t.Fatalf("queued block count mismatch: have %d, want %d", queued, blockLimit) } // Queue up a batch of valid blocks, and check that a new peer is allowed to do so @@ -885,7 +885,7 @@ func TestBlockMemoryExhaustionAttack(t *testing.T) { 
tester.fetcher.Enqueue("valid", blocks[hashes[len(hashes)-3-i]]) } time.Sleep(100 * time.Millisecond) - if queued := atomic.LoadInt32(&enqueued); queued != blockLimit+maxQueueDist-1 { + if queued := enqueued.Load(); queued != blockLimit+maxQueueDist-1 { t.Fatalf("queued block count mismatch: have %d, want %d", queued, blockLimit+maxQueueDist-1) } // Insert the missing piece (and sanity check the import) diff --git a/les/peer.go b/les/peer.go index deda052a3b14..650710197f1a 100644 --- a/les/peer.go +++ b/les/peer.go @@ -122,13 +122,13 @@ type peerCommons struct { *p2p.Peer rw p2p.MsgReadWriter - id string // Peer identity. - version int // Protocol version negotiated. - network uint64 // Network ID being on. - frozen uint32 // Flag whether the peer is frozen. - announceType uint64 // New block announcement type. - serving uint32 // The status indicates the peer is served. - headInfo blockInfo // Last announced block information. + id string // Peer identity. + version int // Protocol version negotiated. + network uint64 // Network ID being on. + frozen atomic.Bool // Flag whether the peer is frozen. + announceType uint64 // New block announcement type. + serving atomic.Bool // The status indicates the peer is served. + headInfo blockInfo // Last announced block information. // Background task queue for caching peer tasks and executing in order. sendQueue *utils.ExecQueue @@ -144,7 +144,7 @@ type peerCommons struct { // isFrozen returns true if the client is frozen or the server has put our // client in frozen state func (p *peerCommons) isFrozen() bool { - return atomic.LoadUint32(&p.frozen) != 0 + return p.frozen.Load() } // canQueue returns an indicator whether the peer can queue an operation. @@ -403,7 +403,7 @@ func (p *serverPeer) rejectUpdate(size uint64) bool { // freeze processes Stop messages from the given server and set the status as // frozen. func (p *serverPeer) freeze() { - if atomic.CompareAndSwapUint32(&p.frozen, 0, 1) { + if p.frozen.CompareAndSwap(false, true) { p.sendQueue.Clear() } } @@ -411,7 +411,7 @@ func (p *serverPeer) freeze() { // unfreeze processes Resume messages from the given server and set the status // as unfrozen. func (p *serverPeer) unfreeze() { - atomic.StoreUint32(&p.frozen, 0) + p.frozen.Store(false) } // sendRequest send a request to the server based on the given message type @@ -831,11 +831,11 @@ func (p *clientPeer) freeze() { if p.version < lpv3 { // if Stop/Resume is not supported then just drop the peer after setting // its frozen status permanently - atomic.StoreUint32(&p.frozen, 1) + p.frozen.Store(true) p.Peer.Disconnect(p2p.DiscUselessPeer) return } - if atomic.SwapUint32(&p.frozen, 1) == 0 { + if !p.frozen.Swap(true) { go func() { p.sendStop() time.Sleep(freezeTimeBase + time.Duration(rand.Int63n(int64(freezeTimeRandom)))) @@ -848,7 +848,7 @@ func (p *clientPeer) freeze() { time.Sleep(freezeCheckPeriod) continue } - atomic.StoreUint32(&p.frozen, 0) + p.frozen.Store(false) p.sendResume(bufValue) return } diff --git a/les/server_handler.go b/les/server_handler.go index 2ea496ac2c3a..1849a11a752f 100644 --- a/les/server_handler.go +++ b/les/server_handler.go @@ -19,7 +19,6 @@ package les import ( "errors" "sync" - "sync/atomic" "time" "github.com/ethereum/go-ethereum/common" @@ -164,8 +163,8 @@ func (h *serverHandler) handle(p *clientPeer) error { }() // Mark the peer as being served. 
- atomic.StoreUint32(&p.serving, 1) - defer atomic.StoreUint32(&p.serving, 0) + p.serving.Store(true) + defer p.serving.Store(false) // Spawn a main loop to handle all incoming messages. for { diff --git a/les/servingqueue.go b/les/servingqueue.go index b4b53d8df548..578fcad8eaac 100644 --- a/les/servingqueue.go +++ b/les/servingqueue.go @@ -28,10 +28,11 @@ import ( // servingQueue allows running tasks in a limited number of threads and puts the // waiting tasks in a priority queue type servingQueue struct { - recentTime, queuedTime, servingTimeDiff uint64 - burstLimit, burstDropLimit uint64 - burstDecRate float64 - lastUpdate mclock.AbsTime + recentTime, queuedTime uint64 + servingTimeDiff atomic.Uint64 + burstLimit, burstDropLimit uint64 + burstDecRate float64 + lastUpdate mclock.AbsTime queueAddCh, queueBestCh chan *servingTask stopThreadCh, quit chan struct{} @@ -100,7 +101,7 @@ func (t *servingTask) done() uint64 { t.timeAdded = t.servingTime if t.expTime > diff { t.expTime -= diff - atomic.AddUint64(&t.sq.servingTimeDiff, t.expTime) + t.sq.servingTimeDiff.Add(t.expTime) } else { t.expTime = 0 } @@ -257,7 +258,7 @@ func (sq *servingQueue) freezePeers() { // updateRecentTime recalculates the recent serving time value func (sq *servingQueue) updateRecentTime() { - subTime := atomic.SwapUint64(&sq.servingTimeDiff, 0) + subTime := sq.servingTimeDiff.Swap(0) now := mclock.Now() dt := now - sq.lastUpdate sq.lastUpdate = now diff --git a/les/test_helper.go b/les/test_helper.go index 714bc7b3f625..e11caeb253e8 100644 --- a/les/test_helper.go +++ b/les/test_helper.go @@ -25,7 +25,6 @@ import ( "crypto/rand" "fmt" "math/big" - "sync/atomic" "testing" "time" @@ -438,7 +437,7 @@ func newTestPeerPair(name string, version int, server *serverHandler, client *cl return nil, nil, fmt.Errorf("failed to establish protocol connection %v", err) default: } - if atomic.LoadUint32(&peer1.serving) == 1 && atomic.LoadUint32(&peer2.serving) == 1 { + if peer1.serving.Load() && peer2.serving.Load() { break } time.Sleep(50 * time.Millisecond) @@ -499,7 +498,7 @@ func (client *testClient) newRawPeer(t *testing.T, name string, version int, rec return nil, nil, nil default: } - if atomic.LoadUint32(&peer.serving) == 1 { + if peer.serving.Load() { break } time.Sleep(50 * time.Millisecond) @@ -563,7 +562,7 @@ func (server *testServer) newRawPeer(t *testing.T, name string, version int) (*t return nil, nil, nil default: } - if atomic.LoadUint32(&peer.serving) == 1 { + if peer.serving.Load() { break } time.Sleep(50 * time.Millisecond) diff --git a/les/ulc_test.go b/les/ulc_test.go index 9a29a24cee55..791bc2885385 100644 --- a/les/ulc_test.go +++ b/les/ulc_test.go @@ -20,7 +20,6 @@ import ( "crypto/rand" "fmt" "net" - "sync/atomic" "testing" "time" @@ -136,7 +135,7 @@ func connect(server *serverHandler, serverId enode.ID, client *clientHandler, pr return nil, nil, fmt.Errorf("failed to establish protocol connection %v", err) default: } - if atomic.LoadUint32(&peer1.serving) == 1 && atomic.LoadUint32(&peer2.serving) == 1 { + if peer1.serving.Load() && peer2.serving.Load() { break } time.Sleep(50 * time.Millisecond) diff --git a/les/vflux/client/serverpool.go b/les/vflux/client/serverpool.go index 271d6e022447..83aeee8480d3 100644 --- a/les/vflux/client/serverpool.go +++ b/les/vflux/client/serverpool.go @@ -58,15 +58,16 @@ type ServerPool struct { unixTime func() int64 db ethdb.KeyValueStore - ns *nodestate.NodeStateMachine - vt *ValueTracker - mixer *enode.FairMix - mixSources []enode.Iterator - dialIterator enode.Iterator 
- validSchemes enr.IdentityScheme - trustedURLs []string - fillSet *FillSet - started, queryFails uint32 + ns *nodestate.NodeStateMachine + vt *ValueTracker + mixer *enode.FairMix + mixSources []enode.Iterator + dialIterator enode.Iterator + validSchemes enr.IdentityScheme + trustedURLs []string + fillSet *FillSet + started atomic.Bool + queryFails atomic.Uint32 timeoutLock sync.RWMutex timeout time.Duration @@ -256,7 +257,7 @@ func (s *ServerPool) addPreNegFilter(input enode.Iterator, query QueryFunc) enod } return } - fails := atomic.LoadUint32(&s.queryFails) + fails := s.queryFails.Load() failMax := fails if failMax > maxQueryFails { failMax = maxQueryFails @@ -273,14 +274,14 @@ func (s *ServerPool) addPreNegFilter(input enode.Iterator, query QueryFunc) enod go func() { q := query(n) if q == -1 { - atomic.AddUint32(&s.queryFails, 1) + s.queryFails.Add(1) fails++ if fails%warnQueryFails == 0 { // warn if a large number of consecutive queries have failed log.Warn("UDP connection queries failed", "count", fails) } } else { - atomic.StoreUint32(&s.queryFails, 0) + s.queryFails.Store(0) } s.ns.Operation(func() { // we are no longer running in the operation that the callback belongs to, start a new one because of setRedialWait @@ -333,7 +334,7 @@ func (s *ServerPool) Start() { } }) }) - atomic.StoreUint32(&s.started, 1) + s.started.Store(true) } // Stop stops the server pool @@ -353,7 +354,7 @@ func (s *ServerPool) Stop() { // RegisterNode implements serverPeerSubscriber func (s *ServerPool) RegisterNode(node *enode.Node) (*NodeValueTracker, error) { - if atomic.LoadUint32(&s.started) == 0 { + if !s.started.Load() { return nil, errors.New("server pool not started yet") } nvt := s.vt.Register(node.ID()) diff --git a/les/vflux/client/serverpool_test.go b/les/vflux/client/serverpool_test.go index f1fd987d7edb..a35cb7a46878 100644 --- a/les/vflux/client/serverpool_test.go +++ b/les/vflux/client/serverpool_test.go @@ -60,7 +60,7 @@ type ServerPoolTest struct { input enode.Iterator testNodes []spTestNode trusted []string - waitCount, waitEnded int32 + waitCount, waitEnded atomic.Int32 // preNegLock protects the cycle counter, testNodes list and its connected field // (accessed from both the main thread and the preNeg callback) @@ -97,15 +97,15 @@ func newServerPoolTest(preNeg, preNegFail bool) *ServerPoolTest { func (s *ServerPoolTest) beginWait() { // ensure that dialIterator and the maximal number of pre-neg queries are not all stuck in a waiting state - for atomic.AddInt32(&s.waitCount, 1) > preNegLimit { - atomic.AddInt32(&s.waitCount, -1) + for s.waitCount.Add(1) > preNegLimit { + s.waitCount.Add(-1) s.clock.Run(time.Second) } } func (s *ServerPoolTest) endWait() { - atomic.AddInt32(&s.waitCount, -1) - atomic.AddInt32(&s.waitEnded, 1) + s.waitCount.Add(-1) + s.waitEnded.Add(1) } func (s *ServerPoolTest) addTrusted(i int) { @@ -177,7 +177,7 @@ func (s *ServerPoolTest) start() { for { select { case <-time.After(time.Millisecond * 100): - c := atomic.LoadInt32(&s.waitEnded) + c := s.waitEnded.Load() if c == last { // advance clock if test is stuck (might happen in rare cases) s.clock.Run(time.Second) diff --git a/light/lightchain.go b/light/lightchain.go index f42c904f57eb..3d169b642d27 100644 --- a/light/lightchain.go +++ b/light/lightchain.go @@ -70,9 +70,9 @@ type LightChain struct { wg sync.WaitGroup // Atomic boolean switches: - running int32 // whether LightChain is running or stopped - procInterrupt int32 // interrupts chain insert - disableCheckFreq int32 // disables header verification + 
running atomic.Bool // whether LightChain is running or stopped + procInterrupt atomic.Bool // interrupts chain insert + disableCheckFreq atomic.Bool // disables header verification } // NewLightChain returns a fully initialised light chain using information @@ -133,7 +133,7 @@ func (lc *LightChain) AddTrustedCheckpoint(cp *params.TrustedCheckpoint) { } func (lc *LightChain) getProcInterrupt() bool { - return atomic.LoadInt32(&lc.procInterrupt) == 1 + return lc.procInterrupt.Load() } // Odr returns the ODR backend of the chain @@ -321,7 +321,7 @@ func (lc *LightChain) GetBlockByNumber(ctx context.Context, number uint64) (*typ // Stop stops the blockchain service. If any imports are currently in progress // it will abort them using the procInterrupt. func (lc *LightChain) Stop() { - if !atomic.CompareAndSwapInt32(&lc.running, 0, 1) { + if !lc.running.CompareAndSwap(false, true) { return } close(lc.quit) @@ -334,7 +334,7 @@ // errInsertionInterrupted as soon as possible. Insertion is permanently disabled after // calling this method. func (lc *LightChain) StopInsert() { - atomic.StoreInt32(&lc.procInterrupt, 1) + lc.procInterrupt.Store(true) } // Rollback is designed to remove a chain of links from the database that aren't @@ -412,7 +412,7 @@ func (lc *LightChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (i if len(chain) == 0 { return 0, nil } - if atomic.LoadInt32(&lc.disableCheckFreq) == 1 { + if lc.disableCheckFreq.Load() { checkFreq = 0 } start := time.Now() @@ -592,10 +592,10 @@ func (lc *LightChain) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) // DisableCheckFreq disables header validation. This is used for ultralight mode. func (lc *LightChain) DisableCheckFreq() { - atomic.StoreInt32(&lc.disableCheckFreq, 1) + lc.disableCheckFreq.Store(true) } // EnableCheckFreq enables header validation. func (lc *LightChain) EnableCheckFreq() { - atomic.StoreInt32(&lc.disableCheckFreq, 0) + lc.disableCheckFreq.Store(false) } diff --git a/log/format.go b/log/format.go index d7e2f820afe7..bca3c8275dc5 100644 --- a/log/format.go +++ b/log/format.go @@ -31,19 +31,19 @@ var locationTrims = []string{ // format output. func PrintOrigins(print bool) { if print { - atomic.StoreUint32(&locationEnabled, 1) + locationEnabled.Store(true) } else { - atomic.StoreUint32(&locationEnabled, 0) + locationEnabled.Store(false) } } // locationEnabled is an atomic flag controlling whether the terminal formatter // should append the log locations too when printing entries. -var locationEnabled uint32 +var locationEnabled atomic.Bool // locationLength is the maxmimum path length encountered, which all logs are // padded to to aid in alignment. -var locationLength uint32 +var locationLength atomic.Uint32 // fieldPadding is a global map with maximum field value lengths seen until now // to allow padding log contexts in a bit smarter way.
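An editorial aside on the PrintOrigins hunk above: once locationEnabled is an atomic.Bool, the if/else can collapse into a single store, since the boolean argument maps directly onto the flag. A minimal sketch of the equivalent form (an observation, not what the diff does):

```go
// PrintOrigins toggles log-origin printing; with atomic.Bool the branch in
// the hunk above becomes redundant and the argument can be stored directly.
func PrintOrigins(print bool) {
	locationEnabled.Store(print)
}
```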
@@ -107,17 +107,17 @@ func TerminalFormat(usecolor bool) Format { b := &bytes.Buffer{} lvl := r.Lvl.AlignedString() - if atomic.LoadUint32(&locationEnabled) != 0 { + if locationEnabled.Load() { // Log origin printing was requested, format the location path and line number location := fmt.Sprintf("%+v", r.Call) for _, prefix := range locationTrims { location = strings.TrimPrefix(location, prefix) } // Maintain the maximum location length for fancyer alignment - align := int(atomic.LoadUint32(&locationLength)) + align := int(locationLength.Load()) if align < len(location) { align = len(location) - atomic.StoreUint32(&locationLength, uint32(align)) + locationLength.Store(uint32(align)) } padding := strings.Repeat(" ", align-len(location)) diff --git a/log/handler_glog.go b/log/handler_glog.go index b5186d4b27ec..eab775bac937 100644 --- a/log/handler_glog.go +++ b/log/handler_glog.go @@ -39,9 +39,9 @@ var errTraceSyntax = errors.New("expect file.go:234") type GlogHandler struct { origin Handler // The origin handler this wraps - level uint32 // Current log level, atomically accessible - override uint32 // Flag whether overrides are used, atomically accessible - backtrace uint32 // Flag whether backtrace location is set + level atomic.Uint32 // Current log level + override atomic.Uint32 // Flag whether overrides are used + backtrace atomic.Uint32 // Flag whether backtrace location is set patterns []pattern // Current list of patterns to override with siteCache map[uintptr]Lvl // Cache of callsite pattern evaluations @@ -72,7 +72,7 @@ type pattern struct { // Verbosity sets the glog verbosity ceiling. The verbosity of individual packages // and source files can be raised using Vmodule. func (h *GlogHandler) Verbosity(level Lvl) { - atomic.StoreUint32(&h.level, uint32(level)) + h.level.Store(uint32(level)) } // Vmodule sets the glog verbosity pattern. @@ -138,7 +138,7 @@ func (h *GlogHandler) Vmodule(ruleset string) error { h.patterns = filter h.siteCache = make(map[uintptr]Lvl) - atomic.StoreUint32(&h.override, uint32(len(filter))) + h.override.Store(uint32(len(filter))) return nil } @@ -171,7 +171,7 @@ func (h *GlogHandler) BacktraceAt(location string) error { defer h.lock.Unlock() h.location = location - atomic.StoreUint32(&h.backtrace, uint32(len(location))) + h.backtrace.Store(uint32(len(location))) return nil } @@ -180,7 +180,7 @@ func (h *GlogHandler) BacktraceAt(location string) error { // and backtrace filters, finally emitting it if either allow it through. func (h *GlogHandler) Log(r *Record) error { // If backtracing is requested, check whether this is the callsite - if atomic.LoadUint32(&h.backtrace) > 0 { + if h.backtrace.Load() > 0 { // Everything below here is slow. Although we could cache the call sites the // same way as for vmodule, backtracing is so rare it's not worth the extra // complexity. 
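Worth noting on the TerminalFormat hunk earlier: the load-then-store update of locationLength is not atomic as a whole, so two goroutines can interleave and a longer location's length may briefly be overwritten by a shorter one. The padding self-heals on the next long record, so the race is benign, but a strictly monotonic maximum would need a compare-and-swap loop. A sketch, assuming only the locationLength declaration from the diff:

```go
package main

import "sync/atomic"

// storeMax raises *cur to val and never lowers it, even with concurrent
// callers; each CAS retry re-reads the current value before either giving
// up or publishing. Illustrative only — the diff keeps the simpler racy form.
func storeMax(cur *atomic.Uint32, val uint32) {
	for {
		old := cur.Load()
		if val <= old || cur.CompareAndSwap(old, val) {
			return
		}
	}
}
```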
@@ -198,11 +198,11 @@ func (h *GlogHandler) Log(r *Record) error { } } // If the global log level allows, fast track logging - if atomic.LoadUint32(&h.level) >= uint32(r.Lvl) { + if h.level.Load() >= uint32(r.Lvl) { return h.origin.Log(r) } // If no local overrides are present, fast track skipping - if atomic.LoadUint32(&h.override) == 0 { + if h.override.Load() == 0 { return nil } // Check callsite cache for previously calculated log levels diff --git a/metrics/counter.go b/metrics/counter.go index 2f78c90d5c64..55e1c59540f6 100644 --- a/metrics/counter.go +++ b/metrics/counter.go @@ -38,13 +38,13 @@ func NewCounter() Counter { if !Enabled { return NilCounter{} } - return &StandardCounter{0} + return &StandardCounter{} } // NewCounterForced constructs a new StandardCounter and returns it no matter if // the global switch is enabled or not. func NewCounterForced() Counter { - return &StandardCounter{0} + return &StandardCounter{} } // NewRegisteredCounter constructs and registers a new StandardCounter. @@ -115,27 +115,27 @@ func (NilCounter) Snapshot() Counter { return NilCounter{} } // StandardCounter is the standard implementation of a Counter and uses the // sync/atomic package to manage a single int64 value. type StandardCounter struct { - count int64 + count atomic.Int64 } // Clear sets the counter to zero. func (c *StandardCounter) Clear() { - atomic.StoreInt64(&c.count, 0) + c.count.Store(0) } // Count returns the current count. func (c *StandardCounter) Count() int64 { - return atomic.LoadInt64(&c.count) + return c.count.Load() } // Dec decrements the counter by the given amount. func (c *StandardCounter) Dec(i int64) { - atomic.AddInt64(&c.count, -i) + c.count.Add(-i) } // Inc increments the counter by the given amount. func (c *StandardCounter) Inc(i int64) { - atomic.AddInt64(&c.count, i) + c.count.Add(i) } // Snapshot returns a read-only copy of the counter. diff --git a/metrics/ewma.go b/metrics/ewma.go index 039286493ebc..ed95cba19b4f 100644 --- a/metrics/ewma.go +++ b/metrics/ewma.go @@ -75,7 +75,7 @@ func (NilEWMA) Update(n int64) {} // of uncounted events and processes them on each tick. It uses the // sync/atomic package to manage uncounted events. type StandardEWMA struct { - uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment + uncounted atomic.Int64 alpha float64 rate float64 init bool @@ -97,8 +97,8 @@ func (a *StandardEWMA) Snapshot() EWMA { // Tick ticks the clock to update the moving average. It assumes it is called // every five seconds. func (a *StandardEWMA) Tick() { - count := atomic.LoadInt64(&a.uncounted) - atomic.AddInt64(&a.uncounted, -count) + count := a.uncounted.Load() + a.uncounted.Add(-count) instantRate := float64(count) / float64(5*time.Second) a.mutex.Lock() defer a.mutex.Unlock() @@ -112,5 +112,5 @@ func (a *StandardEWMA) Tick() { // Update adds n uncounted events. func (a *StandardEWMA) Update(n int64) { - atomic.AddInt64(&a.uncounted, n) + a.uncounted.Add(n) } diff --git a/metrics/gauge.go b/metrics/gauge.go index b6b2758b0d13..81137d7f7c5e 100644 --- a/metrics/gauge.go +++ b/metrics/gauge.go @@ -25,7 +25,7 @@ func NewGauge() Gauge { if !Enabled { return NilGauge{} } - return &StandardGauge{0} + return &StandardGauge{} } // NewRegisteredGauge constructs and registers a new StandardGauge. @@ -101,7 +101,7 @@ func (NilGauge) Value() int64 { return 0 } // StandardGauge is the standard implementation of a Gauge and uses the // sync/atomic package to manage a single int64 value. 
type StandardGauge struct { - value int64 + value atomic.Int64 } // Snapshot returns a read-only copy of the gauge. @@ -111,22 +111,22 @@ func (g *StandardGauge) Snapshot() Gauge { // Update updates the gauge's value. func (g *StandardGauge) Update(v int64) { - atomic.StoreInt64(&g.value, v) + g.value.Store(v) } // Value returns the gauge's current value. func (g *StandardGauge) Value() int64 { - return atomic.LoadInt64(&g.value) + return g.value.Load() } // Dec decrements the gauge's current value by the given amount. func (g *StandardGauge) Dec(i int64) { - atomic.AddInt64(&g.value, -i) + g.value.Add(-i) } // Inc increments the gauge's current value by the given amount. func (g *StandardGauge) Inc(i int64) { - atomic.AddInt64(&g.value, i) + g.value.Add(i) } // FunctionalGauge returns value from given function diff --git a/metrics/meter.go b/metrics/meter.go index 60ae919d04db..e8564d6a5e76 100644 --- a/metrics/meter.go +++ b/metrics/meter.go @@ -101,11 +101,7 @@ func NewRegisteredMeterForced(name string, r Registry) Meter { // MeterSnapshot is a read-only copy of another Meter. type MeterSnapshot struct { - // WARNING: The `temp` field is accessed atomically. - // On 32 bit platforms, only 64-bit aligned fields can be atomic. The struct is - // guaranteed to be so aligned, so take advantage of that. For more information, - // see https://golang.org/pkg/sync/atomic/#pkg-note-BUG. - temp int64 + temp atomic.Int64 count int64 rate1, rate5, rate15, rateMean float64 } @@ -173,7 +169,7 @@ type StandardMeter struct { snapshot *MeterSnapshot a1, a5, a15 EWMA startTime time.Time - stopped uint32 + stopped atomic.Bool } func newStandardMeter() *StandardMeter { @@ -188,8 +184,8 @@ func newStandardMeter() *StandardMeter { // Stop stops the meter, Mark() will be a no-op if you use it after being stopped. func (m *StandardMeter) Stop() { - stopped := atomic.SwapUint32(&m.stopped, 1) - if stopped != 1 { + stopped := m.stopped.Swap(true) + if !stopped { arbiter.Lock() delete(arbiter.meters, m) arbiter.Unlock() @@ -207,7 +203,7 @@ func (m *StandardMeter) Count() int64 { // Mark records the occurrence of n events. func (m *StandardMeter) Mark(n int64) { - atomic.AddInt64(&m.snapshot.temp, n) + m.snapshot.temp.Add(n) } // Rate1 returns the one-minute moving average rate of events per second. @@ -241,7 +237,14 @@ func (m *StandardMeter) RateMean() float64 { // Snapshot returns a read-only copy of the meter. func (m *StandardMeter) Snapshot() Meter { m.lock.RLock() - snapshot := *m.snapshot + snapshot := MeterSnapshot{ + count: m.snapshot.count, + rate1: m.snapshot.rate1, + rate5: m.snapshot.rate5, + rate15: m.snapshot.rate15, + rateMean: m.snapshot.rateMean, + } + snapshot.temp.Store(m.snapshot.temp.Load()) m.lock.RUnlock() return &snapshot } @@ -257,7 +260,7 @@ func (m *StandardMeter) updateSnapshot() { func (m *StandardMeter) updateMeter() { // should only run with write lock held on m.lock - n := atomic.SwapInt64(&m.snapshot.temp, 0) + n := m.snapshot.temp.Swap(0) m.snapshot.count += n m.a1.Update(n) m.a5.Update(n) diff --git a/miner/worker.go b/miner/worker.go index 8940c5037b41..289681a09ac6 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -164,7 +164,7 @@ const ( // newWorkReq represents a request for new sealing work submitting with relative interrupt notifier. 
type newWorkReq struct { - interrupt *int32 + interrupt *atomic.Int32 noempty bool timestamp int64 } @@ -239,15 +239,15 @@ type worker struct { snapshotState *state.StateDB // atomic status counters - running int32 // The indicator whether the consensus engine is running or not. - newTxs int32 // New arrival transaction count since last sealing work submitting. + running atomic.Bool // The indicator whether the consensus engine is running or not. + newTxs atomic.Int32 // New arrival transaction count since last sealing work submitting. // noempty is the flag used to control whether the feature of pre-seal empty // block is enabled. The default value is false(pre-seal is enabled by default). // But in some special scenario the consensus engine will seal blocks instantaneously, // in this case this feature will add all empty blocks into canonical chain // non-stop and no real transaction will be included. - noempty uint32 + noempty atomic.Bool // newpayloadTimeout is the maximum timeout allowance for creating payload. // The default value is 2 seconds but node operator can set it to arbitrary @@ -372,12 +372,12 @@ func (w *worker) setRecommitInterval(interval time.Duration) { // disablePreseal disables pre-sealing feature func (w *worker) disablePreseal() { - atomic.StoreUint32(&w.noempty, 1) + w.noempty.Store(true) } // enablePreseal enables pre-sealing feature func (w *worker) enablePreseal() { - atomic.StoreUint32(&w.noempty, 0) + w.noempty.Store(false) } // pending returns the pending state and corresponding block. @@ -409,24 +409,24 @@ func (w *worker) pendingBlockAndReceipts() (*types.Block, types.Receipts) { // start sets the running status as 1 and triggers new work submitting. func (w *worker) start() { - atomic.StoreInt32(&w.running, 1) + w.running.Store(true) w.startCh <- struct{}{} } // stop sets the running status as 0. func (w *worker) stop() { - atomic.StoreInt32(&w.running, 0) + w.running.Store(false) } // isRunning returns an indicator whether worker is running or not. func (w *worker) isRunning() bool { - return atomic.LoadInt32(&w.running) == 1 + return w.running.Load() } // close terminates all background threads maintained by the worker. // Note the worker does not support being closed multiple times. func (w *worker) close() { - atomic.StoreInt32(&w.running, 0) + w.running.Store(false) close(w.exitCh) w.wg.Wait() } @@ -457,7 +457,7 @@ func recalcRecommit(minRecommit, prev time.Duration, target float64, inc bool) t func (w *worker) newWorkLoop(recommit time.Duration) { defer w.wg.Done() var ( - interrupt *int32 + interrupt *atomic.Int32 minRecommit = recommit // minimal resubmit interval specified by user. timestamp int64 // timestamp for each round of sealing. ) @@ -469,16 +469,16 @@ func (w *worker) newWorkLoop(recommit time.Duration) { // commit aborts in-flight transaction execution with given signal and resubmits a new one. commit := func(noempty bool, s int32) { if interrupt != nil { - atomic.StoreInt32(interrupt, s) + interrupt.Store(s) } - interrupt = new(int32) + interrupt = &atomic.Int32{} select { case w.newWorkCh <- &newWorkReq{interrupt: interrupt, noempty: noempty, timestamp: timestamp}: case <-w.exitCh: return } timer.Reset(recommit) - atomic.StoreInt32(&w.newTxs, 0) + w.newTxs.Store(0) } // clearPending cleans the stale pending tasks. clearPending := func(number uint64) { @@ -508,7 +508,7 @@ func (w *worker) newWorkLoop(recommit time.Duration) { // higher priced transactions. Disable this overhead for pending blocks. 
if w.isRunning() && (w.chainConfig.Clique == nil || w.chainConfig.Clique.Period > 0) { // Short circuit if no new transaction arrives. - if atomic.LoadInt32(&w.newTxs) == 0 { + if w.newTxs.Load() == 0 { timer.Reset(recommit) continue } @@ -650,7 +650,7 @@ func (w *worker) mainLoop() { w.commitWork(nil, true, time.Now().Unix()) } } - atomic.AddInt32(&w.newTxs, int32(len(ev.Txs))) + w.newTxs.Add(int32(len(ev.Txs))) // System stopped case <-w.exitCh: @@ -874,7 +874,7 @@ func (w *worker) commitTransaction(env *environment, tx *types.Transaction) ([]* return receipt.Logs, nil } -func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByPriceAndNonce, interrupt *int32) error { +func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByPriceAndNonce, interrupt *atomic.Int32) error { gasLimit := env.header.GasLimit if env.gasPool == nil { env.gasPool = new(core.GasPool).AddGas(gasLimit) @@ -884,7 +884,7 @@ func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByP for { // Check interruption signal and abort building if it's fired. if interrupt != nil { - if signal := atomic.LoadInt32(interrupt); signal != commitInterruptNone { + if signal := interrupt.Load(); signal != commitInterruptNone { return signalToErr(signal) } } @@ -1064,7 +1064,7 @@ func (w *worker) prepareWork(genParams *generateParams) (*environment, error) { // fillTransactions retrieves the pending transactions from the txpool and fills them // into the given sealing block. The transaction selection and ordering strategy can // be customized with the plugin in the future. -func (w *worker) fillTransactions(interrupt *int32, env *environment) error { +func (w *worker) fillTransactions(interrupt *atomic.Int32, env *environment) error { // Split the pending transactions into locals and remotes // Fill the block with all available pending transactions. pending := w.eth.TxPool().Pending(true) @@ -1099,13 +1099,13 @@ func (w *worker) generateWork(params *generateParams) (*types.Block, *big.Int, e defer work.discard() if !params.noTxs { - interrupt := new(int32) + interrupt := atomic.Int32{} timer := time.AfterFunc(w.newpayloadTimeout, func() { - atomic.StoreInt32(interrupt, commitInterruptTimeout) + interrupt.Store(commitInterruptTimeout) }) defer timer.Stop() - err := w.fillTransactions(interrupt, work) + err := w.fillTransactions(&interrupt, work) if errors.Is(err, errBlockInterruptedByTimeout) { log.Warn("Block building is interrupted", "allowance", common.PrettyDuration(w.newpayloadTimeout)) } @@ -1119,7 +1119,7 @@ func (w *worker) generateWork(params *generateParams) (*types.Block, *big.Int, e // commitWork generates several new sealing tasks based on the parent block // and submit them to the sealer. -func (w *worker) commitWork(interrupt *int32, noempty bool, timestamp int64) { +func (w *worker) commitWork(interrupt *atomic.Int32, noempty bool, timestamp int64) { start := time.Now() // Set the coinbase if the worker is running or it's required @@ -1140,7 +1140,7 @@ func (w *worker) commitWork(interrupt *int32, noempty bool, timestamp int64) { } // Create an empty block based on temporary copied state for // sealing in advance without waiting block execution finished. - if !noempty && atomic.LoadUint32(&w.noempty) == 0 { + if !noempty && !w.noempty.Load() { w.commit(work.copy(), nil, false, start) } // Fill pending transactions from the txpool into the block. 
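The miner hunks above are the one place where the conversion changes function signatures rather than just fields: the interrupt flag is shared between newWorkLoop, commitWork and fillTransactions, so it travels as an *atomic.Int32 pointer, and generateWork arms a time.AfterFunc that stores commitInterruptTimeout into it. A self-contained sketch of that signalling pattern (only the names commitInterruptNone and commitInterruptTimeout come from the diff; their values and everything else here are illustrative):

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// Illustrative signal values; the real worker defines several more.
const (
	commitInterruptNone    int32 = 0
	commitInterruptTimeout int32 = 1
)

// buildLoop polls the shared flag between units of work, mirroring the check
// at the top of commitTransactions in the diff above.
func buildLoop(interrupt *atomic.Int32) {
	for i := 0; ; i++ {
		if signal := interrupt.Load(); signal != commitInterruptNone {
			fmt.Printf("aborted with signal %d after %d steps\n", signal, i)
			return
		}
		time.Sleep(10 * time.Millisecond) // one unit of "work"
	}
}

func main() {
	var interrupt atomic.Int32
	// Arm a deadline the same way generateWork does with time.AfterFunc.
	timer := time.AfterFunc(50*time.Millisecond, func() {
		interrupt.Store(commitInterruptTimeout)
	})
	defer timer.Stop()
	buildLoop(&interrupt)
}
```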
diff --git a/miner/worker_test.go b/miner/worker_test.go index e60de679326c..9db64a240d36 100644 --- a/miner/worker_test.go +++ b/miner/worker_test.go @@ -454,11 +454,11 @@ func testAdjustInterval(t *testing.T, chainConfig *params.ChainConfig, engine co progress = make(chan struct{}, 10) result = make([]float64, 0, 10) index = 0 - start uint32 + start atomic.Bool ) w.resubmitHook = func(minInterval time.Duration, recommitInterval time.Duration) { // Short circuit if interval checking hasn't started. - if atomic.LoadUint32(&start) == 0 { + if !start.Load() { return } var wantMinInterval, wantRecommitInterval time.Duration @@ -493,7 +493,7 @@ func testAdjustInterval(t *testing.T, chainConfig *params.ChainConfig, engine co w.start() time.Sleep(time.Second) // Ensure two tasks have been submitted due to start opt - atomic.StoreUint32(&start, 1) + start.Store(true) w.setRecommitInterval(3 * time.Second) select { diff --git a/p2p/enode/iter_test.go b/p2p/enode/iter_test.go index 5014346af465..0372a2c5a8c7 100644 --- a/p2p/enode/iter_test.go +++ b/p2p/enode/iter_test.go @@ -248,8 +248,9 @@ func approxEqual(x, y, ε int) bool { // genIter creates fake nodes with numbered IDs based on 'index' and 'gen' type genIter struct { - node *Node - index, gen uint32 + node *Node + index uint32 + gen uint32 } func (s *genIter) Next() bool { diff --git a/p2p/message.go b/p2p/message.go index 24f21456d8e5..56e1b7d1924d 100644 --- a/p2p/message.go +++ b/p2p/message.go @@ -156,7 +156,7 @@ func MsgPipe() (*MsgPipeRW, *MsgPipeRW) { var ( c1, c2 = make(chan Msg), make(chan Msg) closing = make(chan struct{}) - closed = new(int32) + closed = &atomic.Int32{} rw1 = &MsgPipeRW{c1, c2, closing, closed} rw2 = &MsgPipeRW{c2, c1, closing, closed} ) @@ -172,13 +172,13 @@ type MsgPipeRW struct { w chan<- Msg r <-chan Msg closing chan struct{} - closed *int32 + closed *atomic.Int32 } // WriteMsg sends a message on the pipe. // It blocks until the receiver has consumed the message payload. func (p *MsgPipeRW) WriteMsg(msg Msg) error { - if atomic.LoadInt32(p.closed) == 0 { + if p.closed.Load() == 0 { consumed := make(chan struct{}, 1) msg.Payload = &eofSignal{msg.Payload, msg.Size, consumed} select { @@ -199,7 +199,7 @@ func (p *MsgPipeRW) WriteMsg(msg Msg) error { // ReadMsg returns a message sent on the other end of the pipe. func (p *MsgPipeRW) ReadMsg() (Msg, error) { - if atomic.LoadInt32(p.closed) == 0 { + if p.closed.Load() == 0 { select { case msg := <-p.r: return msg, nil @@ -213,9 +213,9 @@ func (p *MsgPipeRW) ReadMsg() (Msg, error) { // of the pipe. They will return ErrPipeClosed. Close also // interrupts any reads from a message payload. 
func (p *MsgPipeRW) Close() error { - if atomic.AddInt32(p.closed, 1) != 1 { + if p.closed.Add(1) != 1 { // someone else is already closing - atomic.StoreInt32(p.closed, 1) // avoid overflow + p.closed.Store(1) // avoid overflow return nil } close(p.closing) diff --git a/p2p/simulations/examples/ping-pong.go b/p2p/simulations/examples/ping-pong.go index d9b51dc09bb0..f6cf5113a6ee 100644 --- a/p2p/simulations/examples/ping-pong.go +++ b/p2p/simulations/examples/ping-pong.go @@ -91,7 +91,7 @@ func main() { type pingPongService struct { id enode.ID log log.Logger - received int64 + received atomic.Int64 } func newPingPongService(id enode.ID) *pingPongService { @@ -125,7 +125,7 @@ func (p *pingPongService) Info() interface{} { return struct { Received int64 `json:"received"` }{ - atomic.LoadInt64(&p.received), + p.received.Load(), } } @@ -162,7 +162,7 @@ func (p *pingPongService) Run(peer *p2p.Peer, rw p2p.MsgReadWriter) error { return } log.Info("received message", "msg.code", msg.Code, "msg.payload", string(payload)) - atomic.AddInt64(&p.received, 1) + p.received.Add(1) if msg.Code == pingMsgCode { log.Info("sending pong") go p2p.Send(rw, pongMsgCode, "PONG") diff --git a/p2p/simulations/http_test.go b/p2p/simulations/http_test.go index 05e43238abb5..51705fb2e5b7 100644 --- a/p2p/simulations/http_test.go +++ b/p2p/simulations/http_test.go @@ -54,7 +54,7 @@ type testService struct { id enode.ID // peerCount is incremented once a peer handshake has been performed - peerCount int64 + peerCount atomic.Int64 peers map[enode.ID]*testPeer peersMtx sync.Mutex @@ -170,8 +170,8 @@ func (t *testService) RunTest(p *p2p.Peer, rw p2p.MsgReadWriter) error { close(peer.testReady) // track the peer - atomic.AddInt64(&t.peerCount, 1) - defer atomic.AddInt64(&t.peerCount, -1) + t.peerCount.Add(1) + defer t.peerCount.Add(-1) // block until the peer is dropped for { @@ -235,21 +235,21 @@ func (t *testService) Snapshot() ([]byte, error) { // * subscribe to counter increment events type TestAPI struct { state *atomic.Value - peerCount *int64 - counter int64 + peerCount *atomic.Int64 + counter atomic.Int64 feed event.Feed } func (t *TestAPI) PeerCount() int64 { - return atomic.LoadInt64(t.peerCount) + return t.peerCount.Load() } func (t *TestAPI) Get() int64 { - return atomic.LoadInt64(&t.counter) + return t.counter.Load() } func (t *TestAPI) Add(delta int64) { - atomic.AddInt64(&t.counter, delta) + t.counter.Add(delta) t.feed.Send(delta) } diff --git a/rpc/client.go b/rpc/client.go index 69ff4851e317..fbeb5a18148d 100644 --- a/rpc/client.go +++ b/rpc/client.go @@ -79,7 +79,7 @@ type Client struct { isHTTP bool // connection type: http, ws or ipc services *serviceRegistry - idCounter uint32 + idCounter atomic.Uint32 // This function, if non-nil, is called when the connection is lost. reconnectFunc reconnectFunc @@ -263,7 +263,7 @@ func (c *Client) RegisterName(name string, receiver interface{}) error { } func (c *Client) nextID() json.RawMessage { - id := atomic.AddUint32(&c.idCounter, 1) + id := c.idCounter.Add(1) return strconv.AppendUint(nil, uint64(id), 10) } diff --git a/rpc/server.go b/rpc/server.go index 9c72c26d7b94..089bbb1fd5d2 100644 --- a/rpc/server.go +++ b/rpc/server.go @@ -48,7 +48,7 @@ type Server struct { mutex sync.Mutex codecs map[ServerCodec]struct{} - run int32 + run atomic.Bool } // NewServer creates a new server instance with no registered handlers. 
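The MsgPipeRW.Close hunk above ports the old counter trick verbatim: the first caller to take Add(1) from 0 to 1 wins and closes the channel, later callers merely clamp the counter back down. With the new types, the same close-once guarantee is more idiomatically a CompareAndSwap on an atomic.Bool, which is exactly the shape Server.Stop takes in the rpc/server.go hunk below. A sketch of that idiom (the pipe type here is illustrative, not the p2p one):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// pipe shows close-once via atomic.Bool.CompareAndSwap: only the caller that
// flips false->true may close the channel, so close(p.closing) runs exactly once.
type pipe struct {
	closed  atomic.Bool
	closing chan struct{}
}

func (p *pipe) Close() error {
	if !p.closed.CompareAndSwap(false, true) {
		return nil // someone else is already closing
	}
	close(p.closing)
	return nil
}

func main() {
	p := &pipe{closing: make(chan struct{})}
	p.Close()
	p.Close() // second call is a harmless no-op
	fmt.Println("closed:", p.closed.Load())
}
```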
@@ -56,8 +56,8 @@ func NewServer() *Server { server := &Server{ idgen: randomIDGenerator(), codecs: make(map[ServerCodec]struct{}), - run: 1, } + server.run.Store(true) // Register the default service providing meta information about the RPC service such // as the services and methods it offers. rpcService := &RPCService{server} @@ -95,7 +95,7 @@ func (s *Server) trackCodec(codec ServerCodec) bool { s.mutex.Lock() defer s.mutex.Unlock() - if atomic.LoadInt32(&s.run) == 0 { + if !s.run.Load() { return false // Don't serve if server is stopped. } s.codecs[codec] = struct{}{} @@ -114,7 +114,7 @@ func (s *Server) untrackCodec(codec ServerCodec) { // this mode. func (s *Server) serveSingleRequest(ctx context.Context, codec ServerCodec) { // Don't serve if server is stopped. - if atomic.LoadInt32(&s.run) == 0 { + if !s.run.Load() { return } @@ -144,7 +144,7 @@ func (s *Server) Stop() { s.mutex.Lock() defer s.mutex.Unlock() - if atomic.CompareAndSwapInt32(&s.run, 1, 0) { + if s.run.CompareAndSwap(true, false) { log.Debug("RPC server shutting down") for codec := range s.codecs { codec.close() diff --git a/signer/core/stdioui.go b/signer/core/stdioui.go index 6963a89122f6..a0ce6844171f 100644 --- a/signer/core/stdioui.go +++ b/signer/core/stdioui.go @@ -25,7 +25,7 @@ import ( ) type StdIOUI struct { - client rpc.Client + client *rpc.Client } func NewStdIOUI() *StdIOUI { @@ -33,7 +33,7 @@ func NewStdIOUI() *StdIOUI { if err != nil { log.Crit("Could not create stdio client", "err", err) } - ui := &StdIOUI{client: *client} + ui := &StdIOUI{client: client} return ui }
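A closing note on construction, visible in the NewServer hunk above and in the metrics constructors earlier: the new atomic types cannot be set through a composite literal, because their value lives in unexported fields and the types must not be copied once in use. Flags whose initial state is the zero value need no change at all, which is why StandardCounter{0} simply became StandardCounter{}; a flag that starts out true, like Server.run, has to be stored explicitly after the literal. A minimal sketch of the pattern:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// server mirrors the shape of rpc.Server after the diff: run is an
// atomic.Bool whose zero value is false.
type server struct {
	run atomic.Bool
}

func newServer() *server {
	s := &server{} // &server{run: true} does not compile; the field is opaque
	s.run.Store(true)
	return s
}

func main() {
	fmt.Println("running:", newServer().run.Load())
}
```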