Merge pull request #1928 from bnb-chain/develop
brilliant-lx authored Nov 9, 2023
2 parents 1812181 + a4eee3b commit 1dca486
Showing 56 changed files with 1,638 additions and 570 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/build-test.yml
@@ -15,7 +15,7 @@ jobs:
unit-test:
strategy:
matrix:
go-version: [1.19.x]
go-version: [1.20.x]
os: [ubuntu-latest]
runs-on: ${{ matrix.os }}
steps:
2 changes: 1 addition & 1 deletion .github/workflows/lint.yml
@@ -15,7 +15,7 @@ jobs:
golang-lint:
strategy:
matrix:
go-version: [1.19.x]
go-version: [1.20.x]
os: [ubuntu-latest]
runs-on: ${{ matrix.os }}
steps:
2 changes: 1 addition & 1 deletion .github/workflows/pre-release.yml
@@ -15,7 +15,7 @@ jobs:
name: Build Release
strategy:
matrix:
go-version: [1.19.x]
go-version: [1.20.x]
os: [ubuntu-latest, macos-latest, windows-latest]
runs-on: ${{ matrix.os }}
steps:
2 changes: 1 addition & 1 deletion .github/workflows/release.yml
@@ -14,7 +14,7 @@ jobs:
name: Build Release
strategy:
matrix:
go-version: [1.19.x]
go-version: [1.20.x]
os: [ubuntu-latest, macos-latest, windows-latest]
runs-on: ${{ matrix.os }}
steps:
2 changes: 1 addition & 1 deletion .github/workflows/unit-test.yml
@@ -15,7 +15,7 @@ jobs:
unit-test:
strategy:
matrix:
go-version: [1.19.x]
go-version: [1.20.x]
os: [ubuntu-latest]
runs-on: ${{ matrix.os }}
steps:
3 changes: 0 additions & 3 deletions .golangci.yml
@@ -51,9 +51,6 @@ issues:
- path: core/state/metrics.go
linters:
- unused
- path: core/state/statedb_fuzz_test.go
linters:
- unused
- path: core/txpool/legacypool/list.go
linters:
- staticcheck
20 changes: 20 additions & 0 deletions CHANGELOG.md
@@ -1,4 +1,24 @@
# Changelog
## v1.3.1
FEATURE
* [\#1881](https://github.com/bnb-chain/bsc/pull/1881) feat: activate pbss
* [\#1882](https://github.com/bnb-chain/bsc/pull/1882) cmd/geth: add hbss to pbss convert tool
* [\#1916](https://github.com/bnb-chain/bsc/pull/1916) feat: cherry-pick pbss patch commits from eth repo in v1.13.2
* [\#1939](https://github.com/bnb-chain/bsc/pull/1939) dependency: bump Go to 1.20 and update some dependencies in go.mod
* [\#1955](https://github.com/bnb-chain/bsc/pull/1955) eth, trie/triedb/pathdb: pbss patches
* [\#1962](https://github.com/bnb-chain/bsc/pull/1962) cherry-pick pbss patches from go-ethereum

BUGFIX
* [\#1923](https://github.com/bnb-chain/bsc/pull/1923) consensus/parlia: fix nextForkHash in Extra field of block header
* [\#1950](https://github.com/bnb-chain/bsc/pull/1950) fix: 2 get-receipt-related APIs
* [\#1951](https://github.com/bnb-chain/bsc/pull/1951) txpool: fix a potential crash issue during shutdown
* [\#1963](https://github.com/bnb-chain/bsc/pull/1963) fix: revert trie committed flag after deleting the statedb MPT cache

IMPROVEMENT
* [\#1948](https://github.com/bnb-chain/bsc/pull/1948) performance: commitTrie concurrently
* [\#1949](https://github.com/bnb-chain/bsc/pull/1949) code: remove accountTrieCache and storageTrieCache
* [\#1954](https://github.com/bnb-chain/bsc/pull/1954) trie: keep trie prefetch during validation phase

## v1.3.0
#### RPC
* [internal/ethapi: add debug_getRawReceipts RPC method (#24773)](https://github.com/bnb-chain/bsc/pull/1840/commits/ae7d834bc752a2d94fef9d354ee78fcb9425f3d1)
2 changes: 1 addition & 1 deletion README.md
@@ -61,7 +61,7 @@ Many of the below are the same as or similar to go-ethereum.

For prerequisites and detailed build instructions please read the [Installation Instructions](https://geth.ethereum.org/docs/getting-started/installing-geth).

Building `geth` requires both a Go (version 1.19 or later) and a C compiler (GCC 5 or higher). You can install
Building `geth` requires both a Go (version 1.20 or later) and a C compiler (GCC 5 or higher). You can install
them using your favourite package manager. Once the dependencies are installed, run

```shell
2 changes: 1 addition & 1 deletion accounts/abi/bind/bind_test.go
@@ -2145,7 +2145,7 @@ func TestGolangBindings(t *testing.T) {
t.Fatalf("failed to replace cometbft dependency to bnb-chain source: %v\n%s", err, out)
}

tidier := exec.Command(gocmd, "mod", "tidy", "-compat=1.19")
tidier := exec.Command(gocmd, "mod", "tidy", "-compat=1.20")
tidier.Dir = pkg
if out, err := tidier.CombinedOutput(); err != nil {
t.Fatalf("failed to tidy Go module file: %v\n%s", err, out)
1 change: 1 addition & 0 deletions cmd/geth/main.go
@@ -97,6 +97,7 @@ var (
utils.TransactionHistoryFlag,
utils.StateSchemeFlag,
utils.StateHistoryFlag,
utils.PathDBSyncFlag,
utils.LightServeFlag,
utils.LightIngressFlag,
utils.LightEgressFlag,
17 changes: 16 additions & 1 deletion cmd/geth/snapshot.go
@@ -23,6 +23,7 @@ import (
"fmt"
"os"
"path/filepath"
"strings"
"time"

"github.com/prometheus/tsdb/fileutil"
@@ -360,7 +361,21 @@ func pruneBlock(ctx *cli.Context) error {
if path == "" {
return errors.New("prune failed, did not specify the AncientPath")
}
newAncientPath = filepath.Join(path, "ancient_back")
newVersionPath := false
files, err := os.ReadDir(oldAncientPath)
if err != nil {
return err
}
for _, file := range files {
if file.IsDir() && file.Name() == "chain" {
newVersionPath = true
}
}
if newVersionPath && !strings.HasSuffix(oldAncientPath, "geth/chaindata/ancient/chain") {
log.Error("datadir.ancient subdirectory incorrect", "got path", oldAncientPath, "want subdirectory", "geth/chaindata/ancient/chain/")
return errors.New("datadir.ancient subdirectory incorrect")
}
newAncientPath = filepath.Join(path, "chain_back")

blockpruner = pruner.NewBlockPruner(chaindb, stack, oldAncientPath, newAncientPath, blockAmountReserved)

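The prune-block change above does two things: it probes whether the ancient store already uses the newer freezer layout (chain data under a `chain` subdirectory), and if so it insists that `--datadir.ancient` point at that subdirectory before creating the backup. A minimal standalone sketch of that check, extracted from the hunk above (the helper names are illustrative; the diff inlines this logic in `pruneBlock`):

```go
package prunecheck

import (
	"errors"
	"os"
	"strings"
)

// hasChainSubdir reports whether the ancient store uses the newer freezer
// layout, where chain data lives under a "chain" subdirectory.
func hasChainSubdir(ancientPath string) (bool, error) {
	entries, err := os.ReadDir(ancientPath)
	if err != nil {
		return false, err
	}
	for _, entry := range entries {
		if entry.IsDir() && entry.Name() == "chain" {
			return true, nil
		}
	}
	return false, nil
}

// checkAncientPath mirrors the validation added to pruneBlock: a new-layout
// store is only accepted when the supplied path already ends in the chain
// subdirectory, i.e. geth/chaindata/ancient/chain.
func checkAncientPath(oldAncientPath string) error {
	newLayout, err := hasChainSubdir(oldAncientPath)
	if err != nil {
		return err
	}
	if newLayout && !strings.HasSuffix(oldAncientPath, "geth/chaindata/ancient/chain") {
		return errors.New("datadir.ancient subdirectory incorrect")
	}
	return nil
}
```

Note that the backup directory also moves from `ancient_back` to `chain_back` in the same hunk.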
9 changes: 9 additions & 0 deletions cmd/utils/flags.go
@@ -314,6 +314,12 @@ var (
Value: rawdb.HashScheme,
Category: flags.StateCategory,
}
PathDBSyncFlag = &cli.BoolFlag{
Name: "pathdb.sync",
Usage: "sync flush nodes cache to disk in path schema",
Value: false,
Category: flags.StateCategory,
}
StateHistoryFlag = &cli.Uint64Flag{
Name: "history.state",
Usage: "Number of recent blocks to retain state history for (default = 90,000 blocks, 0 = entire chain)",
@@ -1951,6 +1957,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
log.Warn("The flag --txlookuplimit is deprecated and will be removed, please use --history.transactions")
cfg.TransactionHistory = ctx.Uint64(TransactionHistoryFlag.Name)
}
if ctx.IsSet(PathDBSyncFlag.Name) {
cfg.PathSyncFlush = true
}
if ctx.String(GCModeFlag.Name) == "archive" && cfg.TransactionHistory != 0 {
cfg.TransactionHistory = 0
log.Warn("Disabled transaction unindexing for archive node")
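Read together with the `CacheConfig` and `triedbConfig` hunks in core/blockchain.go further down, the new flag travels through three layers: `--pathdb.sync` sets `cfg.PathSyncFlush` on the eth config, that value is copied into `CacheConfig.PathSyncFlush`, and `triedbConfig()` hands it to the path database as `pathdb.Config.SyncFlush`. A condensed sketch of the hand-off, assuming the surrounding construction follows the usual geth wiring (only the field names above actually appear in this diff):

```go
package sketch

import (
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
)

// newCacheConfig shows the assumed hand-off of --pathdb.sync: the eth config
// field PathSyncFlush (set in SetEthConfig above) is copied into
// core.CacheConfig, whose triedbConfig() forwards it to the path database as
// pathdb.Config.SyncFlush (see the core/blockchain.go hunks below).
func newCacheConfig(pathSyncFlush bool) *core.CacheConfig {
	return &core.CacheConfig{
		StateScheme:   rawdb.PathScheme, // the flag only matters for the path scheme
		PathSyncFlush: pathSyncFlush,
	}
}
```

Per the `CacheConfig` comment in the core/blockchain.go hunk, the setting controls whether the pathdb trienodebuffer is flushed to disk synchronously rather than in the background.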
4 changes: 2 additions & 2 deletions consensus/parlia/parlia.go
@@ -950,7 +950,7 @@ func (p *Parlia) Prepare(chain consensus.ChainHeaderReader, header *types.Header
}

header.Extra = header.Extra[:extraVanity-nextForkHashSize]
nextForkHash := forkid.NewID(p.chainConfig, p.genesisHash, number, header.Time).Hash
nextForkHash := forkid.NextForkHash(p.chainConfig, p.genesisHash, number, header.Time)
header.Extra = append(header.Extra, nextForkHash[:]...)

if err := p.prepareValidators(header); err != nil {
@@ -1084,7 +1084,7 @@ func (p *Parlia) Finalize(chain consensus.ChainHeaderReader, header *types.Heade
if err != nil {
return err
}
nextForkHash := forkid.NewID(p.chainConfig, p.genesisHash, number, header.Time).Hash
nextForkHash := forkid.NextForkHash(p.chainConfig, p.genesisHash, number, header.Time)
if !snap.isMajorityFork(hex.EncodeToString(nextForkHash[:])) {
log.Debug("there is a possible fork, and your client is not the majority. Please check...", "nextForkHash", hex.EncodeToString(nextForkHash[:]))
}
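This is the #1923 fix from the CHANGELOG above: the four bytes appended to the header's extra-data, and compared against the snapshot's majority fork in Finalize, are now produced by `forkid.NextForkHash` instead of taking the current fork ID's hash, so the field actually advertises the next scheduled fork as its name promises. A minimal sketch built only from the call shape visible above (the implementation and exact return type of `NextForkHash` are not part of this diff; a 4-byte checksum like `ID.Hash` is assumed):

```go
package sketch

import (
	"encoding/hex"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/forkid"
	"github.com/ethereum/go-ethereum/params"
)

// nextForkHashHex mirrors the new call sites in Prepare and Finalize:
// forkid.NextForkHash replaces forkid.NewID(...).Hash, and the result is
// sliced and hex-encoded exactly as in the majority-fork check above.
func nextForkHashHex(config *params.ChainConfig, genesis common.Hash, number, time uint64) string {
	nextForkHash := forkid.NextForkHash(config, genesis, number, time)
	return hex.EncodeToString(nextForkHash[:])
}
```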
9 changes: 5 additions & 4 deletions consensus/parlia/parlia_test.go
@@ -1,8 +1,9 @@
package parlia

import (
"crypto/rand"
"fmt"
"math/rand"
mrand "math/rand"
"testing"

"golang.org/x/crypto/sha3"
@@ -47,7 +48,7 @@ func simulateValidatorOutOfService(totalValidators int, downValidators int) {
validators[i] = true
down[i] = i
}
rand.Shuffle(totalValidators, func(i, j int) {
mrand.Shuffle(totalValidators, func(i, j int) {
down[i], down[j] = down[j], down[i]
})
for i := 0; i < downValidators; i++ {
@@ -125,8 +126,8 @@ func simulateValidatorOutOfService(totalValidators int, downValidators int) {
}

func producerBlockDelay(candidates map[int]bool, height, numOfValidators int) (int, uint64) {
s := rand.NewSource(int64(height))
r := rand.New(s)
s := mrand.NewSource(int64(height))
r := mrand.New(s)
n := numOfValidators
backOffSteps := make([]int, 0, n)
for idx := 0; idx < n; idx++ {
60 changes: 25 additions & 35 deletions core/blockchain.go
@@ -155,6 +155,7 @@ type CacheConfig struct {
NoTries bool // Insecure settings. Do not have any tries in databases if enabled.
StateHistory uint64 // Number of blocks from head whose state histories are reserved.
StateScheme string // Scheme used to store ethereum states and merkle tree nodes on top
PathSyncFlush bool // Whether sync flush the trienodebuffer of pathdb to disk.

SnapshotNoBuild bool // Whether the background generation is allowed
SnapshotWait bool // Wait for snapshot construction on startup. TODO(karalabe): This is a dirty hack for testing, nuke it
@@ -174,6 +175,7 @@ func (c *CacheConfig) triedbConfig() *trie.Config {
}
if c.StateScheme == rawdb.PathScheme {
config.PathDB = &pathdb.Config{
SyncFlush: c.PathSyncFlush,
StateHistory: c.StateHistory,
CleanCacheSize: c.TrieCleanLimit * 1024 * 1024,
DirtyCacheSize: c.TrieDirtyLimit * 1024 * 1024,
@@ -396,11 +398,15 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
var diskRoot common.Hash
if bc.cacheConfig.SnapshotLimit > 0 {
diskRoot = rawdb.ReadSnapshotRoot(bc.db)
} else if bc.triedb.Scheme() == rawdb.PathScheme {
_, diskRoot = rawdb.ReadAccountTrieNode(bc.db, nil)
}
if bc.triedb.Scheme() == rawdb.PathScheme {
recoverable, _ := bc.triedb.Recoverable(diskRoot)
if !bc.HasState(diskRoot) && !recoverable {
diskRoot = bc.triedb.Head()
}
}
if diskRoot != (common.Hash{}) {
log.Warn("Head state missing, repairing", "number", head.Number, "hash", head.Hash(), "snaproot", diskRoot)
log.Warn("Head state missing, repairing", "number", head.Number, "hash", head.Hash(), "diskRoot", diskRoot)

snapDisk, err := bc.setHeadBeyondRoot(head.Number.Uint64(), 0, diskRoot, true)
if err != nil {
@@ -689,18 +695,18 @@ func (bc *BlockChain) loadLastState() error {
blockTd = bc.GetTd(headBlock.Hash(), headBlock.NumberU64())
)
if headHeader.Hash() != headBlock.Hash() {
log.Info("Loaded most recent local header", "number", headHeader.Number, "hash", headHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(headHeader.Time), 0)))
log.Info("Loaded most recent local header", "number", headHeader.Number, "hash", headHeader.Hash(), "hash", headHeader.Root, "td", headerTd, "age", common.PrettyAge(time.Unix(int64(headHeader.Time), 0)))
}
log.Info("Loaded most recent local block", "number", headBlock.Number(), "hash", headBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(headBlock.Time()), 0)))
log.Info("Loaded most recent local block", "number", headBlock.Number(), "hash", headBlock.Hash(), "root", headBlock.Root(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(headBlock.Time()), 0)))
if headBlock.Hash() != currentSnapBlock.Hash() {
snapTd := bc.GetTd(currentSnapBlock.Hash(), currentSnapBlock.Number.Uint64())
log.Info("Loaded most recent local snap block", "number", currentSnapBlock.Number, "hash", currentSnapBlock.Hash(), "td", snapTd, "age", common.PrettyAge(time.Unix(int64(currentSnapBlock.Time), 0)))
log.Info("Loaded most recent local snap block", "number", currentSnapBlock.Number, "hash", currentSnapBlock.Hash(), "root", currentSnapBlock.Root, "td", snapTd, "age", common.PrettyAge(time.Unix(int64(currentSnapBlock.Time), 0)))
}
if posa, ok := bc.engine.(consensus.PoSA); ok {
if currentFinalizedHeader := posa.GetFinalizedHeader(bc, headHeader); currentFinalizedHeader != nil {
if currentFinalizedBlock := bc.GetBlockByHash(currentFinalizedHeader.Hash()); currentFinalizedBlock != nil {
finalTd := bc.GetTd(currentFinalizedBlock.Hash(), currentFinalizedBlock.NumberU64())
log.Info("Loaded most recent local finalized block", "number", currentFinalizedBlock.Number(), "hash", currentFinalizedBlock.Hash(), "td", finalTd, "age", common.PrettyAge(time.Unix(int64(currentFinalizedBlock.Time()), 0)))
log.Info("Loaded most recent local finalized block", "number", currentFinalizedBlock.Number(), "hash", currentFinalizedBlock.Hash(), "root", currentFinalizedBlock.Root(), "td", finalTd, "age", common.PrettyAge(time.Unix(int64(currentFinalizedBlock.Time()), 0)))
}
}
}
@@ -802,6 +808,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha

// resetState resets the persistent state to genesis if it's not available.
resetState := func() {
log.Info("Reset to block with genesis state", "number", bc.genesisBlock.NumberU64(), "hash", bc.genesisBlock.Hash())
// Short circuit if the genesis state is already present.
if bc.HasState(bc.genesisBlock.Root()) {
return
@@ -825,7 +832,6 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha
// chain reparation mechanism without deleting any data!
if currentBlock := bc.CurrentBlock(); currentBlock != nil && header.Number.Uint64() <= currentBlock.Number.Uint64() {
newHeadBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
lastBlockNum := header.Number.Uint64()
if newHeadBlock == nil {
log.Error("Gap in the chain, rewinding to genesis", "number", header.Number, "hash", header.Hash())
newHeadBlock = bc.genesisBlock
@@ -835,10 +841,8 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha
// keeping rewinding until we exceed the optional threshold
// root hash
beyondRoot := (root == common.Hash{}) // Flag whether we're beyond the requested root (no root, always true)
enoughBeyondCount := false
beyondCount := 0

for {
beyondCount++
// If a root threshold was requested but not yet crossed, check
if root != (common.Hash{}) && !beyondRoot && newHeadBlock.Root() == root {
beyondRoot, rootNumber = true, newHeadBlock.NumberU64()
@@ -854,24 +858,11 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha
log.Error("Missing block in the middle, aiming genesis", "number", newHeadBlock.NumberU64()-1, "hash", newHeadBlock.ParentHash())
newHeadBlock = bc.genesisBlock
} else {
log.Trace("Rewind passed pivot, aiming genesis", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "pivot", *pivot)
log.Info("Rewind passed pivot, aiming genesis", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "pivot", *pivot)
newHeadBlock = bc.genesisBlock
}
}
if beyondRoot || (enoughBeyondCount && root != common.Hash{}) || newHeadBlock.NumberU64() == 0 {
if enoughBeyondCount && (root != common.Hash{}) && rootNumber == 0 {
for {
lastBlockNum++
block := bc.GetBlockByNumber(lastBlockNum)
if block == nil {
break
}
if block.Root() == root {
rootNumber = block.NumberU64()
break
}
}
}
if beyondRoot || newHeadBlock.NumberU64() == 0 {
if newHeadBlock.NumberU64() == 0 {
resetState()
} else if !bc.HasState(newHeadBlock.Root()) {
@@ -1180,8 +1171,7 @@ func (bc *BlockChain) Stop() {
// - HEAD-127: So we have a hard limit on the number of blocks reexecuted
if !bc.cacheConfig.TrieDirtyDisabled {
triedb := bc.triedb
var once sync.Once

var once sync.Once
for _, offset := range []uint64{0, 1, TriesInMemory - 1} {
if number := bc.CurrentBlock().Number.Uint64(); number > offset {
recent := bc.GetBlockByNumber(number - offset)
@@ -1191,9 +1181,9 @@ func (bc *BlockChain) Stop() {
} else {
rawdb.WriteSafePointBlockNumber(bc.db, recent.NumberU64())
once.Do(func() {
rawdb.WriteHeadBlockHash(bc.db, recent.Hash())
rawdb.WriteHeadBlockHash(bc.db, recent.Hash())
})
}
}
}
}
if snapBase != (common.Hash{}) {
@@ -1216,7 +1206,7 @@ func (bc *BlockChain) Stop() {
for !bc.triegc.Empty() {
triedb.Dereference(bc.triegc.PopItem())
}
if size, _ := triedb.Size(); size != 0 {
if _, size, _, _ := triedb.Size(); size != 0 {
log.Error("Dangling trie nodes after full cleanup")
}
}
@@ -1620,8 +1610,8 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
if current := block.NumberU64(); current > bc.triesInMemory {
// If we exceeded our memory allowance, flush matured singleton nodes to disk
var (
nodes, imgs = triedb.Size()
limit = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024
_, nodes, _, imgs = triedb.Size()
limit = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024
)
if nodes > limit || imgs > 4*1024*1024 {
triedb.Cap(limit - ethdb.IdealBatchSize)
@@ -2105,8 +2095,8 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
stats.processed++
stats.usedGas += usedGas

dirty, _ := bc.triedb.Size()
stats.report(chain, it.index, dirty, setHead)
trieDiffNodes, trieBufNodes, trieImmutableBufNodes, _ := bc.triedb.Size()
stats.report(chain, it.index, trieDiffNodes, trieBufNodes, trieImmutableBufNodes, setHead)

if !setHead {
// After merge we expect few side chains. Simply count
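The three `Size()` call sites touched above imply that the trie database now reports four memory figures instead of two. Inferred purely from those call sites (the new `Size` signature itself is not shown in this diff), the order appears to be: pathdb diff-layer nodes, the dirty node buffer, the immutable node buffer, and preimages. A small sketch of a caller adapting to the wider return:

```go
package sketch

import (
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/trie"
)

// reportTrieMemory adapts a caller to the four-value Size(). The meaning and
// order of the values are inferred from the insertChain, writeBlockWithState
// and Stop hunks above, not from documentation.
func reportTrieMemory(db *trie.Database) {
	diffs, dirty, immutable, preimages := db.Size()
	log.Info("Trie memory usage",
		"triediffs", diffs, // pathdb diff layers (zero under the hash scheme)
		"triedirty", dirty, // dirty node buffer
		"trieimmutabledirty", immutable, // immutable buffer being flushed
		"preimages", preimages)
}
```

The `insertStats.report` change in the next file consumes exactly this split, logging the trie figures separately when the path scheme is active.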
10 changes: 8 additions & 2 deletions core/blockchain_insert.go
@@ -39,7 +39,7 @@ const statsReportLimit = 8 * time.Second

// report prints statistics if some number of blocks have been processed
// or more than a few seconds have passed since the last message.
func (st *insertStats) report(chain []*types.Block, index int, dirty common.StorageSize, setHead bool) {
func (st *insertStats) report(chain []*types.Block, index int, trieDiffNodes, trieBufNodes, trieImmutableBufNodes common.StorageSize, setHead bool) {
// Fetch the timings for the batch
var (
now = mclock.Now()
@@ -63,7 +63,13 @@ func (st *insertStats) report(chain []*types.Block, index int, dirty common.Stor
if timestamp := time.Unix(int64(end.Time()), 0); time.Since(timestamp) > time.Minute {
context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
}
context = append(context, []interface{}{"dirty", dirty}...)
if trieDiffNodes != 0 { // pathdb
context = append(context, []interface{}{"triediffs", trieDiffNodes}...)
context = append(context, []interface{}{"triedirty", trieBufNodes}...)
context = append(context, []interface{}{"trieimutabledirty", trieImmutableBufNodes}...)
} else {
context = append(context, []interface{}{"triedirty", trieBufNodes}...)
}

if st.queued > 0 {
context = append(context, []interface{}{"queued", st.queued}...)
(Diffs for the remaining changed files are not shown.)