diff --git a/.circleci/config.yml b/.circleci/config.yml index 5ad0777aa..a10f09644 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -19,7 +19,7 @@ orbs: docker: circleci/docker@2.1.1 executors: - linux-executor: # this executor is for general testing and packaging linux binaries + tar-linux-amd64-executor: # this executor is for linux-amd64 tar packaging working_directory: ~/go/src/github.com/kaiachain/kaia resource_class: medium docker: @@ -27,7 +27,44 @@ executors: auth: username: $DOCKER_LOGIN password: $DOCKER_PASSWORD - linux-others-executor: # this executor is for test-others job + tar-linux-arm64-executor: # this executor is for linux-arm64 tar packaging + working_directory: ~/go/src/github.com/kaiachain/kaia + resource_class: arm.medium + docker: + - image: kaiachain/build_base:1.12-go.1.22.1-solc0.8.13-ubuntu-20.04-arm + auth: + username: $DOCKER_LOGIN + password: $DOCKER_PASSWORD + rpm-linux-amd64-executor: # this executor is for linux-amd64 rpm packaging + working_directory: /go/src/github.com/kaiachain/kaia + resource_class: medium + docker: + - image: kaiachain/circleci-rpmbuild:1.22.1-gcc7 + auth: + username: $DOCKER_LOGIN + password: $DOCKER_PASSWORD + rpm-linux-arm64-executor: # this executor is for linux-arm64 rpm packaging + working_directory: /go/src/github.com/kaiachain/kaia + resource_class: arm.medium + docker: + - image: kaiachain/circleci-rpmbuild:1.22.1-gcc7-arm + auth: + username: $DOCKER_LOGIN + password: $DOCKER_PASSWORD + tar-darwin-arm64-executor: # this executor is for darwin-arm64 tar packaging + working_directory: ~/go/src/github.com/kaiachain/kaia + macos: + xcode: 14.2.0 + resource_class: macos.m1.medium.gen1 + test-executor: # this executor is for general test jobs + working_directory: ~/go/src/github.com/kaiachain/kaia + resource_class: medium + docker: + - image: kaiachain/build_base:1.12-go.1.22.1-solc0.8.13-ubuntu-20.04 + auth: + username: $DOCKER_LOGIN + 
password: $DOCKER_PASSWORD + test-others-executor: # this executor is for test-others job working_directory: /go/src/github.com/kaiachain/kaia resource_class: xlarge docker: @@ -57,36 +94,12 @@ executors: KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT KAFKA_CFG_CONTROLLER_LISTENER_NAMES: CONTROLLER KAFKA_CFG_INTER_BROKER_LISTENER_NAME: PLAINTEXT - darwin-executor: # this executor is for packaging darwin binaries - working_directory: ~/go/src/github.com/kaiachain/kaia - macos: - xcode: 14.2.0 - resource_class: macos.m1.medium.gen1 - rpm-executor: # this executor is for packaging rpm binaries - working_directory: /go/src/github.com/kaiachain/kaia - docker: - - image: kaiachain/circleci-rpmbuild:1.22.1-gcc7 - auth: - username: $DOCKER_LOGIN - password: $DOCKER_PASSWORD default: working_directory: ~/go/src/github.com/kaiachain/kaia docker: - image: cimg/go:1.22.1 commands: - install-darwin-dependencies: - description: Install dependencies on darwin machine - steps: - - run: - name: "install darwin dependencies" - command: | - # install awscli - brew install awscli - # install golang - curl -O https://dl.google.com/go/go1.22.1.darwin-arm64.tar.gz - mkdir $HOME/go1.22.1 - tar -C $HOME/go1.22.1 -xzf go1.22.1.darwin-arm64.tar.gz pre-build: description: "before build, set version" steps: @@ -108,75 +121,74 @@ commands: echo "this is not RC version" fi echo "export KAIA_VERSION=$(go run build/rpm/main.go version)" >> $BASH_ENV - build-packaging: - description: "Build for each OS/Network" + packaging-and-upload: + description: "Build and upload tar/rpm packages to S3 for each OS/Network" parameters: + package-type: + type: string + default: "tar" os-network: type: string default: "linux-amd64" - baobab: - type: string - default: "" - steps: - - run: - name: "build and packaging" - command: | - export GOPATH=~/go - export PATH=$HOME/go1.22.1/go/bin:$PATH - make all - for item in kcn kpn ken kscn kspn ksen kbn 
kgen homi; do - ./build/package-tar.sh << parameters.baobab >> << parameters.os-network >> $item - done - upload-repo: - description: "upload packaging tar.gz" - parameters: - item: - type: string - default: "kcn kpn ken kgen kscn kbn kspn ksen homi" steps: + - checkout - run: - name: "upload S3 repo" + name: "install darwin dependencies when package is darwin" command: | - export GOPATH=~/go - export PATH=$HOME/go1.22.1/go/bin:$PATH - KAIA_VERSION=$(go run build/rpm/main.go version) - for item in << parameters.item >>; do aws s3 cp packages/${item}-*.tar.gz s3://$FRONTEND_BUCKET/packages/kaia/$KAIA_VERSION/; done - rpm-tagging: - description: "rpm tagging for cypress" - steps: + if [[ << parameters.os-network >> = "darwin-arm64" ]]; then + # install awscli + brew install awscli + # install golang + curl -O https://dl.google.com/go/go1.22.1.darwin-arm64.tar.gz + mkdir $HOME/go1.22.1 + tar -C $HOME/go1.22.1 -xzf go1.22.1.darwin-arm64.tar.gz + # Set GOPATH and update PATH + echo 'export GOPATH=~/go' >> ~/.bashrc + echo 'export PATH=$HOME/go1.22.1/go/bin:$PATH' >> ~/.bashrc + source ~/.bashrc + fi + - pre-build - run: - name: "rpm tagging" + name: "build binaries" command: | - for item in kcn kpn ken kscn kspn ksen kbn kgen homi; do - ./build/package-rpm.sh $item - done + source ~/.bashrc + make all - run: - name: "upload S3 repo" + name: "build mainnet and kairos packages" command: | - PLATFORM_SUFFIX=$(uname -s | tr '[:upper:]' '[:lower:]')-$(uname -m) - KAIA_VERSION=$(go run build/rpm/main.go version) - - for item in kcn kpn ken kscn kspn ksen kbn kgen homi; do - TARGET_RPM=$(find $item-linux-x86_64/rpmbuild/RPMS/x86_64/ | awk -v pat="$item(d)?-v" '$0~pat') - aws s3 cp $TARGET_RPM s3://$FRONTEND_BUCKET/packages/rhel/7/kaia/ - aws s3 cp $TARGET_RPM s3://$FRONTEND_BUCKET/packages/kaia/$KAIA_VERSION/ + source ~/.bashrc + second_parameter="" + if [[ << parameters.package-type >> = "tar" ]]; then + second_parameter=<< parameters.os-network >> + fi + for item in kcn kpn 
ken kgen kscn kbn kspn ksen homi; do + ./build/package-<< parameters.package-type >>.sh $second_parameter $item done - rpm-tagging-baobab: - description: "rpm tagging for baobab" - steps: - - run: - name: "rpm tagging baobab" - command: | + for item in kcn kpn ken; do - ./build/package-rpm.sh -b $item + ./build/package-<< parameters.package-type >>.sh -b $second_parameter $item done - run: - name: "upload S3 repo" + name: "upload << parameters.package-type >>-<< parameters.os-network >> packages to S3 repo" command: | - for item in kcn kpn ken; do - TARGET_RPM=$(find $item-linux-x86_64/rpmbuild/RPMS/x86_64/ | awk -v pat="$item(d)?-kairos-v" '$0~pat') - aws s3 cp $TARGET_RPM s3://$FRONTEND_BUCKET/packages/rhel/7/kaia/ - aws s3 cp $TARGET_RPM s3://$FRONTEND_BUCKET/packages/kaia/$KAIA_VERSION/ + source ~/.bashrc + KAIA_VERSION=$(go run build/rpm/main.go version) + PLATFORM_SUFFIX=$(uname -s | tr '[:upper:]' '[:lower:]')-$(uname -m) + + for item in kcn kpn ken kcn-kairos kpn-kairos ken-kairos kgen kscn kbn kspn ksen homi; do + if [[ << parameters.package-type >> = "tar" ]]; then + aws s3 cp packages/${item}-v*.tar.gz s3://$FRONTEND_BUCKET/packages/kaia/$KAIA_VERSION/ + elif [[ << parameters.package-type >> = "rpm" ]]; then + BINARY=$item + KAIROS="" + if [[ $BINARY = *-kairos ]]; then + BINARY="${BINARY%-kairos}" + KAIROS="-kairos" + fi + TARGET_RPM=$(find $BINARY-$PLATFORM_SUFFIX/rpmbuild/RPMS/$(uname -m)/ | awk -v pat="$BINARY(d)?$KAIROS-v" '$0~pat') + aws s3 cp $TARGET_RPM s3://$FRONTEND_BUCKET/packages/rhel/7/kaia/ + aws s3 cp $TARGET_RPM s3://$FRONTEND_BUCKET/packages/kaia/$KAIA_VERSION/ + fi done createrepo-update: steps: @@ -331,27 +343,26 @@ commands: curl https://bootstrap.pypa.io/get-pip.py | python pip3 install -r requirements.txt python3 main.py --protocol rpc + jobs: build: - executor: linux-executor + executor: test-executor steps: - checkout - pre-build - run: name: "Build" command: make all - test-linter: - executor: linux-executor + executor: test-executor steps: - checkout - 
run: name: "Run golangci-lint" no_output_timeout: 30m command: go run build/ci.go lint -v --new-from-rev=dev - test-datasync: - executor: linux-others-executor + executor: test-others-executor steps: - checkout - wait-other-containers-ready @@ -359,27 +370,24 @@ jobs: name: "Run test datasync" no_output_timeout: 30m command: make test-datasync - test-networks: - executor: linux-executor + executor: test-executor steps: - checkout - run: name: "Run test networks" no_output_timeout: 30m command: make test-networks - test-node: - executor: linux-executor + executor: test-executor steps: - checkout - run: name: "Run test node" no_output_timeout: 30m command: make test-node - test-tests: - executor: linux-executor + executor: test-executor steps: - checkout - run: @@ -388,9 +396,8 @@ jobs: command: | git clone --depth 1 https://$TEST_TOKEN@github.com/kaiachain/kaia-core-tests.git tests/testdata make test-tests - test-others: - executor: linux-others-executor + executor: test-others-executor resource_class: xlarge steps: - checkout @@ -400,29 +407,25 @@ jobs: no_output_timeout: 30m command: | make test-others - test-rpc: - executor: linux-executor + executor: test-executor steps: - checkout - pre-build - run-rpc - pass-tests: executor: default steps: - run: name: "tests pass!" command: echo "tests pass!" 
- tagger-verify: executor: default steps: - checkout - tagger-verify - coverage: - executor: linux-others-executor + executor: test-others-executor resource_class: xlarge steps: - checkout @@ -444,9 +447,8 @@ jobs: path: /tmp/coverage_reports - codecov/upload: file: /tmp/coverage_reports/coverage_* - linters: - executor: linux-executor + executor: test-executor steps: - checkout - run: @@ -459,84 +461,56 @@ jobs: - notify-success - store_artifacts: path: /tmp/linter_reports - rpc-tester-report: - executor: linux-executor + executor: test-executor steps: - checkout - pre-build - run-rpc - notify-failure - notify-success - - packaging-linux: - executor: linux-executor - resource_class: large - steps: - - checkout - - pre-build - - build-packaging - - upload-repo - - packaging-linux-baobab: - executor: linux-executor - resource_class: large - steps: - - checkout - - pre-build - - build-packaging: - baobab: "-b" - - upload-repo: - item: "kcn kpn ken" - packaging-darwin: - executor: darwin-executor - steps: - - checkout - - install-darwin-dependencies - - pre-build - - build-packaging: + rpm-linux-amd64-packaging: + executor: rpm-linux-amd64-executor + steps: + - packaging-and-upload: + os-network: "linux-amd64" + package-type: "rpm" + tar-linux-amd64-packaging: + executor: tar-linux-amd64-executor + steps: + - packaging-and-upload: + os-network: "linux-amd64" + package-type: "tar" + rpm-linux-arm64-packaging: + executor: rpm-linux-arm64-executor + steps: + - packaging-and-upload: + os-network: "linux-arm64" + package-type: "rpm" + tar-linux-arm64-packaging: + executor: tar-linux-arm64-executor + steps: + - packaging-and-upload: + os-network: "linux-arm64" + package-type: "tar" + tar-darwin-arm64-packaging: + executor: tar-darwin-arm64-executor + steps: + - packaging-and-upload: os-network: "darwin-arm64" - - upload-repo - - packaging-darwin-baobab: - executor: darwin-executor - steps: - - checkout - - install-darwin-dependencies - - pre-build - - build-packaging: - 
os-network: "darwin-arm64" - baobab: "-b" - - upload-repo: - item: "kcn kpn ken" - rpm-tagged: - executor: rpm-executor - steps: - - checkout - - pre-build - - rpm-tagging - - rpm-tagged-baobab: - executor: rpm-executor - steps: - - checkout - - pre-build - - rpm-tagging-baobab - + package-type: "tar" deploy-rpm-public: - executor: rpm-executor + executor: rpm-linux-amd64-executor steps: - add_ssh_keys - createrepo-update - notify-failure - notify-success - tag-verify: - executor: linux-executor + executor: test-executor steps: - checkout - tag-verify - release-PR: executor: default steps: @@ -545,7 +519,6 @@ jobs: - make-pr - notify-failure - notify-success - major-tagging: executor: default steps: @@ -579,7 +552,6 @@ workflows: ignore: /.*/ - test-rpc: filters: *filter-only-version-tag - - pass-tests: requires: - build @@ -591,7 +563,6 @@ workflows: - tag-verify - tagger-verify filters: *filter-version-not-release - - docker/publish: # for dev branch filters: branches: @@ -605,7 +576,6 @@ workflows: use-remote-docker: true remote-docker-version: 20.10.14 use-buildkit: true - - docker/publish: # for release versions filters: tags: @@ -621,59 +591,49 @@ workflows: use-remote-docker: true remote-docker-version: 20.10.14 use-buildkit: true - - tag-verify: filters: *filter-only-version-tag - - deploy-rpm-public: requires: - - rpm-tagged - - rpm-tagged-baobab - - packaging-linux - - packaging-linux-baobab - - packaging-darwin - - packaging-darwin-baobab + - rpm-linux-amd64-packaging + - tar-linux-amd64-packaging + - rpm-linux-arm64-packaging + - tar-linux-arm64-packaging + - tar-darwin-arm64-packaging filters: tags: only: /^v[0-9]+\.[0-9]+\.[0-9]/ branches: ignore: /.*/ - - release-PR: requires: - - rpm-tagged - - rpm-tagged-baobab - - packaging-linux - - packaging-linux-baobab - - packaging-darwin - - packaging-darwin-baobab + - rpm-linux-amd64-packaging + - tar-linux-amd64-packaging + - rpm-linux-arm64-packaging + - tar-linux-arm64-packaging + - 
tar-darwin-arm64-packaging filters: tags: only: /^v[0-9]+\.[0-9]+\.[0-9]+-rc\.[0-9]+.*/ branches: ignore: /.*/ - - - rpm-tagged: - filters: *filter-only-version-tag - requires: - - pass-tests - - rpm-tagged-baobab: + - rpm-linux-amd64-packaging: filters: *filter-only-version-tag requires: - pass-tests - - packaging-linux: + - tar-linux-amd64-packaging: filters: *filter-only-version-tag requires: - pass-tests - - packaging-linux-baobab: + - rpm-linux-arm64-packaging: filters: *filter-only-version-tag requires: - pass-tests - - packaging-darwin: + - tar-linux-arm64-packaging: filters: *filter-only-version-tag requires: - pass-tests - - packaging-darwin-baobab: + - tar-darwin-arm64-packaging: filters: *filter-only-version-tag requires: - pass-tests diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 63dd6e5c7..cc42d7e35 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -6,7 +6,7 @@ # @global-owner1 and @global-owner2 will be requested for # review when someone opens a pull request. #* @global-owner1 @global-owner2 -* @aidan-kwon @blukat29 @yoomee1313 +* @aidan-kwon @blukat29 @yoomee1313 @ian0371 # Order is important; the last matching pattern takes the most # precedence. When someone opens a pull request that only @@ -37,25 +37,3 @@ # directory in the root of your repository. 
#/docs/ @doctocat -/.circleci/ @yoomee1313 @JayChoi1736 @2dvorak @sjnam -/.github/ @yoomee1313 @JayChoi1736 @2dvorak @sjnam -/accounts/ @blukat29 @hyunsooda @2dvorak @kjeom @JayChoi1736 -/api/ @blukat29 @hyunsooda @2dvorak @kjeom @JayChoi1736 -/blockchain/ @yoomee1313 @sjnam @ian0371 @hyeonLewis -/build/ @yoomee1313 @JayChoi1736 @2dvorak @sjnam -/client/ @blukat29 @hyunsooda @2dvorak @kjeom @JayChoi1736 -/cmd/ @blukat29 @hyunsooda @2dvorak @kjeom @JayChoi1736 -/consensus/ @blukat29 @ian0371 @hyeonLewis -/console/ @blukat29 @hyunsooda @2dvorak @kjeom @JayChoi1736 -/contracts/ @blukat29 @ian0371 @hyeonLewis -/crypto/ @blukat29 @ian0371 @hyeonLewis -/datasync/ @aidan-kwon @blukat29 @jeongkyun-oh -/db_migration/ @aidan-kwon @blukat29 @jeongkyun-oh -/governance/ @blukat29 @ian0371 @hyeonLewis -/networks/ @yoomee1313 @JayChoi1736 @2dvorak @sjnam -/node/ @blukat29 @hyunsooda @2dvorak @kjeom @JayChoi1736 -/params/ @yoomee1313 @sjnam @ian0371 @hyeonLewis -/reward/ @blukat29 @ian0371 @hyeonLewis -/snapshot/ @aidan-kwon @blukat29 @jeongkyun-oh -/storage/ @aidan-kwon @blukat29 @jeongkyun-oh -/work/ @yoomee1313 @sjnam @ian0371 @hyeonLewis diff --git a/api/api_ethereum.go b/api/api_ethereum.go index 5e39915bd..0287187ce 100644 --- a/api/api_ethereum.go +++ b/api/api_ethereum.go @@ -693,7 +693,7 @@ func (api *EthereumAPI) Call(ctx context.Context, args EthTransactionArgs, block // EstimateGas returns an estimate of the amount of gas needed to execute the // given transaction against the current pending block. 
-func (api *EthereumAPI) EstimateGas(ctx context.Context, args EthTransactionArgs, blockNrOrHash *rpc.BlockNumberOrHash) (hexutil.Uint64, error) { +func (api *EthereumAPI) EstimateGas(ctx context.Context, args EthTransactionArgs, blockNrOrHash *rpc.BlockNumberOrHash, overrides *EthStateOverride) (hexutil.Uint64, error) { bcAPI := api.publicBlockChainAPI.b bNrOrHash := rpc.NewBlockNumberOrHashWithNumber(rpc.LatestBlockNumber) if blockNrOrHash != nil { @@ -703,7 +703,7 @@ func (api *EthereumAPI) EstimateGas(ctx context.Context, args EthTransactionArgs if rpcGasCap := bcAPI.RPCGasCap(); rpcGasCap != nil { gasCap = rpcGasCap.Uint64() } - return EthDoEstimateGas(ctx, bcAPI, args, bNrOrHash, gasCap) + return EthDoEstimateGas(ctx, bcAPI, args, bNrOrHash, overrides, gasCap) } // GetBlockTransactionCountByNumber returns the number of transactions in the block with the given block number. @@ -1434,7 +1434,7 @@ func EthDoCall(ctx context.Context, b Backend, args EthTransactionArgs, blockNrO return result, nil } -func EthDoEstimateGas(ctx context.Context, b Backend, args EthTransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, gasCap uint64) (hexutil.Uint64, error) { +func EthDoEstimateGas(ctx context.Context, b Backend, args EthTransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *EthStateOverride, gasCap uint64) (hexutil.Uint64, error) { // Use zero address if sender unspecified. 
if args.From == nil { args.From = new(common.Address) @@ -1459,11 +1459,14 @@ func EthDoEstimateGas(ctx context.Context, b Backend, args EthTransactionArgs, b if err != nil { return 0, err } + if err := overrides.Apply(state); err != nil { + return 0, err + } balance := state.GetBalance(*args.From) // from can't be nil executable := func(gas uint64) (bool, *blockchain.ExecutionResult, error) { args.Gas = (*hexutil.Uint64)(&gas) - result, err := EthDoCall(ctx, b, args, rpc.NewBlockNumberOrHashWithNumber(rpc.LatestBlockNumber), nil, b.RPCEVMTimeout(), gasCap) + result, err := EthDoCall(ctx, b, args, blockNrOrHash, overrides, b.RPCEVMTimeout(), gasCap) if err != nil { if errors.Is(err, blockchain.ErrIntrinsicGas) { return true, nil, nil // Special case, raise gas limit diff --git a/api/api_ethereum_test.go b/api/api_ethereum_test.go index b7efcb64d..94737df5e 100644 --- a/api/api_ethereum_test.go +++ b/api/api_ethereum_test.go @@ -2600,6 +2600,6 @@ func TestEthereumAPI_EstimateGas(t *testing.T) { defer mockCtrl.Finish() testEstimateGas(t, mockBackend, func(args EthTransactionArgs) (hexutil.Uint64, error) { - return api.EstimateGas(context.Background(), args, nil) + return api.EstimateGas(context.Background(), args, nil, nil) }) } diff --git a/api/api_public_blockchain.go b/api/api_public_blockchain.go index 628280b57..7fac30e78 100644 --- a/api/api_public_blockchain.go +++ b/api/api_public_blockchain.go @@ -400,15 +400,19 @@ func (s *PublicBlockChainAPI) EstimateComputationCost(ctx context.Context, args } // EstimateGas returns an estimate of the amount of gas needed to execute the given transaction against the latest block. 
-func (s *PublicBlockChainAPI) EstimateGas(ctx context.Context, args CallArgs) (hexutil.Uint64, error) { +func (s *PublicBlockChainAPI) EstimateGas(ctx context.Context, args CallArgs, blockNrOrHash *rpc.BlockNumberOrHash, overrides *EthStateOverride) (hexutil.Uint64, error) { gasCap := uint64(0) if rpcGasCap := s.b.RPCGasCap(); rpcGasCap != nil { gasCap = rpcGasCap.Uint64() } - return DoEstimateGas(ctx, s.b, args, s.b.RPCEVMTimeout(), new(big.Int).SetUint64(gasCap)) + bNrOrHash := rpc.NewBlockNumberOrHashWithNumber(rpc.LatestBlockNumber) + if blockNrOrHash != nil { + bNrOrHash = *blockNrOrHash + } + return DoEstimateGas(ctx, s.b, args, bNrOrHash, overrides, s.b.RPCEVMTimeout(), new(big.Int).SetUint64(gasCap)) } -func DoEstimateGas(ctx context.Context, b Backend, args CallArgs, timeout time.Duration, gasCap *big.Int) (hexutil.Uint64, error) { +func DoEstimateGas(ctx context.Context, b Backend, args CallArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *EthStateOverride, timeout time.Duration, gasCap *big.Int) (hexutil.Uint64, error) { var feeCap *big.Int if args.GasPrice != nil { feeCap = args.GasPrice.ToInt() @@ -416,16 +420,19 @@ func DoEstimateGas(ctx context.Context, b Backend, args CallArgs, timeout time.D feeCap = common.Big0 } - state, _, err := b.StateAndHeaderByNumber(ctx, rpc.LatestBlockNumber) + state, _, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash) if err != nil { return 0, err } + if err := overrides.Apply(state); err != nil { + return 0, err + } balance := state.GetBalance(args.From) // from can't be nil // Create a helper to check if a gas allowance results in an executable transaction executable := func(gas uint64) (bool, *blockchain.ExecutionResult, error) { args.Gas = hexutil.Uint64(gas) - result, _, err := DoCall(ctx, b, args, rpc.NewBlockNumberOrHashWithNumber(rpc.LatestBlockNumber), vm.Config{ComputationCostLimit: params.OpcodeComputationCostLimitInfinite}, timeout, gasCap) + result, _, err := DoCall(ctx, b, args, blockNrOrHash, 
vm.Config{ComputationCostLimit: params.OpcodeComputationCostLimitInfinite}, timeout, gasCap) if err != nil { if errors.Is(err, blockchain.ErrIntrinsicGas) { return true, nil, nil // Special case, raise gas limit diff --git a/api/api_public_blockchain_test.go b/api/api_public_blockchain_test.go index 19c3ceec5..1b7147aef 100644 --- a/api/api_public_blockchain_test.go +++ b/api/api_public_blockchain_test.go @@ -59,6 +59,6 @@ func TestKaiaAPI_EstimateGas(t *testing.T) { if ethArgs.Value != nil { args.Value = *ethArgs.Value } - return api.EstimateGas(context.Background(), args) + return api.EstimateGas(context.Background(), args, nil, nil) }) } diff --git a/api/tx_args.go b/api/tx_args.go index 0f8621d0f..939814779 100644 --- a/api/tx_args.go +++ b/api/tx_args.go @@ -702,7 +702,7 @@ func (args *EthTransactionArgs) setDefaults(ctx context.Context, b Backend) erro if rpcGasCap := b.RPCGasCap(); rpcGasCap != nil { gasCap = rpcGasCap.Uint64() } - estimated, err := EthDoEstimateGas(ctx, b, callArgs, pendingBlockNr, gasCap) + estimated, err := EthDoEstimateGas(ctx, b, callArgs, pendingBlockNr, nil, gasCap) if err != nil { return err } diff --git a/blockchain/blockchain.go b/blockchain/blockchain.go index 58ff3b0e7..d6b304d36 100644 --- a/blockchain/blockchain.go +++ b/blockchain/blockchain.go @@ -181,8 +181,10 @@ type BlockChain struct { currentBlock atomic.Value // Current head of the block chain currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!) 
- stateCache state.Database // State database to reuse between imports (contains state cache) - futureBlocks *lru.Cache // future blocks are blocks added for later processing + stateCache state.Database // State database to reuse between imports (contains state cache) + + // future blocks are blocks added for later processing + futureBlocks *lru.Cache quit chan struct{} // blockchain quit channel running int32 // running must be called atomically @@ -1088,8 +1090,8 @@ func (bc *BlockChain) Stop() { if snapBase, err = bc.snaps.Journal(bc.CurrentBlock().Root()); err != nil { logger.Error("Failed to journal state snapshot", "err", err) } + bc.snaps.Release() } - triedb := bc.stateCache.TrieDB() if !bc.isArchiveMode() { number := bc.CurrentBlock().NumberU64() diff --git a/blockchain/state/database.go b/blockchain/state/database.go index 6aa711907..720bc42e4 100644 --- a/blockchain/state/database.go +++ b/blockchain/state/database.go @@ -26,8 +26,8 @@ import ( "errors" "fmt" - "github.com/VictoriaMetrics/fastcache" "github.com/kaiachain/kaia/common" + "github.com/kaiachain/kaia/common/lru" "github.com/kaiachain/kaia/storage/database" "github.com/kaiachain/kaia/storage/statedb" ) @@ -144,7 +144,7 @@ func NewDatabaseWithNewCache(db database.DBManager, cacheConfig *statedb.TrieNod return &cachingDB{ db: statedb.NewDatabaseWithNewCache(db, cacheConfig), codeSizeCache: getCodeSizeCache(), - codeCache: fastcache.New(codeCacheSize), + codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize), } } @@ -155,14 +155,14 @@ func NewDatabaseWithExistingCache(db database.DBManager, cache statedb.TrieNodeC return &cachingDB{ db: statedb.NewDatabaseWithExistingCache(db, cache), codeSizeCache: getCodeSizeCache(), - codeCache: fastcache.New(codeCacheSize), + codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize), } } type cachingDB struct { db *statedb.Database codeSizeCache common.Cache - codeCache 
*fastcache.Cache + codeCache *lru.SizeConstrainedCache[common.Hash, []byte] } // OpenTrie opens the main account trie at a specific root hash. @@ -187,12 +187,12 @@ func (db *cachingDB) CopyTrie(t Trie) Trie { // ContractCode retrieves a particular contract's code. func (db *cachingDB) ContractCode(codeHash common.Hash) ([]byte, error) { - if code := db.codeCache.Get(nil, codeHash.Bytes()); len(code) > 0 { + if code, _ := db.codeCache.Get(codeHash); len(code) > 0 { return code, nil } code := db.db.DiskDB().ReadCode(codeHash) if len(code) > 0 { - db.codeCache.Set(codeHash.Bytes(), code) + db.codeCache.Add(codeHash, code) db.codeSizeCache.Add(codeHash, len(code)) return code, nil } @@ -201,7 +201,7 @@ func (db *cachingDB) ContractCode(codeHash common.Hash) ([]byte, error) { // DeleteCode deletes a particular contract's code. func (db *cachingDB) DeleteCode(codeHash common.Hash) { - db.codeCache.Del(codeHash.Bytes()) + db.codeCache.DeleteCode(codeHash) db.db.DiskDB().DeleteCode(codeHash) } @@ -209,12 +209,12 @@ func (db *cachingDB) DeleteCode(codeHash common.Hash) { // code can't be found in the cache, then check the existence with **new** // db scheme. func (db *cachingDB) ContractCodeWithPrefix(codeHash common.Hash) ([]byte, error) { - if code := db.codeCache.Get(nil, codeHash.Bytes()); len(code) > 0 { + if code, _ := db.codeCache.Get(codeHash); len(code) > 0 { return code, nil } code := db.db.DiskDB().ReadCodeWithPrefix(codeHash) if len(code) > 0 { - db.codeCache.Set(codeHash.Bytes(), code) + db.codeCache.Add(codeHash, code) db.codeSizeCache.Add(codeHash, len(code)) return code, nil } diff --git a/build/ci.go b/build/ci.go index be5181bcc..5fd00a63c 100644 --- a/build/ci.go +++ b/build/ci.go @@ -215,6 +215,9 @@ func buildFlags(env build.Environment) (flags []string) { // Pass the static link flag to the external linker. // By default, cmd/link will use external linking mode when non-standard cgo packages are involved. 
ld = append(ld, "-linkmode", "external", "-extldflags", "-static") + // Even if the binary is statically linked, some glibc features (e.g., libnss) can have dependencies on + // specific version of glibc. So we should try to avoid using them. + flags = append(flags, "-tags", "osusergo,netgo") } if env.IsKaiaRaceDetectionOn { flags = append(flags, "-race") diff --git a/build/package-tar.sh b/build/package-tar.sh index 96e670ca4..bcdfc04c8 100755 --- a/build/package-tar.sh +++ b/build/package-tar.sh @@ -9,7 +9,7 @@ set -e function printUsage { echo "Usage: ${0} [-b] " echo " -b: use Kairos configuration" - echo " : linux-386 | linux-amd64 | darwin-arm64 | windows-386 | windows-amd64" + echo " : linux-386 | linux-amd64 | linux-arm64 | darwin-arm64" echo " : kcn | kpn | ken | kbn | kscn | kspn | ksen | kgen | homi" echo "" echo " ${0} linux-amd64 kcn" @@ -39,20 +39,16 @@ case "$SUBCOMMAND" in PLATFORM_SUFFIX="linux-amd64" shift ;; - darwin-arm64) - PLATFORM_SUFFIX="darwin-arm64" - shift - ;; - windows-386) - PLATFORM_SUFFIX="windows-386" + linux-arm64) + PLATFORM_SUFFIX="linux-arm64" shift ;; - windows-amd64) - PLATFORM_SUFFIX="windows-amd64" + darwin-arm64) + PLATFORM_SUFFIX="darwin-arm64" shift ;; *) - echo "Undefined architecture for packaging. Supported architectures: linux-386, linux-amd64, darwin-arm64, windows-386, windows-amd64" + echo "Undefined architecture for packaging. Supported architectures: linux-386, linux-amd64, linux-arm64, darwin-arm64" printUsage ;; esac @@ -127,3 +123,6 @@ fi # Compress! mkdir -p packages tar czf packages/$KAIA_PACKAGE_NAME $PACK_NAME + +# Clean-up code except the packages folder +rm -rf ${PACK_NAME} diff --git a/common/lru/basiclru.go b/common/lru/basiclru.go new file mode 100644 index 000000000..c60f59706 --- /dev/null +++ b/common/lru/basiclru.go @@ -0,0 +1,223 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// Package lru implements generically-typed LRU caches. +package lru + +// BasicLRU is a simple LRU cache. +// +// This type is not safe for concurrent use. +// The zero value is not valid, instances must be created using NewCache. +type BasicLRU[K comparable, V any] struct { + list *list[K] + items map[K]cacheItem[K, V] + cap int +} + +type cacheItem[K any, V any] struct { + elem *listElem[K] + value V +} + +// NewBasicLRU creates a new LRU cache. +func NewBasicLRU[K comparable, V any](capacity int) BasicLRU[K, V] { + if capacity <= 0 { + capacity = 1 + } + c := BasicLRU[K, V]{ + items: make(map[K]cacheItem[K, V]), + list: newList[K](), + cap: capacity, + } + return c +} + +// Add adds a value to the cache. Returns true if an item was evicted to store the new item. +func (c *BasicLRU[K, V]) Add(key K, value V) (evicted bool) { + item, ok := c.items[key] + if ok { + // Already exists in cache. + item.value = value + c.items[key] = item + c.list.moveToFront(item.elem) + return false + } + + var elem *listElem[K] + if c.Len() >= c.cap { + elem = c.list.removeLast() + delete(c.items, elem.v) + evicted = true + } else { + elem = new(listElem[K]) + } + + // Store the new item. + // Note that, if another item was evicted, we re-use its list element here. 
+ elem.v = key + c.items[key] = cacheItem[K, V]{elem, value} + c.list.pushElem(elem) + return evicted +} + +// Contains reports whether the given key exists in the cache. +func (c *BasicLRU[K, V]) Contains(key K) bool { + _, ok := c.items[key] + return ok +} + +// Get retrieves a value from the cache. This marks the key as recently used. +func (c *BasicLRU[K, V]) Get(key K) (value V, ok bool) { + item, ok := c.items[key] + if !ok { + return value, false + } + c.list.moveToFront(item.elem) + return item.value, true +} + +// GetOldest retrieves the least-recently-used item. +// Note that this does not update the item's recency. +func (c *BasicLRU[K, V]) GetOldest() (key K, value V, ok bool) { + lastElem := c.list.last() + if lastElem == nil { + return key, value, false + } + key = lastElem.v + item := c.items[key] + return key, item.value, true +} + +// Len returns the current number of items in the cache. +func (c *BasicLRU[K, V]) Len() int { + return len(c.items) +} + +// Peek retrieves a value from the cache, but does not mark the key as recently used. +func (c *BasicLRU[K, V]) Peek(key K) (value V, ok bool) { + item, ok := c.items[key] + return item.value, ok +} + +// Purge empties the cache. +func (c *BasicLRU[K, V]) Purge() { + c.list.init() + for k := range c.items { + delete(c.items, k) + } +} + +// Remove drops an item from the cache. Returns true if the key was present in cache. +func (c *BasicLRU[K, V]) Remove(key K) bool { + item, ok := c.items[key] + if ok { + delete(c.items, key) + c.list.remove(item.elem) + } + return ok +} + +// RemoveOldest drops the least recently used item. +func (c *BasicLRU[K, V]) RemoveOldest() (key K, value V, ok bool) { + lastElem := c.list.last() + if lastElem == nil { + return key, value, false + } + + key = lastElem.v + item := c.items[key] + delete(c.items, key) + c.list.remove(lastElem) + return key, item.value, true +} + +// Keys returns all keys in the cache. 
+func (c *BasicLRU[K, V]) Keys() []K { + keys := make([]K, 0, len(c.items)) + return c.list.appendTo(keys) +} + +// list is a doubly-linked list holding items of type he. +// The zero value is not valid, use newList to create lists. +type list[T any] struct { + root listElem[T] +} + +type listElem[T any] struct { + next *listElem[T] + prev *listElem[T] + v T +} + +func newList[T any]() *list[T] { + l := new(list[T]) + l.init() + return l +} + +// init reinitializes the list, making it empty. +func (l *list[T]) init() { + l.root.next = &l.root + l.root.prev = &l.root +} + +// pushElem adds an element to the front of the list. +func (l *list[T]) pushElem(e *listElem[T]) { + e.prev = &l.root + e.next = l.root.next + l.root.next = e + e.next.prev = e +} + +// moveToFront makes 'node' the head of the list. +func (l *list[T]) moveToFront(e *listElem[T]) { + e.prev.next = e.next + e.next.prev = e.prev + l.pushElem(e) +} + +// remove removes an element from the list. +func (l *list[T]) remove(e *listElem[T]) { + e.prev.next = e.next + e.next.prev = e.prev + e.next, e.prev = nil, nil +} + +// removeLast removes the last element of the list. +func (l *list[T]) removeLast() *listElem[T] { + last := l.last() + if last != nil { + l.remove(last) + } + return last +} + +// last returns the last element of the list, or nil if the list is empty. +func (l *list[T]) last() *listElem[T] { + e := l.root.prev + if e == &l.root { + return nil + } + return e +} + +// appendTo appends all list elements to a slice. +func (l *list[T]) appendTo(slice []T) []T { + for e := l.root.prev; e != &l.root; e = e.prev { + slice = append(slice, e.v) + } + return slice +} diff --git a/common/lru/basiclru_test.go b/common/lru/basiclru_test.go new file mode 100644 index 000000000..29812bda1 --- /dev/null +++ b/common/lru/basiclru_test.go @@ -0,0 +1,255 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package lru + +import ( + crand "crypto/rand" + "fmt" + "io" + "math/rand" + "testing" +) + +// Some of these test cases were adapted +// from https://github.com/hashicorp/golang-lru/blob/master/simplelru/lru_test.go + +func TestBasicLRU(t *testing.T) { + cache := NewBasicLRU[int, int](128) + + for i := 0; i < 256; i++ { + cache.Add(i, i) + } + if cache.Len() != 128 { + t.Fatalf("bad len: %v", cache.Len()) + } + + // Check that Keys returns least-recent key first. 
+ keys := cache.Keys() + if len(keys) != 128 { + t.Fatal("wrong Keys() length", len(keys)) + } + for i, k := range keys { + v, ok := cache.Peek(k) + if !ok { + t.Fatalf("expected key %d be present", i) + } + if v != k { + t.Fatalf("expected %d == %d", k, v) + } + if v != i+128 { + t.Fatalf("wrong value at key %d: %d, want %d", i, v, i+128) + } + } + + for i := 0; i < 128; i++ { + _, ok := cache.Get(i) + if ok { + t.Fatalf("%d should be evicted", i) + } + } + for i := 128; i < 256; i++ { + _, ok := cache.Get(i) + if !ok { + t.Fatalf("%d should not be evicted", i) + } + } + + for i := 128; i < 192; i++ { + ok := cache.Remove(i) + if !ok { + t.Fatalf("%d should be in cache", i) + } + ok = cache.Remove(i) + if ok { + t.Fatalf("%d should not be in cache", i) + } + _, ok = cache.Get(i) + if ok { + t.Fatalf("%d should be deleted", i) + } + } + + // Request item 192. + cache.Get(192) + // It should be the last item returned by Keys(). + for i, k := range cache.Keys() { + if (i < 63 && k != i+193) || (i == 63 && k != 192) { + t.Fatalf("out of order key: %v", k) + } + } + + cache.Purge() + if cache.Len() != 0 { + t.Fatalf("bad len: %v", cache.Len()) + } + if _, ok := cache.Get(200); ok { + t.Fatalf("should contain nothing") + } +} + +func TestBasicLRUAddExistingKey(t *testing.T) { + cache := NewBasicLRU[int, int](1) + + cache.Add(1, 1) + cache.Add(1, 2) + + v, _ := cache.Get(1) + if v != 2 { + t.Fatal("wrong value:", v) + } +} + +// This test checks GetOldest and RemoveOldest. 
+func TestBasicLRUGetOldest(t *testing.T) { + cache := NewBasicLRU[int, int](128) + for i := 0; i < 256; i++ { + cache.Add(i, i) + } + + k, _, ok := cache.GetOldest() + if !ok { + t.Fatalf("missing") + } + if k != 128 { + t.Fatalf("bad: %v", k) + } + + k, _, ok = cache.RemoveOldest() + if !ok { + t.Fatalf("missing") + } + if k != 128 { + t.Fatalf("bad: %v", k) + } + + k, _, ok = cache.RemoveOldest() + if !ok { + t.Fatalf("missing oldest item") + } + if k != 129 { + t.Fatalf("wrong oldest item: %v", k) + } +} + +// Test that Add returns true/false if an eviction occurred +func TestBasicLRUAddReturnValue(t *testing.T) { + cache := NewBasicLRU[int, int](1) + if cache.Add(1, 1) { + t.Errorf("first add shouldn't have evicted") + } + if !cache.Add(2, 2) { + t.Errorf("second add should have evicted") + } +} + +// This test verifies that Contains doesn't change item recency. +func TestBasicLRUContains(t *testing.T) { + cache := NewBasicLRU[int, int](2) + cache.Add(1, 1) + cache.Add(2, 2) + if !cache.Contains(1) { + t.Errorf("1 should be in the cache") + } + cache.Add(3, 3) + if cache.Contains(1) { + t.Errorf("Contains should not have updated recency of 1") + } +} + +// Test that Peek doesn't update recent-ness +func TestBasicLRUPeek(t *testing.T) { + cache := NewBasicLRU[int, int](2) + cache.Add(1, 1) + cache.Add(2, 2) + if v, ok := cache.Peek(1); !ok || v != 1 { + t.Errorf("1 should be set to 1") + } + cache.Add(3, 3) + if cache.Contains(1) { + t.Errorf("should not have updated recent-ness of 1") + } +} + +func BenchmarkLRU(b *testing.B) { + var ( + capacity = 1000 + indexes = make([]int, capacity*20) + keys = make([]string, capacity) + values = make([][]byte, capacity) + ) + for i := range indexes { + indexes[i] = rand.Intn(capacity) + } + for i := range keys { + b := make([]byte, 32) + crand.Read(b) + keys[i] = string(b) + crand.Read(b) + values[i] = b + } + + var sink []byte + + b.Run("Add/BasicLRU", func(b *testing.B) { + cache := NewBasicLRU[int, int](capacity) + for 
i := 0; i < b.N; i++ { + cache.Add(i, i) + } + }) + b.Run("Get/BasicLRU", func(b *testing.B) { + cache := NewBasicLRU[string, []byte](capacity) + for i := 0; i < capacity; i++ { + index := indexes[i] + cache.Add(keys[index], values[index]) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + k := keys[indexes[i%len(indexes)]] + v, ok := cache.Get(k) + if ok { + sink = v + } + } + }) + + // // vs. github.com/hashicorp/golang-lru/simplelru + // b.Run("Add/simplelru.LRU", func(b *testing.B) { + // cache, _ := simplelru.NewLRU(capacity, nil) + // for i := 0; i < b.N; i++ { + // cache.Add(i, i) + // } + // }) + // b.Run("Get/simplelru.LRU", func(b *testing.B) { + // cache, _ := simplelru.NewLRU(capacity, nil) + // for i := 0; i < capacity; i++ { + // index := indexes[i] + // cache.Add(keys[index], values[index]) + // } + // + // b.ResetTimer() + // for i := 0; i < b.N; i++ { + // k := keys[indexes[i%len(indexes)]] + // v, ok := cache.Get(k) + // if ok { + // sink = v.([]byte) + // } + // } + // }) + + fmt.Fprintln(io.Discard, sink) +} diff --git a/common/lru/blob_lru.go b/common/lru/blob_lru.go new file mode 100644 index 000000000..77125d428 --- /dev/null +++ b/common/lru/blob_lru.go @@ -0,0 +1,104 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. 
If not, see . + +package lru + +import ( + "math" + "sync" +) + +// blobType is the type constraint for values stored in SizeConstrainedCache. +type blobType interface { + ~[]byte | ~string +} + +// SizeConstrainedCache is a cache where capacity is in bytes (instead of item count). When the cache +// is at capacity, and a new item is added, older items are evicted until the size +// constraint is met. +// +// OBS: This cache assumes that items are content-addressed: keys are unique per content. +// In other words: two Add(..) with the same key K, will always have the same value V. +type SizeConstrainedCache[K comparable, V blobType] struct { + size uint64 + maxSize uint64 + lru BasicLRU[K, V] + lock sync.Mutex +} + +// NewSizeConstrainedCache creates a new size-constrained LRU cache. +func NewSizeConstrainedCache[K comparable, V blobType](maxSize uint64) *SizeConstrainedCache[K, V] { + return &SizeConstrainedCache[K, V]{ + size: 0, + maxSize: maxSize, + lru: NewBasicLRU[K, V](math.MaxInt), + } +} + +// Add adds a value to the cache. Returns true if an eviction occurred. +// OBS: This cache assumes that items are content-addressed: keys are unique per content. +// In other words: two Add(..) with the same key K, will always have the same value V. +// OBS: The value is _not_ copied on Add, so the caller must not modify it afterwards. +func (c *SizeConstrainedCache[K, V]) Add(key K, value V) (evicted bool) { + c.lock.Lock() + defer c.lock.Unlock() + + // Unless it is already present, might need to evict something. + // OBS: If it is present, we still call Add internally to bump the recentness. + if !c.lru.Contains(key) { + targetSize := c.size + uint64(len(value)) + for targetSize > c.maxSize { + evicted = true + _, v, ok := c.lru.RemoveOldest() + if !ok { + // list is now empty. Break + break + } + targetSize -= uint64(len(v)) + } + c.size = targetSize + } + c.lru.Add(key, value) + return evicted +} + +// Get looks up a key's value from the cache. 
+func (c *SizeConstrainedCache[K, V]) Get(key K) (V, bool) { + c.lock.Lock() + defer c.lock.Unlock() + + return c.lru.Get(key) +} + +// DeleteCode deletes the code from the cache. It's for testing purpose. +// If it has the key, reduce the size and return true. +func (c *SizeConstrainedCache[K, V]) DeleteCode(key K) bool { + c.lock.Lock() + defer c.lock.Unlock() + + value, ok := c.lru.Get(key) + if !ok { + return false + } + + // it shouldn't happen. + if c.size-uint64(len(value)) < 0 { + return false + } + + c.size -= uint64(len(value)) + return c.lru.Remove(key) +} diff --git a/common/lru/blob_lru_test.go b/common/lru/blob_lru_test.go new file mode 100644 index 000000000..ca1b0ddd7 --- /dev/null +++ b/common/lru/blob_lru_test.go @@ -0,0 +1,155 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package lru + +import ( + "encoding/binary" + "fmt" + "testing" +) + +type testKey [8]byte + +func mkKey(i int) (key testKey) { + binary.LittleEndian.PutUint64(key[:], uint64(i)) + return key +} + +func TestSizeConstrainedCache(t *testing.T) { + lru := NewSizeConstrainedCache[testKey, []byte](100) + var want uint64 + // Add 11 items of 10 byte each. 
First item should be swapped out + for i := 0; i < 11; i++ { + k := mkKey(i) + v := fmt.Sprintf("value-%04d", i) + lru.Add(k, []byte(v)) + want += uint64(len(v)) + if want > 100 { + want = 100 + } + if have := lru.size; have != want { + t.Fatalf("size wrong, have %d want %d", have, want) + } + } + // Zero:th should be evicted + { + k := mkKey(0) + if _, ok := lru.Get(k); ok { + t.Fatalf("should be evicted: %v", k) + } + } + // Elems 1-11 should be present + for i := 1; i < 11; i++ { + k := mkKey(i) + want := fmt.Sprintf("value-%04d", i) + have, ok := lru.Get(k) + if !ok { + t.Fatalf("missing key %v", k) + } + if string(have) != want { + t.Fatalf("wrong value, have %v want %v", have, want) + } + } +} + +// This test adds inserting an element exceeding the max size. +func TestSizeConstrainedCacheOverflow(t *testing.T) { + lru := NewSizeConstrainedCache[testKey, []byte](100) + + // Add 10 items of 10 byte each, filling the cache + for i := 0; i < 10; i++ { + k := mkKey(i) + v := fmt.Sprintf("value-%04d", i) + lru.Add(k, []byte(v)) + } + // Add one single large elem. We expect it to swap out all entries. + { + k := mkKey(1337) + v := make([]byte, 200) + lru.Add(k, v) + } + // Elems 0-9 should be missing + for i := 1; i < 10; i++ { + k := mkKey(i) + if _, ok := lru.Get(k); ok { + t.Fatalf("should be evicted: %v", k) + } + } + // The size should be accurate + if have, want := lru.size, uint64(200); have != want { + t.Fatalf("size wrong, have %d want %d", have, want) + } + // Adding one small item should swap out the large one + { + i := 0 + k := mkKey(i) + v := fmt.Sprintf("value-%04d", i) + lru.Add(k, []byte(v)) + if have, want := lru.size, uint64(10); have != want { + t.Fatalf("size wrong, have %d want %d", have, want) + } + } +} + +// This checks what happens when inserting the same k/v multiple times. +func TestSizeConstrainedCacheSameItem(t *testing.T) { + lru := NewSizeConstrainedCache[testKey, []byte](100) + + // Add one 10 byte-item 10 times. 
+ k := mkKey(0) + v := fmt.Sprintf("value-%04d", 0) + for i := 0; i < 10; i++ { + lru.Add(k, []byte(v)) + } + + // The size should be accurate. + if have, want := lru.size, uint64(10); have != want { + t.Fatalf("size wrong, have %d want %d", have, want) + } +} + +// This tests that empty/nil values are handled correctly. +func TestSizeConstrainedCacheEmpties(t *testing.T) { + lru := NewSizeConstrainedCache[testKey, []byte](100) + + // This test abuses the lru a bit, using different keys for identical value(s). + for i := 0; i < 10; i++ { + lru.Add(testKey{byte(i)}, []byte{}) + lru.Add(testKey{byte(255 - i)}, nil) + } + + // The size should not count, only the values count. So this could be a DoS + // since it basically has no cap, and it is intentionally overloaded with + // different-keyed 0-length values. + if have, want := lru.size, uint64(0); have != want { + t.Fatalf("size wrong, have %d want %d", have, want) + } + + for i := 0; i < 10; i++ { + if v, ok := lru.Get(testKey{byte(i)}); !ok { + t.Fatalf("test %d: expected presence", i) + } else if v == nil { + t.Fatalf("test %d, v is nil", i) + } + + if v, ok := lru.Get(testKey{byte(255 - i)}); !ok { + t.Fatalf("test %d: expected presence", i) + } else if v != nil { + t.Fatalf("test %d, v is not nil", i) + } + } +} diff --git a/common/lru/lru.go b/common/lru/lru.go new file mode 100644 index 000000000..45965adb0 --- /dev/null +++ b/common/lru/lru.go @@ -0,0 +1,95 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package lru + +import "sync" + +// Cache is a LRU cache. +// This type is safe for concurrent use. +type Cache[K comparable, V any] struct { + cache BasicLRU[K, V] + mu sync.Mutex +} + +// NewCache creates an LRU cache. +func NewCache[K comparable, V any](capacity int) *Cache[K, V] { + return &Cache[K, V]{cache: NewBasicLRU[K, V](capacity)} +} + +// Add adds a value to the cache. Returns true if an item was evicted to store the new item. +func (c *Cache[K, V]) Add(key K, value V) (evicted bool) { + c.mu.Lock() + defer c.mu.Unlock() + + return c.cache.Add(key, value) +} + +// Contains reports whether the given key exists in the cache. +func (c *Cache[K, V]) Contains(key K) bool { + c.mu.Lock() + defer c.mu.Unlock() + + return c.cache.Contains(key) +} + +// Get retrieves a value from the cache. This marks the key as recently used. +func (c *Cache[K, V]) Get(key K) (value V, ok bool) { + c.mu.Lock() + defer c.mu.Unlock() + + return c.cache.Get(key) +} + +// Len returns the current number of items in the cache. +func (c *Cache[K, V]) Len() int { + c.mu.Lock() + defer c.mu.Unlock() + + return c.cache.Len() +} + +// Peek retrieves a value from the cache, but does not mark the key as recently used. +func (c *Cache[K, V]) Peek(key K) (value V, ok bool) { + c.mu.Lock() + defer c.mu.Unlock() + + return c.cache.Peek(key) +} + +// Purge empties the cache. +func (c *Cache[K, V]) Purge() { + c.mu.Lock() + defer c.mu.Unlock() + + c.cache.Purge() +} + +// Remove drops an item from the cache. Returns true if the key was present in cache. 
+func (c *Cache[K, V]) Remove(key K) bool { + c.mu.Lock() + defer c.mu.Unlock() + + return c.cache.Remove(key) +} + +// Keys returns all keys of items currently in the LRU. +func (c *Cache[K, V]) Keys() []K { + c.mu.Lock() + defer c.mu.Unlock() + + return c.cache.Keys() +} diff --git a/consensus/istanbul/core/vrank.go b/consensus/istanbul/core/vrank.go index caba944a1..15a63fed3 100644 --- a/consensus/istanbul/core/vrank.go +++ b/consensus/istanbul/core/vrank.go @@ -155,7 +155,7 @@ func (v *Vrank) Log() { v.updateMetrics() - logger.Info("VRank", "seq", v.view.Sequence.Int64(), + logger.Debug("VRank", "seq", v.view.Sequence.Int64(), "round", v.view.Round.Int64(), "bitmap", v.Bitmap(), "late", encodeDurationBatch(lateCommits), diff --git a/consensus/istanbul/validator/weighted.go b/consensus/istanbul/validator/weighted.go index b52e78d3a..7405d25a6 100644 --- a/consensus/istanbul/validator/weighted.go +++ b/consensus/istanbul/validator/weighted.go @@ -131,6 +131,8 @@ func RecoverWeightedCouncilProposer(valSet istanbul.ValidatorSet, proposerAddrs _, val := weightedCouncil.GetByAddress(proposerAddr) if val == nil { logger.Error("Proposer is not available now.", "proposer address", proposerAddr) + // The valSet.proposers hasn't been used since Randao HF. 
+ continue } proposers = append(proposers, val) diff --git a/console/jsre/deps/web3.js b/console/jsre/deps/web3.js index 2077d9f44..745e30fed 100644 --- a/console/jsre/deps/web3.js +++ b/console/jsre/deps/web3.js @@ -3848,8 +3848,10 @@ var inputBlockNumberFormatter = function (blockNumber) { return undefined; } else if (isPredefinedBlockNumber(blockNumber)) { return blockNumber; + } else if (/^\d+$/.test(blockNumber) || /^0x[0-9a-fA-F]+$/.test(blockNumber)) { // test if input is decmial or hex + return utils.toHex(blockNumber); } - return utils.toHex(blockNumber); + throw new Error(`input block number(${blockNumber}) is invalid`); }; var inputEmptyFormatter = function (a) { diff --git a/console/web3ext/web3ext.go b/console/web3ext/web3ext.go index b1b540ef5..e314f446b 100644 --- a/console/web3ext/web3ext.go +++ b/console/web3ext/web3ext.go @@ -71,8 +71,8 @@ web3._extend({ new web3._extend.Method({ name: 'estimateGas', call: 'eth_estimateGas', - params: 2, - inputFormatter: [web3._extend.formatters.inputCallFormatter, web3._extend.formatters.inputBlockNumberFormatter], + params: 3, + inputFormatter: [web3._extend.formatters.inputCallFormatter, web3._extend.formatters.inputBlockNumberFormatter, null], outputFormatter: web3._extend.utils.toDecimal }), new web3._extend.Method({ diff --git a/contracts/docs/PublicDelegation.md b/contracts/docs/PublicDelegation.md index affe95c81..8949e8dff 100644 --- a/contracts/docs/PublicDelegation.md +++ b/contracts/docs/PublicDelegation.md @@ -4,7 +4,7 @@ The public delegation (PD) is a non-transferable ERC-4626 based contract that al It mints the tokenized shares to the delegator, which is called `pdKAIA`. The `pdKAIA` is a non-transferable interest-bearing token that represents the delegator's share of the total KAIA delegated to the GC. As rewards are compounded, the exchange rate of `pdKAIA` to KAIA increases. The delegator can burn the `pdKAIA` to get the KAIA back. All the math comes from the ERC-4626 standard. 
-Unlike usual ERC-4626 vault contracts, the reward is directly distributed to PD contract by state modification at the consensus-level. The reward will be automatically compounded to the CnSV3 contract. +Unlike usual ERC-4626 vault contracts, the KAIA rewards are directly distributed to PD contract by state modification at the consensus-level. It is deployed during setup process of CnSV3 contract, namely `setPublicDelegation` function. The PD is only compatible with the CnSV3 contract. @@ -37,7 +37,7 @@ The PD contract can collect the commission from the rewards. The commission info Related functions: - `updateCommissionTo(addr)`: Update the commission receiver address. -- `updateCommissionRate(commissionRate)`: Update the commission rate. `MAX_COMMISSION_RATE` is 3,000, which is 30%. +- `updateCommissionRate(commissionRate)`: Update the commission rate. `MAX_COMMISSION_RATE` is 10,000, which is 100%. The commission is calculated and sent to the commission receiver whenever the rewards are compounded. The commission is calculated as follows: @@ -56,7 +56,7 @@ The delegators will receive the corresponding `pdKAIA` as shares. The shares are $pdKAIA = ⌊stakedKAIA * totalShares / totalStakedKAIA⌋$ -Note that `totalStakedKAIA⌋` includes the current rewards, which are not yet compounded. +Note that `totalStakedKAIA` includes the current rewards, which are not yet compounded. 
### Withdrawal diff --git a/go.mod b/go.mod index 55880dd99..3b87654f2 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.22.1 require ( github.com/Shopify/sarama v1.26.4 - github.com/VictoriaMetrics/fastcache v1.6.0 + github.com/VictoriaMetrics/fastcache v1.12.2 github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d github.com/aristanetworks/goarista v0.0.0-20191001182449-186a6201b8ef github.com/aws/aws-sdk-go v1.34.28 diff --git a/go.sum b/go.sum index 6fabd3fa2..04ecb7772 100644 --- a/go.sum +++ b/go.sum @@ -69,8 +69,8 @@ github.com/Shopify/sarama v1.26.4 h1:+17TxUq/PJEAfZAll0T7XJjSgQWCpaQSoki/x5yN8o8 github.com/Shopify/sarama v1.26.4/go.mod h1:NbSGBSSndYaIhRcBtY9V0U7AyH+x71bG668AuWys/yU= github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c3fqvvgKm5o= -github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNuXJrTP0zS7DqpHGGTw= +github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI= +github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -296,7 +296,6 @@ github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= 
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -849,7 +848,6 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -860,6 +858,7 @@ golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0 
h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= diff --git a/governance/api.go b/governance/api.go index 95c40d4da..9f40f7b40 100644 --- a/governance/api.go +++ b/governance/api.go @@ -86,7 +86,7 @@ func (api *GovernanceKaiaAPI) NodeAddress() common.Address { // GetRewards returns detailed information of the block reward at a given block number. func (api *GovernanceKaiaAPI) GetRewards(num *rpc.BlockNumber) (*reward.RewardSpec, error) { blockNumber := uint64(0) - if num == nil || *num == rpc.LatestBlockNumber { + if num == nil || *num == rpc.LatestBlockNumber || *num == rpc.PendingBlockNumber { blockNumber = api.chain.CurrentBlock().NumberU64() } else { blockNumber = uint64(num.Int64()) @@ -331,7 +331,32 @@ func getParams(governance Engine, num *rpc.BlockNumber) (map[string]interface{}, if err != nil { return nil, err } - return pset.StrMap(), nil + sm := pset.StrMap() + + // To avoid confusion, override some parameters that are deprecated after hardforks. + // e.g., stakingupdateinterval is shown as 86400 but actually irrelevant (i.e. updated every block) + rule := governance.BlockChain().Config().Rules(new(big.Int).SetUint64(blockNumber)) + if rule.IsKore { + // Gini option deprecated since Kore, as All committee members have an equal chance + // of being elected block proposers. + if _, ok := sm["reward.useginicoeff"]; ok { + sm["reward.useginicoeff"] = false + } + } + if rule.IsRandao { + // Block proposer is randomly elected at every block with Randao, + // no more precalculated proposer list. + if _, ok := sm["reward.proposerupdateinterval"]; ok { + sm["reward.proposerupdateinterval"] = 1 + } + } + if rule.IsKaia { + // Staking information updated every block since Kaia. 
+ if _, ok := sm["reward.stakingupdateinterval"]; ok { + sm["reward.stakingupdateinterval"] = 1 + } + } + return sm, nil } func (api *GovernanceAPI) GetStakingInfo(num *rpc.BlockNumber) (*reward.StakingInfo, error) { @@ -365,8 +390,11 @@ func checkStateForStakingInfo(governance Engine, blockNumber uint64) error { if !governance.BlockChain().Config().IsKaiaForkEnabled(big.NewInt(int64(blockNumber + 1))) { return nil } - - _, err := governance.BlockChain().StateAt(governance.BlockChain().GetHeaderByNumber(blockNumber).Root) + header := governance.BlockChain().GetHeaderByNumber(blockNumber) + if header == nil { + return errUnknownBlock + } + _, err := governance.BlockChain().StateAt(header.Root) return err } @@ -445,6 +473,7 @@ func getChainConfig(governance Engine, num *rpc.BlockNumber) *params.ChainConfig return nil } + // Fill in the non-governance-parameter fields of ChainConfig latestConfig := governance.BlockChain().Config() config := pset.ToChainConfig() config.ChainID = latestConfig.ChainID @@ -462,6 +491,24 @@ func getChainConfig(governance Engine, num *rpc.BlockNumber) *params.ChainConfig config.Kip160ContractAddress = latestConfig.Kip160ContractAddress config.RandaoCompatibleBlock = latestConfig.RandaoCompatibleBlock + // To avoid confusion, override some parameters that are deprecated after hardforks. + // e.g., stakingupdateinterval is shown as 86400 but actually irrelevant (i.e. updated every block) + rule := governance.BlockChain().Config().Rules(new(big.Int).SetUint64(blocknum)) + if rule.IsKore { + // Gini option deprecated since Kore, as All committee members have an equal chance + // of being elected block proposers. + config.Governance.Reward.UseGiniCoeff = false + } + if rule.IsRandao { + // Block proposer is randomly elected at every block with Randao, + // no more precalculated proposer list. + config.Governance.Reward.ProposerUpdateInterval = 1 + } + if rule.IsKaia { + // Staking information updated every block since Kaia. 
+ config.Governance.Reward.StakingUpdateInterval = 1 + } + return config } diff --git a/node/cn/api.go b/node/cn/api.go index 344908eba..23548a8bb 100644 --- a/node/cn/api.go +++ b/node/cn/api.go @@ -433,10 +433,12 @@ func (api *PrivateDebugAPI) StorageRangeAt(ctx context.Context, blockHash common if block == nil { return StorageRangeResult{}, fmt.Errorf("block %#x not found", blockHash) } - _, _, _, statedb, err := api.cn.stateAtTransaction(block, txIndex, 0) + _, _, _, statedb, release, err := api.cn.stateAtTransaction(block, txIndex, 0) if err != nil { return StorageRangeResult{}, err } + defer release() + st := statedb.StorageTrie(contractAddress) if st == nil { return StorageRangeResult{}, fmt.Errorf("account %x doesn't exist", contractAddress) diff --git a/node/cn/api_backend.go b/node/cn/api_backend.go index 58ba82f51..d5091f35a 100644 --- a/node/cn/api_backend.go +++ b/node/cn/api_backend.go @@ -41,6 +41,7 @@ import ( "github.com/kaiachain/kaia/governance" "github.com/kaiachain/kaia/networks/rpc" "github.com/kaiachain/kaia/node/cn/gasprice" + "github.com/kaiachain/kaia/node/cn/tracers" "github.com/kaiachain/kaia/params" "github.com/kaiachain/kaia/reward" "github.com/kaiachain/kaia/storage/database" @@ -399,11 +400,11 @@ func (b *CNAPIBackend) Engine() consensus.Engine { return b.cn.engine } -func (b *CNAPIBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (*state.StateDB, error) { - return b.cn.stateAtBlock(block, reexec, base, checkLive, preferDisk) +func (b *CNAPIBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (*state.StateDB, tracers.StateReleaseFunc, error) { + return b.cn.stateAtBlock(block, reexec, base, readOnly, preferDisk) } -func (b *CNAPIBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec 
uint64) (blockchain.Message, vm.BlockContext, vm.TxContext, *state.StateDB, error) { +func (b *CNAPIBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (blockchain.Message, vm.BlockContext, vm.TxContext, *state.StateDB, tracers.StateReleaseFunc, error) { return b.cn.stateAtTransaction(block, txIndex, reexec) } diff --git a/node/cn/state_accessor.go b/node/cn/state_accessor.go index 4b83e57a9..93a0c5ca3 100644 --- a/node/cn/state_accessor.go +++ b/node/cn/state_accessor.go @@ -30,39 +30,59 @@ import ( "github.com/kaiachain/kaia/blockchain/types" "github.com/kaiachain/kaia/blockchain/vm" "github.com/kaiachain/kaia/common" + "github.com/kaiachain/kaia/node/cn/tracers" "github.com/kaiachain/kaia/reward" statedb2 "github.com/kaiachain/kaia/storage/statedb" ) +// noopReleaser is returned in case there is no operation expected +// for releasing state. +var noopReleaser = tracers.StateReleaseFunc(func() {}) + // stateAtBlock retrieves the state database associated with a certain block. // If no state is locally available for the given block, a number of blocks // are attempted to be reexecuted to generate the desired state. The optional -// base layer statedb can be passed then it's regarded as the statedb of the +// base layer statedb can be provided which is regarded as the statedb of the // parent block. +// +// An additional release function will be returned if the requested state is +// available. Release is expected to be invoked when the returned state is no longer needed. +// Its purpose is to prevent resource leaking. Though it can be noop in some cases. 
+// // Parameters: -// - block: The block for which we want the state (== state at the stateRoot of the parent) -// - reexec: The maximum number of blocks to reprocess trying to obtain the desired state -// - base: If the caller is tracing multiple blocks, the caller can provide the parent state -// continuously from the callsite. -// - checklive: if true, then the live 'blockchain' state database is used. If the caller want to -// perform Commit or other 'save-to-disk' changes, this should be set to false to avoid -// storing trash persistently -// - preferDisk: this arg can be used by the caller to signal that even though the 'base' is provided, -// it would be preferrable to start from a fresh state, if we have it on disk. -func (cn *CN) stateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (statedb *state.StateDB, err error) { +// - block: The block for which we want the state(state = block.Root) +// - reexec: The maximum number of blocks to reprocess trying to obtain the desired state +// - base: If the caller is tracing multiple blocks, the caller can provide the parent +// state continuously from the callsite. +// - readOnly: If true, then the live 'blockchain' state database is used. No mutation should +// be made from caller, e.g. perform Commit or other 'save-to-disk' changes. +// Otherwise, the trash generated by caller may be persisted permanently. +// - preferDisk: this arg can be used by the caller to signal that even though the 'base' is +// provided, it would be preferable to start from a fresh state, if we have it +// on disk. +func (cn *CN) stateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (statedb *state.StateDB, release tracers.StateReleaseFunc, err error) { var ( current *types.Block database state.Database report = true origin = block.NumberU64() ) - // Check the live database first if we have the state fully available, use that. 
- if checkLive { - statedb, err = cn.blockchain.StateAt(block.Root()) - if err == nil { - return statedb, nil + // The state is only for reading purposes, check the state presence in + // live database. + if readOnly { + // The state is available in live database, create a reference + // on top to prevent garbage collection and return a release + // function to deref it. + if statedb, err = cn.blockchain.StateAt(block.Root()); err == nil { + statedb.Database().TrieDB().ReferenceRoot(block.Root()) + return statedb, func() { + statedb.Database().TrieDB().Dereference(block.Root()) + }, nil } } + // The state is both for reading and writing, or it's unavailable in disk, + // try to construct/recover the state over an ephemeral trie.Database for + // isolating the live one. if base != nil { if preferDisk { // Create an ephemeral trie.Database for isolating the live one. Otherwise @@ -70,27 +90,37 @@ func (cn *CN) stateAtBlock(block *types.Block, reexec uint64, base *state.StateD database = state.NewDatabaseWithExistingCache(cn.ChainDB(), cn.blockchain.StateCache().TrieDB().TrieNodeCache()) if statedb, err = state.New(block.Root(), database, nil, nil); err == nil { logger.Info("Found disk backend for state trie", "root", block.Root(), "number", block.Number()) - return statedb, nil + return statedb, noopReleaser, nil } } // The optional base statedb is given, mark the start point as parent block statedb, database, report = base, base.Database(), false current = cn.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1) } else { - // Otherwise try to reexec blocks until we find a state or reach our limit + // Otherwise, try to reexec blocks until we find a state or reach our limit current = block // Create an ephemeral trie.Database for isolating the live one. Otherwise // the internal junks created by tracing will be persisted into the disk. 
database = state.NewDatabaseWithExistingCache(cn.ChainDB(), cn.blockchain.StateCache().TrieDB().TrieNodeCache()) + // If we didn't check the live database, do check state over ephemeral database, + // otherwise we would rewind past a persisted block (specific corner case is + // chain tracing from the genesis). + if !readOnly { + statedb, err = state.New(current.Root(), database, nil, nil) + if err == nil { + return statedb, noopReleaser, nil + } + } + // Database does not have the state for the given block, try to regenerate for i := uint64(0); i < reexec; i++ { if current.NumberU64() == 0 { - return nil, errors.New("genesis state is missing") + return nil, nil, errors.New("genesis state is missing") } parent := cn.blockchain.GetBlock(current.ParentHash(), current.NumberU64()-1) if parent == nil { - return nil, fmt.Errorf("missing block %v %d", current.ParentHash(), current.NumberU64()-1) + return nil, nil, fmt.Errorf("missing block %v %d", current.ParentHash(), current.NumberU64()-1) } current = parent @@ -102,13 +132,14 @@ func (cn *CN) stateAtBlock(block *types.Block, reexec uint64, base *state.StateD if err != nil { switch err.(type) { case *statedb2.MissingNodeError: - return nil, fmt.Errorf("historical state unavailable. tried regeneration but not possible, possibly due to state migration/pruning or global state saving interval is bigger than reexec value (reexec=%d)", reexec) + return nil, nil, fmt.Errorf("historical state unavailable. tried regeneration but not possible, possibly due to state migration/pruning or global state saving interval is bigger than reexec value (reexec=%d)", reexec) default: - return nil, err + return nil, nil, err } } } - // State was available at historical point, regenerate + // State is available at historical point, re-execute the blocks on top for + // the desired state. 
var ( start = time.Now() logged time.Time @@ -129,29 +160,30 @@ func (cn *CN) stateAtBlock(block *types.Block, reexec uint64, base *state.StateD } // Quit the state regeneration if time limit exceeds if cn.config.DisableUnsafeDebug && time.Since(start) > cn.config.StateRegenerationTimeLimit { - return nil, fmt.Errorf("this request has queried old states too long since it exceeds the state regeneration time limit(%s)", cn.config.StateRegenerationTimeLimit.String()) + return nil, nil, fmt.Errorf("this request has queried old states too long since it exceeds the state regeneration time limit(%s)", cn.config.StateRegenerationTimeLimit.String()) } // Preload StakingInfo from the current block and state. Needed for next block's engine.Finalize() post-Kaia. preloadedStakingBlockNums = append(preloadedStakingBlockNums, current.NumberU64()) if err := reward.PreloadStakingInfoWithState(current.Header(), statedb); err != nil { - return nil, fmt.Errorf("preloading staking info from block %d failed: %v", current.NumberU64(), err) + return nil, nil, fmt.Errorf("preloading staking info from block %d failed: %v", current.NumberU64(), err) } // Retrieve the next block to regenerate and process it next := current.NumberU64() + 1 if current = cn.blockchain.GetBlockByNumber(next); current == nil { - return nil, fmt.Errorf("block #%d not found", next) + return nil, nil, fmt.Errorf("block #%d not found", next) } _, _, _, _, _, err := cn.blockchain.Processor().Process(current, statedb, vm.Config{}) if err != nil { - return nil, fmt.Errorf("processing block %d failed: %v", current.NumberU64(), err) + return nil, nil, fmt.Errorf("processing block %d failed: %v", current.NumberU64(), err) } // Finalize the state so any modifications are written to the trie root, err := statedb.Commit(true) if err != nil { - return nil, err + return nil, nil, err } - if err := statedb.Reset(root); err != nil { - return nil, fmt.Errorf("state reset after block %d failed: %v", current.NumberU64(), err) + 
statedb, err = state.New(root, database, nil, nil) + if err != nil { + return nil, nil, fmt.Errorf("state reset after block %d failed: %v", current.NumberU64(), err) } database.TrieDB().ReferenceRoot(root) if !common.EmptyHash(parent) { @@ -160,7 +192,7 @@ func (cn *CN) stateAtBlock(block *types.Block, reexec uint64, base *state.StateD err = fmt.Errorf("mistmatching state root block expected %x reexecuted %x", current.Header().Root, root) // Logging here because something went wrong when the state roots disagree even if the execution was successful. logger.Error("incorrectly regenerated historical state", "block", current.NumberU64(), "err", err) - return nil, fmt.Errorf("incorrectly regenerated historical state for block %d: %v", current.NumberU64(), err) + return nil, nil, fmt.Errorf("incorrectly regenerated historical state for block %d: %v", current.NumberU64(), err) } } parent = root @@ -170,28 +202,28 @@ func (cn *CN) stateAtBlock(block *types.Block, reexec uint64, base *state.StateD logger.Info("Historical state regenerated", "block", current.NumberU64(), "elapsed", time.Since(start), "nodes", nodes, "preimages", imgs) } - return statedb, nil + return statedb, func() { database.TrieDB().Dereference(block.Root()) }, nil } // stateAtTransaction returns the execution environment of a certain transaction. -func (cn *CN) stateAtTransaction(block *types.Block, txIndex int, reexec uint64) (blockchain.Message, vm.BlockContext, vm.TxContext, *state.StateDB, error) { +func (cn *CN) stateAtTransaction(block *types.Block, txIndex int, reexec uint64) (blockchain.Message, vm.BlockContext, vm.TxContext, *state.StateDB, tracers.StateReleaseFunc, error) { // Short circuit if it's genesis block. 
if block.NumberU64() == 0 { - return nil, vm.BlockContext{}, vm.TxContext{}, nil, errors.New("no transaction in genesis") + return nil, vm.BlockContext{}, vm.TxContext{}, nil, nil, errors.New("no transaction in genesis") } // Create the parent state database parent := cn.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1) if parent == nil { - return nil, vm.BlockContext{}, vm.TxContext{}, nil, fmt.Errorf("parent %#x not found", block.ParentHash()) + return nil, vm.BlockContext{}, vm.TxContext{}, nil, nil, fmt.Errorf("parent %#x not found", block.ParentHash()) } // Lookup the statedb of parent block from the live database, // otherwise regenerate it on the flight. - statedb, err := cn.stateAtBlock(parent, reexec, nil, true, false) + statedb, release, err := cn.stateAtBlock(parent, reexec, nil, true, false) if err != nil { - return nil, vm.BlockContext{}, vm.TxContext{}, nil, err + return nil, vm.BlockContext{}, vm.TxContext{}, nil, nil, err } if txIndex == 0 && len(block.Transactions()) == 0 { - return nil, vm.BlockContext{}, vm.TxContext{}, statedb, nil + return nil, vm.BlockContext{}, vm.TxContext{}, statedb, release, nil } // Recompute transactions up to the target index. 
signer := types.MakeSigner(cn.blockchain.Config(), block.Number()) @@ -200,22 +232,22 @@ func (cn *CN) stateAtTransaction(block *types.Block, txIndex int, reexec uint64) msg, err := tx.AsMessageWithAccountKeyPicker(signer, statedb, block.NumberU64()) if err != nil { logger.Warn("stateAtTransition failed", "hash", tx.Hash(), "block", block.NumberU64(), "err", err) - return nil, vm.BlockContext{}, vm.TxContext{}, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) + return nil, vm.BlockContext{}, vm.TxContext{}, nil, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) } txContext := blockchain.NewEVMTxContext(msg, block.Header(), cn.chainConfig) blockContext := blockchain.NewEVMBlockContext(block.Header(), cn.blockchain, nil) if idx == txIndex { - return msg, blockContext, txContext, statedb, nil + return msg, blockContext, txContext, statedb, release, nil } // Not yet the searched for transaction, execute on top of the current state vmenv := vm.NewEVM(blockContext, txContext, statedb, cn.blockchain.Config(), &vm.Config{}) if _, err := blockchain.ApplyMessage(vmenv, msg); err != nil { - return nil, vm.BlockContext{}, vm.TxContext{}, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) + return nil, vm.BlockContext{}, vm.TxContext{}, nil, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) } // Ensure any modifications are committed to the state // Since Kaia is forked after EIP158/161 (a.k.a Spurious Dragon), deleting empty object is always effective statedb.Finalise(true, true) } - return nil, vm.BlockContext{}, vm.TxContext{}, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash()) + return nil, vm.BlockContext{}, vm.TxContext{}, nil, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash()) } diff --git a/node/cn/tracers/api.go b/node/cn/tracers/api.go index f6cd7b3dc..d5ccf7643 100644 --- a/node/cn/tracers/api.go +++ b/node/cn/tracers/api.go @@ -80,6 +80,10 @@ 
var ( heavyAPIRequestCount int32 = 0 ) +// StateReleaseFunc is used to deallocate resources held by constructing a +// historical state for tracing purposes. +type StateReleaseFunc func() + // Backend interface provides the common API services with access to necessary functions. type Backend interface { HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) @@ -94,8 +98,8 @@ type Backend interface { // StateAtBlock returns the state corresponding to the stateroot of the block. // N.B: For executing transactions on block N, the required stateRoot is block N-1, // so this method should be called with the parent. - StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (*state.StateDB, error) - StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (blockchain.Message, vm.BlockContext, vm.TxContext, *state.StateDB, error) + StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (*state.StateDB, StateReleaseFunc, error) + StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (blockchain.Message, vm.BlockContext, vm.TxContext, *state.StateDB, StateReleaseFunc, error) } // CommonAPI contains @@ -214,7 +218,7 @@ type txTraceResult struct { type blockTraceTask struct { statedb *state.StateDB // Intermediate state prepped for tracing block *types.Block // Block to trace the transactions from - rootref common.Hash // Trie root reference held for this task + release StateReleaseFunc // The function to release the held resource for this task results []*txTraceResult // Trace results procudes by the task } @@ -274,8 +278,36 @@ func (api *UnsafeAPI) TraceChain(ctx context.Context, start, end rpc.BlockNumber return sub, err } +// releaser is a helper tool responsible for caching the release +// callbacks of tracing state. 
+type releaser struct { + releases []StateReleaseFunc + lock sync.Mutex +} + +func (r *releaser) add(release StateReleaseFunc) { + r.lock.Lock() + defer r.lock.Unlock() + + r.releases = append(r.releases, release) +} + +func (r *releaser) call() { + r.lock.Lock() + defer r.lock.Unlock() + + for _, release := range r.releases { + release() + } + r.releases = r.releases[:0] +} + // traceChain configures a new tracer according to the provided configuration, and -// executes all the transactions contained within. +// executes all the transactions contained within. The tracing chain range includes +// the end block but excludes the start one. The return value will be one item per +// transaction, dependent on the requested tracer. +// The tracing procedure should be aborted in case the closed signal is received. +// // The traceChain operates in two modes: subscription mode and rpc mode // - if notifier and sub is not nil, it works as a subscription mode and returns nothing // - if those parameters are nil, it works as a rpc mode and returns the block trace results, so it can pass the result through rpc-call @@ -297,15 +329,17 @@ func (api *CommonAPI) traceChain(start, end *types.Block, config *TraceConfig, n tasks = make(chan *blockTraceTask, threads) results = make(chan *blockTraceTask, threads) localctx = context.Background() + reler = new(releaser) ) for th := 0; th < threads; th++ { pend.Add(1) go func() { defer pend.Done() - // Fetch and execute the next block trace tasks + // Fetch and execute the block trace tasks for task := range tasks { signer := types.MakeSigner(api.backend.ChainConfig(), task.block.Number()) + blockCtx := blockchain.NewEVMBlockContext(task.block.Header(), newChainContext(localctx, api.backend), nil) // Trace all the transactions contained within for i, tx := range task.block.Transactions() { @@ -317,7 +351,6 @@ func (api *CommonAPI) traceChain(start, end *types.Block, config *TraceConfig, n } txCtx := blockchain.NewEVMTxContext(msg, 
task.block.Header(), api.backend.ChainConfig()) - blockCtx := blockchain.NewEVMBlockContext(task.block.Header(), newChainContext(localctx, api.backend), nil) res, err := api.traceTx(localctx, msg, blockCtx, txCtx, task.statedb, config) if err != nil { @@ -328,6 +361,10 @@ func (api *CommonAPI) traceChain(start, end *types.Block, config *TraceConfig, n task.statedb.Finalise(true, true) task.results[i] = &txTraceResult{TxHash: tx.Hash(), Result: res} } + // Tracing state is used up, queue it for de-referencing + reler.add(task.release) + + // Stream the result back to the result catcher or abort on teardown if notifier != nil { // Stream the result back to the user or abort on teardown select { @@ -342,22 +379,26 @@ func (api *CommonAPI) traceChain(start, end *types.Block, config *TraceConfig, n }() } // Start a goroutine to feed all the blocks into the tracers - begin := time.Now() go func() { var ( logged time.Time + begin = time.Now() number uint64 traced uint64 failed error - parent common.Hash statedb *state.StateDB + release StateReleaseFunc ) // Ensure everything is properly cleaned up on any exit path defer func() { close(tasks) pend.Wait() + // Clean out any pending derefs. 
+ reler.call() + + // Log the chain result switch { case failed != nil: logger.Warn("Chain tracing failed", "start", start.NumberU64(), "end", end.NumberU64(), "transactions", traced, "elapsed", time.Since(begin), "err", failed) @@ -368,7 +409,7 @@ func (api *CommonAPI) traceChain(start, end *types.Block, config *TraceConfig, n } close(results) }() - var preferDisk bool + // Feed all the blocks both into the tracer, as well as fast process concurrently for number = start.NumberU64(); number < end.NumberU64(); number++ { if notifier != nil { @@ -384,51 +425,48 @@ func (api *CommonAPI) traceChain(start, end *types.Block, config *TraceConfig, n logged = time.Now() logger.Info("Tracing chain segment", "start", start.NumberU64(), "end", end.NumberU64(), "current", number, "transactions", traced, "elapsed", time.Since(begin)) } - // Retrieve the parent state to trace on top + // Retrieve the parent block and target block for tracing. block, err := api.blockByNumber(localctx, rpc.BlockNumber(number)) if err != nil { failed = err break } - // Prepare the statedb for tracing. Don't use the live database for - // tracing to avoid persisting state junks into the database. - statedb, err = api.backend.StateAtBlock(localctx, block, reexec, statedb, false, preferDisk) + next, err := api.blockByNumber(localctx, rpc.BlockNumber(number+1)) if err != nil { failed = err break } - if trieDb := statedb.Database().TrieDB(); trieDb != nil { - // Hold the reference for tracer, will be released at the final stage - trieDb.ReferenceRoot(block.Root()) - - // Release the parent state because it's already held by the tracer - if !common.EmptyHash(parent) { - trieDb.Dereference(parent) - } - // Prefer disk if the trie db memory grows too much - s1, s2, s3 := trieDb.Size() - if !preferDisk && (s1+s2+s3) > defaultTracechainMemLimit { - logger.Info("Switching to prefer-disk mode for tracing", "size", s1+s2+s3) - preferDisk = true - } + // Prepare the statedb for tracing. 
Don't use the live database for + // tracing to avoid persisting state junks into the database. Switch + // over to `preferDisk` mode only if the memory usage exceeds the + // limit, the trie database will be reconstructed from scratch only + // if the relevant state is available in disk. + var preferDisk bool + if statedb != nil { + s1, s2, s3 := statedb.Database().TrieDB().Size() + preferDisk = s1+s2+s3 > defaultTracechainMemLimit } - parent = block.Root() - - next, err := api.blockByNumber(localctx, rpc.BlockNumber(number+1)) + statedb, release, err = api.backend.StateAtBlock(localctx, block, reexec, statedb, false, preferDisk) if err != nil { failed = err break } + // Clean out any pending derefs. Note this step must be done after + // constructing tracing state, because the tracing state of block + // next depends on the parent state and construction may fail if + // we release too early. + reler.call() + // Send the block over to the concurrent tracers (if not in the fast-forward phase) txs := next.Transactions() if notifier != nil { select { - case tasks <- &blockTraceTask{statedb: statedb.Copy(), block: next, rootref: block.Root(), results: make([]*txTraceResult, len(txs))}: + case tasks <- &blockTraceTask{statedb: statedb.Copy(), block: next, release: release, results: make([]*txTraceResult, len(txs))}: case <-notifier.Closed(): return } } else { - tasks <- &blockTraceTask{statedb: statedb.Copy(), block: next, rootref: block.Root(), results: make([]*txTraceResult, len(txs))} + tasks <- &blockTraceTask{statedb: statedb.Copy(), block: next, release: release, results: make([]*txTraceResult, len(txs))} } traced += uint64(len(txs)) } @@ -449,10 +487,6 @@ func (api *CommonAPI) traceChain(start, end *types.Block, config *TraceConfig, n } done[uint64(result.Block)] = result - // Dereference any parent tries held in memory by this task - if res.statedb.Database().TrieDB() != nil { - res.statedb.Database().TrieDB().Dereference(res.rootref) - } if notifier != nil { 
// Stream completed traces to the user, aborting on the first error for result, ok := done[next]; ok; result, ok = done[next] { @@ -472,6 +506,7 @@ func (api *CommonAPI) traceChain(start, end *types.Block, config *TraceConfig, n } if notifier != nil { + // Keep reading the trace results and stream them to result channel. go waitForResult() return nil, nil } @@ -598,10 +633,12 @@ func (api *CommonAPI) traceBlock(ctx context.Context, block *types.Block, config reexec = *config.Reexec } - statedb, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true, false) + statedb, release, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true, false) if err != nil { return nil, err } + defer release() + // Execute all the transaction contained within the block concurrently var ( signer = types.MakeSigner(api.backend.ChainConfig(), block.Number()) @@ -695,10 +732,12 @@ func (api *CommonAPI) standardTraceBlockToFile(ctx context.Context, block *types if config != nil && config.Reexec != nil { reexec = *config.Reexec } - statedb, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true, false) + statedb, release, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true, false) if err != nil { return nil, err } + defer release() + // Retrieve the tracing configurations, or use default values var ( logConfig vm.LogConfig @@ -811,10 +850,12 @@ func (api *CommonAPI) TraceTransaction(ctx context.Context, hash common.Hash, co if err != nil { return nil, err } - msg, blockCtx, txCtx, statedb, err := api.backend.StateAtTransaction(ctx, block, int(index), reexec) + msg, blockCtx, txCtx, statedb, release, err := api.backend.StateAtTransaction(ctx, block, int(index), reexec) if err != nil { return nil, err } + defer release() + // Trace the transaction and return return api.traceTx(ctx, msg, blockCtx, txCtx, statedb, config) } @@ -850,10 +891,11 @@ func (api *CommonAPI) TraceCall(ctx context.Context, args kaiaapi.CallArgs, bloc if config != nil && config.Reexec != 
nil { reexec = *config.Reexec } - statedb, err := api.backend.StateAtBlock(ctx, block, reexec, nil, true, false) + statedb, release, err := api.backend.StateAtBlock(ctx, block, reexec, nil, true, false) if err != nil { return nil, err } + defer release() // Execute the trace intrinsicGas, err := types.IntrinsicGas(args.InputData(), nil, args.To == nil, api.backend.ChainConfig().Rules(block.Number())) diff --git a/node/cn/tracers/api_test.go b/node/cn/tracers/api_test.go index 290f6537f..0a808ddde 100644 --- a/node/cn/tracers/api_test.go +++ b/node/cn/tracers/api_test.go @@ -23,11 +23,13 @@ import ( "bytes" "context" "crypto/ecdsa" + "encoding/json" "errors" "fmt" "math/big" "reflect" "sort" + "sync/atomic" "testing" kaiaapi "github.com/kaiachain/kaia/api" @@ -58,6 +60,9 @@ type testBackend struct { engine consensus.Engine chaindb database.DBManager chain *blockchain.BlockChain + + refHook func() // Hook is invoked when the requested state is referenced + relHook func() // Hook is invoked when the requested state is released } func newTestBackend(t *testing.T, n int, gspec *blockchain.Genesis, generator func(i int, b *blockchain.BlockGen)) *testBackend { @@ -144,25 +149,33 @@ func (b *testBackend) ChainDB() database.DBManager { return b.chaindb } -func (b *testBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (*state.StateDB, error) { +func (b *testBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (*state.StateDB, StateReleaseFunc, error) { statedb, err := b.chain.StateAt(block.Root()) if err != nil { - return nil, errStateNotFound + return nil, nil, errStateNotFound + } + if b.refHook != nil { + b.refHook() + } + release := func() { + if b.relHook != nil { + b.relHook() + } } - return statedb, nil + return statedb, release, nil } -func (b *testBackend) StateAtTransaction(ctx context.Context, block 
*types.Block, txIndex int, reexec uint64) (blockchain.Message, vm.BlockContext, vm.TxContext, *state.StateDB, error) { +func (b *testBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (blockchain.Message, vm.BlockContext, vm.TxContext, *state.StateDB, StateReleaseFunc, error) { parent := b.chain.GetBlock(block.ParentHash(), block.NumberU64()-1) if parent == nil { - return nil, vm.BlockContext{}, vm.TxContext{}, nil, errBlockNotFound + return nil, vm.BlockContext{}, vm.TxContext{}, nil, nil, errBlockNotFound } - statedb, err := b.chain.StateAt(parent.Root()) + statedb, release, err := b.StateAtBlock(ctx, parent, reexec, nil, true, false) if err != nil { - return nil, vm.BlockContext{}, vm.TxContext{}, nil, errStateNotFound + return nil, vm.BlockContext{}, vm.TxContext{}, nil, nil, errStateNotFound } if txIndex == 0 && len(block.Transactions()) == 0 { - return nil, vm.BlockContext{}, vm.TxContext{}, statedb, nil + return nil, vm.BlockContext{}, vm.TxContext{}, statedb, release, nil } // Recompute transactions up to the target index. 
signer := types.MakeSigner(b.chainConfig, block.Number()) @@ -171,15 +184,73 @@ func (b *testBackend) StateAtTransaction(ctx context.Context, block *types.Block txContext := blockchain.NewEVMTxContext(msg, block.Header(), b.chainConfig) blockContext := blockchain.NewEVMBlockContext(block.Header(), b.chain, nil) if idx == txIndex { - return msg, blockContext, txContext, statedb, nil + return msg, blockContext, txContext, statedb, release, nil } vmenv := vm.NewEVM(blockContext, txContext, statedb, b.chainConfig, &vm.Config{Debug: true, EnableInternalTxTracing: true}) if _, err := blockchain.ApplyMessage(vmenv, msg); err != nil { - return nil, vm.BlockContext{}, vm.TxContext{}, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) + return nil, vm.BlockContext{}, vm.TxContext{}, nil, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) } statedb.Finalise(true, true) } - return nil, vm.BlockContext{}, vm.TxContext{}, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash()) + return nil, vm.BlockContext{}, vm.TxContext{}, nil, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash()) +} + +func TestTraceChain(t *testing.T) { + // Initialize test accounts + accounts := newAccounts(3) + genesis := &blockchain.Genesis{Alloc: blockchain.GenesisAlloc{ + accounts[0].addr: {Balance: big.NewInt(params.KAIA)}, + accounts[1].addr: {Balance: big.NewInt(params.KAIA)}, + accounts[2].addr: {Balance: big.NewInt(params.KAIA)}, + }} + genBlocks := 50 + signer := types.LatestSignerForChainID(params.TestChainConfig.ChainID) + + var ( + ref uint32 // total refs has made + rel uint32 // total rels has made + nonce uint64 + ) + backend := newTestBackend(t, genBlocks, genesis, func(i int, b *blockchain.BlockGen) { + // Transfer from account[0] to account[1] + // value: 1000 wei + // fee: 0 wei + for j := 0; j < i+1; j++ { + tx, _ := types.SignTx(types.NewTransaction(nonce, accounts[1].addr, big.NewInt(1000), 
params.TxGas, big.NewInt(0), nil), signer, accounts[0].key) + b.AddTx(tx) + nonce += 1 + } + }) + backend.refHook = func() { atomic.AddUint32(&ref, 1) } + backend.relHook = func() { atomic.AddUint32(&rel, 1) } + api := NewAPI(backend) + + single := `{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}` + cases := []struct { + start uint64 + end uint64 + config *TraceConfig + }{ + {0, 50, nil}, // the entire chain range, blocks [1, 50] + {10, 20, nil}, // the middle chain range, blocks [11, 20] + } + for _, c := range cases { + ref, rel = 0, 0 // clean up the counters + + from, _ := api.blockByNumber(context.Background(), rpc.BlockNumber(c.start)) + to, _ := api.blockByNumber(context.Background(), rpc.BlockNumber(c.end)) + ret, err := api.traceChain(from, to, c.config, nil, nil) + assert.NoError(t, err) + + for _, trace := range ret { + for _, txTrace := range trace.Traces { + blob, _ := json.Marshal(txTrace.Result) + if string(blob) != single { + t.Error("Unexpected tracing result") + } + } + } + } } func TestTraceCall(t *testing.T) { diff --git a/params/bootnodes.go b/params/bootnodes.go index 7a462b8a7..73208c323 100644 --- a/params/bootnodes.go +++ b/params/bootnodes.go @@ -36,17 +36,13 @@ var MainnetBootnodes = map[common.ConnType]bootnodesByTypes{ []string{}, }, common.PROXYNODE: { - []string{ - "kni://18b36118cce093673499fc6e9aa196f047fe17a0de35b6f2a76a4557802f6abf9f89aa5e7330e93c9014b714b9df6378393611efe39aec9d3d831d6aa9d617ae@ston65.cypress.klaytn.net:32323?ntype=bn", // TODO: rename cypress - "kni://63f1c96874da85140ecca3ce24875cb5ef28fa228bc3572e16f690db4a48fc8067502d2f6e8f0c66fb558276a5ada1e4906852c7ae42b0003e9f9f25d1e123b1@ston873.cypress.klaytn.net:32323?ntype=bn", // TODO: rename cypress - "kni://94cc15e2014b86584908707de55800c0a2ea8a24dc5550dcb507043e4cf18ff04f21dc86ed17757dc63b1fa85bb418b901e5e24e4197ad4bbb0d96cd9389ed98@ston106.cypress.klaytn.net:32323?ntype=bn", // TODO: rename cypress - }, + []string{}, }, common.ENDPOINTNODE: { 
[]string{ - "kni://18b36118cce093673499fc6e9aa196f047fe17a0de35b6f2a76a4557802f6abf9f89aa5e7330e93c9014b714b9df6378393611efe39aec9d3d831d6aa9d617ae@ston65.cypress.klaytn.net:32323?ntype=bn", // TODO: rename cypress - "kni://63f1c96874da85140ecca3ce24875cb5ef28fa228bc3572e16f690db4a48fc8067502d2f6e8f0c66fb558276a5ada1e4906852c7ae42b0003e9f9f25d1e123b1@ston873.cypress.klaytn.net:32323?ntype=bn", // TODO: rename cypress - "kni://94cc15e2014b86584908707de55800c0a2ea8a24dc5550dcb507043e4cf18ff04f21dc86ed17757dc63b1fa85bb418b901e5e24e4197ad4bbb0d96cd9389ed98@ston106.cypress.klaytn.net:32323?ntype=bn", // TODO: rename cypress + "kni://18b36118cce093673499fc6e9aa196f047fe17a0de35b6f2a76a4557802f6abf9f89aa5e7330e93c9014b714b9df6378393611efe39aec9d3d831d6aa9d617ae@ston65.node.kaia.io:32323?ntype=bn", + "kni://63f1c96874da85140ecca3ce24875cb5ef28fa228bc3572e16f690db4a48fc8067502d2f6e8f0c66fb558276a5ada1e4906852c7ae42b0003e9f9f25d1e123b1@ston873.node.kaia.io:32323?ntype=bn", + "kni://94cc15e2014b86584908707de55800c0a2ea8a24dc5550dcb507043e4cf18ff04f21dc86ed17757dc63b1fa85bb418b901e5e24e4197ad4bbb0d96cd9389ed98@ston106.node.kaia.io:32323?ntype=bn", }, }, } @@ -54,18 +50,14 @@ var MainnetBootnodes = map[common.ConnType]bootnodesByTypes{ // KairosBootnodes are the URLs of bootnodes running on the Kairos network. 
var KairosBootnodes = map[common.ConnType]bootnodesByTypes{ common.CONSENSUSNODE: { - []string{ - "kni://d8adb5a300d7ee0fcde4d6777362c1e0e03d208a2f3978d6d3993a2ada4a64af2580b97d4b4bf21201b1596cea761ecf53f196153bae8bbb0948b3c6397303b2@ston98.baobab.klaytn.net:32323?ntype=bn", // TODO: rename baobab - }, - }, - common.ENDPOINTNODE: { - []string{ - "kni://779d766628247ebda5f3e108e9303bd8efdb8eba9fd8d6c529e2614aec7207ebf6614fe7e61d0d99b75e8b23dd3a679b112fd0de7e4e71a7008f0718710da48f@ston45.baobab.klaytn.net:32323?ntype=bn", // TODO: rename baobab - }, + []string{}, }, common.PROXYNODE: { + []string{}, + }, + common.ENDPOINTNODE: { []string{ - "kni://779d766628247ebda5f3e108e9303bd8efdb8eba9fd8d6c529e2614aec7207ebf6614fe7e61d0d99b75e8b23dd3a679b112fd0de7e4e71a7008f0718710da48f@ston45.baobab.klaytn.net:32323?ntype=bn", // TODO: rename baobab + "kni://779d766628247ebda5f3e108e9303bd8efdb8eba9fd8d6c529e2614aec7207ebf6614fe7e61d0d99b75e8b23dd3a679b112fd0de7e4e71a7008f0718710da48f@ston45-kairos.node.kaia.io:32323?ntype=bn", }, }, } diff --git a/params/version.go b/params/version.go index 79d814d12..15b3c5c93 100644 --- a/params/version.go +++ b/params/version.go @@ -28,7 +28,7 @@ const ( ReleaseNum = 0 VersionMajor = 1 // Major version component of the current release VersionMinor = 0 // Minor version component of the current release - VersionPatch = 2 // Patch version component of the current release + VersionPatch = 3 // Patch version component of the current release ) // Version holds the textual version string. 
diff --git a/reward/supply_manager.go b/reward/supply_manager.go index 6f04c65b0..4ff78a076 100644 --- a/reward/supply_manager.go +++ b/reward/supply_manager.go @@ -411,15 +411,30 @@ func (sm *supplyManager) accumulateReward(from, to uint64, fromCheckpoint *datab // Accumulate one block var ( - header = sm.chain.GetHeaderByNumber(num) - block = sm.chain.GetBlockByNumber(num) - receipts = sm.chain.GetReceiptsByBlockHash(block.Hash()) - rules = sm.chain.Config().Rules(new(big.Int).SetUint64(num)) - pset, err = sm.gov.EffectiveParams(num) + header = sm.chain.GetHeaderByNumber(num) + block = sm.chain.GetBlockByNumber(num) + rules = sm.chain.Config().Rules(new(big.Int).SetUint64(num)) ) + if header == nil { + logger.Error("Header not found", "number", num) + return nil, errNoBlock + } + if block == nil { + logger.Error("Block not found", "number", num) + return nil, errNoBlock + } + + receipts := sm.chain.GetReceiptsByBlockHash(block.Hash()) + if receipts == nil { + logger.Error("Receipts not found", "number", num) + return nil, errNoBlock + } + + pset, err := sm.gov.EffectiveParams(num) if err != nil { return nil, err } + blockTotal, err := GetTotalReward(header, block.Transactions(), receipts, rules, pset) if err != nil { return nil, err diff --git a/snapshot/disklayer.go b/snapshot/disklayer.go index de2997de8..944c9188c 100644 --- a/snapshot/disklayer.go +++ b/snapshot/disklayer.go @@ -50,6 +50,16 @@ type diskLayer struct { lock sync.RWMutex } +// Release releases underlying resources; specifically the fastcache requires +// Reset() in order to not leak memory. +// OBS: It does not invoke Close on the diskdb +func (dl *diskLayer) Release() error { + if dl.cache != nil { + dl.cache.Reset() + } + return nil +} + // Root returns root hash for which this snapshot was made. 
func (dl *diskLayer) Root() common.Hash { return dl.root diff --git a/snapshot/snapshot.go b/snapshot/snapshot.go index 78bce0e81..3f79549ae 100644 --- a/snapshot/snapshot.go +++ b/snapshot/snapshot.go @@ -666,6 +666,13 @@ func diffToDisk(bottom *diffLayer) *diskLayer { return res } +// Release releases resources +func (t *Tree) Release() { + if dl := t.disklayer(); dl != nil { + dl.Release() + } +} + // Journal commits an entire diff hierarchy to disk into a single journal entry. // This is meant to be used during shutdown to persist the snapshot without // flattening everything down (bad for reorgs). @@ -823,13 +830,15 @@ func (t *Tree) disklayer() *diskLayer { case *diskLayer: return layer case *diffLayer: + layer.lock.RLock() + defer layer.lock.RUnlock() return layer.origin default: panic(fmt.Sprintf("%T: undefined layer", snap)) } } -// diskRoot is a internal helper function to return the disk layer root. +// diskRoot is an internal helper function to return the disk layer root. // The lock of snapTree is assumed to be held already. func (t *Tree) diskRoot() common.Hash { disklayer := t.disklayer() diff --git a/storage/database/db_manager.go b/storage/database/db_manager.go index 6d162e408..755cc43db 100644 --- a/storage/database/db_manager.go +++ b/storage/database/db_manager.go @@ -822,7 +822,7 @@ func (dbm *databaseManager) CreateMigrationDBAndSetStatus(blockNum uint64) error // FinishStateMigration updates stateTrieDB and removes the old one. // The function should be called only after when state trie migration is finished. -// It returns a channel that closes when removeDB is finished. +// It returns a channel that closes when removeOldDB is finished. 
func (dbm *databaseManager) FinishStateMigration(succeed bool) chan struct{} { // lock to prevent from a conflict of reading state DB and changing state DB dbm.lockInMigration.Lock() @@ -844,8 +844,6 @@ func (dbm *databaseManager) FinishStateMigration(succeed bool) chan struct{} { dbm.setDBDir(StateTrieDB, dbDirToBeUsed) dbm.dbs[StateTrieDB] = dbToBeUsed - dbm.setStateTrieMigrationStatus(0) - dbm.dbs[StateTrieMigrationDB] = nil dbm.setDBDir(StateTrieMigrationDB, "") @@ -853,12 +851,17 @@ func (dbm *databaseManager) FinishStateMigration(succeed bool) chan struct{} { dbToBeRemoved.Close() endCheck := make(chan struct{}) - go removeDB(dbPathToBeRemoved, endCheck) + go dbm.removeOldDB(dbPathToBeRemoved, endCheck) + + // Used only for testing. Closing it takes time due to the large size of the database return endCheck } -func removeDB(dbPath string, endCheck chan struct{}) { +// Remove old database. This is called once migration(copy) is done. +func (dbm *databaseManager) removeOldDB(dbPath string, endCheck chan struct{}) { defer func() { + // Set the completion mark if old database is completely removed (possibly not be removed if error occurs) + dbm.setStateTrieMigrationStatus(0) if endCheck != nil { close(endCheck) } diff --git a/tests/README.md b/tests/README.md index b26a10c15..77e3867f8 100644 --- a/tests/README.md +++ b/tests/README.md @@ -1,6 +1,6 @@ # How to use Kaia tests -[Kaia tests](https://github.com/kaiachain/kaia-tests) is not currently included +[Kaia tests](https://github.com/kaiachain/kaia-core-tests) is not currently included here due to its relatively large size. It will be added as a git submodule later. @@ -17,7 +17,7 @@ clone it in `$HOME/workspace`. 
``` $ cd $HOME/workspace -$ git clone git@github.com:kaiachain/kaia-tests.git +$ git clone git@github.com:kaiachain/kaia-core-tests.git ``` @@ -28,7 +28,7 @@ We assume Kaia source tree is located in ``` $ cd $HOME/workspace/go/src/github.com/kaiachain/kaia/tests -$ ln -s $HOME/workspace/kaia-tests testdata +$ ln -s $HOME/workspace/kaia-core-tests testdata ``` diff --git a/tests/state_reexec_test.go b/tests/state_reexec_test.go index 1c243ea3c..7af5f72d4 100644 --- a/tests/state_reexec_test.go +++ b/tests/state_reexec_test.go @@ -159,7 +159,10 @@ func testStateReexec_run(t *testing.T, node *blockchainTestNode, num uint64) { block := node.cn.BlockChain().GetBlockByNumber(num) t.Logf("Regenerating state at block %d", num) - state, err := node.cn.APIBackend.StateAtBlock(context.Background(), block, 10, nil, false, false) + state, release, err := node.cn.APIBackend.StateAtBlock(context.Background(), block, 10, nil, false, false) + if release != nil { + release() + } require.Nil(t, err) // Regenerated state must match the stored block's stateRoot